v2
This commit is contained in:
@@ -0,0 +1,3 @@
|
||||
## Azure Pipelines Configuration
|
||||
|
||||
Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information.
|
||||
@@ -0,0 +1,365 @@
|
||||
trigger:
|
||||
batch: true
|
||||
branches:
|
||||
include:
|
||||
- main
|
||||
- stable-*
|
||||
|
||||
pr:
|
||||
autoCancel: true
|
||||
branches:
|
||||
include:
|
||||
- main
|
||||
- stable-*
|
||||
|
||||
schedules:
|
||||
- cron: 0 9 * * *
|
||||
displayName: Nightly
|
||||
always: true
|
||||
branches:
|
||||
include:
|
||||
- main
|
||||
- cron: 0 12 * * 0
|
||||
displayName: Weekly (old stable branches)
|
||||
always: true
|
||||
branches:
|
||||
include:
|
||||
- stable-*
|
||||
|
||||
variables:
|
||||
- name: checkoutPath
|
||||
value: ansible_collections/community/docker
|
||||
- name: coverageBranches
|
||||
value: main
|
||||
- name: pipelinesCoverage
|
||||
value: coverage
|
||||
- name: entryPoint
|
||||
value: tests/utils/shippable/shippable.sh
|
||||
- name: fetchDepth
|
||||
value: 0
|
||||
|
||||
resources:
|
||||
containers:
|
||||
- container: default
|
||||
image: quay.io/ansible/azure-pipelines-test-container:1.9.0
|
||||
|
||||
pool: Standard
|
||||
|
||||
stages:
|
||||
|
||||
### Sanity & units
|
||||
- stage: Ansible_devel
|
||||
displayName: Sanity & Units devel
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
targets:
|
||||
- name: Sanity
|
||||
test: 'devel/sanity/1'
|
||||
- name: Sanity Extra # Only on devel
|
||||
test: 'devel/sanity/extra'
|
||||
- name: Units
|
||||
test: 'devel/units/1'
|
||||
- stage: Ansible_2_13
|
||||
displayName: Sanity & Units 2.13
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
targets:
|
||||
- name: Sanity
|
||||
test: '2.13/sanity/1'
|
||||
- name: Units
|
||||
test: '2.13/units/1'
|
||||
- stage: Ansible_2_12
|
||||
displayName: Sanity & Units 2.12
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
targets:
|
||||
- name: Sanity
|
||||
test: '2.12/sanity/1'
|
||||
- name: Units
|
||||
test: '2.12/units/1'
|
||||
- stage: Ansible_2_11
|
||||
displayName: Sanity & Units 2.11
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
targets:
|
||||
- name: Sanity
|
||||
test: '2.11/sanity/1'
|
||||
- name: Units
|
||||
test: '2.11/units/1'
|
||||
- stage: Ansible_2_10
|
||||
displayName: Sanity & Units 2.10
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
targets:
|
||||
- name: Sanity
|
||||
test: '2.10/sanity/1'
|
||||
- name: Units
|
||||
test: '2.10/units/1'
|
||||
- stage: Ansible_2_9
|
||||
displayName: Sanity & Units 2.9
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
targets:
|
||||
- name: Sanity
|
||||
test: '2.9/sanity/1'
|
||||
- name: Units
|
||||
test: '2.9/units/1'
|
||||
|
||||
### Docker
|
||||
- stage: Docker_devel
|
||||
displayName: Docker devel
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: devel/linux/{0}
|
||||
targets:
|
||||
- name: CentOS 7
|
||||
test: centos7
|
||||
- name: Fedora 36
|
||||
test: fedora36
|
||||
- name: Fedora 35
|
||||
test: fedora35
|
||||
- name: openSUSE 15
|
||||
test: opensuse15
|
||||
- name: Ubuntu 18.04
|
||||
test: ubuntu1804
|
||||
- name: Ubuntu 22.04
|
||||
test: ubuntu2204
|
||||
- name: Alpine 3
|
||||
test: alpine3
|
||||
groups:
|
||||
- 4
|
||||
- 5
|
||||
- stage: Docker_2_13
|
||||
displayName: Docker 2.13
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.13/linux/{0}
|
||||
targets:
|
||||
- name: CentOS 7
|
||||
test: centos7
|
||||
- name: openSUSE 15 py2
|
||||
test: opensuse15py2
|
||||
- name: Alpine 3
|
||||
test: alpine3
|
||||
groups:
|
||||
- 4
|
||||
- 5
|
||||
- stage: Docker_2_12
|
||||
displayName: Docker 2.12
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.12/linux/{0}
|
||||
targets:
|
||||
- name: CentOS 8
|
||||
test: centos8
|
||||
- name: Fedora 34
|
||||
test: fedora34
|
||||
- name: Ubuntu 20.04
|
||||
test: ubuntu2004
|
||||
groups:
|
||||
- 4
|
||||
- 5
|
||||
- stage: Docker_2_11
|
||||
displayName: Docker 2.11
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.11/linux/{0}
|
||||
targets:
|
||||
- name: Fedora 33
|
||||
test: fedora33
|
||||
- name: Alpine 3
|
||||
test: alpine3
|
||||
groups:
|
||||
- 4
|
||||
- 5
|
||||
- stage: Docker_2_10
|
||||
displayName: Docker 2.10
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.10/linux/{0}
|
||||
targets:
|
||||
- name: Fedora 32
|
||||
test: fedora32
|
||||
- name: Ubuntu 16.04
|
||||
test: ubuntu1604
|
||||
groups:
|
||||
- 4
|
||||
- 5
|
||||
- stage: Docker_2_9
|
||||
displayName: Docker 2.9
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: 2.9/linux/{0}
|
||||
targets:
|
||||
- name: Fedora 31
|
||||
test: fedora31
|
||||
groups:
|
||||
- 4
|
||||
- 5
|
||||
|
||||
### Community Docker
|
||||
- stage: Docker_community_devel
|
||||
displayName: Docker (community images) devel
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
testFormat: devel/linux-community/{0}
|
||||
targets:
|
||||
- name: Debian Bullseye
|
||||
test: debian-bullseye/3.9
|
||||
- name: ArchLinux
|
||||
test: archlinux/3.10
|
||||
- name: CentOS Stream 8
|
||||
test: centos-stream8/3.8
|
||||
groups:
|
||||
- 4
|
||||
- 5
|
||||
|
||||
### Remote
|
||||
- stage: Remote_devel
|
||||
displayName: Remote devel
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: RHEL {0}
|
||||
testFormat: devel/rhel/{0}
|
||||
targets:
|
||||
- test: '7.9'
|
||||
- test: '9.0-pypi-latest'
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
- 5
|
||||
- stage: Remote_2_13
|
||||
displayName: Remote 2.13
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: RHEL {0}
|
||||
testFormat: 2.13/rhel/{0}
|
||||
targets:
|
||||
- test: '8.5'
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
- 5
|
||||
- stage: Remote_2_12
|
||||
displayName: Remote 2.12
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: RHEL {0}
|
||||
testFormat: 2.12/rhel/{0}
|
||||
targets:
|
||||
- test: '8.4'
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
- 5
|
||||
- stage: Remote_2_11
|
||||
displayName: Remote 2.11
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: RHEL {0}
|
||||
testFormat: 2.11/rhel/{0}
|
||||
targets:
|
||||
- test: '8.3'
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
- 5
|
||||
- stage: Remote_2_10
|
||||
displayName: Remote 2.10
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: RHEL {0}
|
||||
testFormat: 2.10/rhel/{0}
|
||||
targets:
|
||||
- test: '7.8'
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
- stage: Remote_2_9
|
||||
displayName: Remote 2.9
|
||||
dependsOn: []
|
||||
jobs:
|
||||
- template: templates/matrix.yml
|
||||
parameters:
|
||||
nameFormat: RHEL {0}
|
||||
testFormat: 2.9/rhel/{0}
|
||||
targets:
|
||||
- test: '8.2'
|
||||
groups:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
|
||||
## Finally
|
||||
|
||||
- stage: Summary
|
||||
condition: succeededOrFailed()
|
||||
dependsOn:
|
||||
- Ansible_devel
|
||||
- Ansible_2_13
|
||||
- Ansible_2_12
|
||||
- Ansible_2_11
|
||||
- Ansible_2_10
|
||||
- Ansible_2_9
|
||||
- Remote_devel
|
||||
- Remote_2_13
|
||||
- Remote_2_12
|
||||
- Remote_2_11
|
||||
- Remote_2_10
|
||||
- Remote_2_9
|
||||
- Docker_devel
|
||||
- Docker_2_13
|
||||
- Docker_2_12
|
||||
- Docker_2_11
|
||||
- Docker_2_10
|
||||
- Docker_2_9
|
||||
- Docker_community_devel
|
||||
jobs:
|
||||
- template: templates/coverage.yml
|
||||
@@ -0,0 +1,20 @@
|
||||
#!/usr/bin/env bash
|
||||
# Aggregate code coverage results for later processing.
|
||||
|
||||
set -o pipefail -eu
|
||||
|
||||
agent_temp_directory="$1"
|
||||
|
||||
PATH="${PWD}/bin:${PATH}"
|
||||
|
||||
mkdir "${agent_temp_directory}/coverage/"
|
||||
|
||||
options=(--venv --venv-system-site-packages --color -v)
|
||||
|
||||
ansible-test coverage combine --group-by command --export "${agent_temp_directory}/coverage/" "${options[@]}"
|
||||
|
||||
if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
|
||||
# Only analyze coverage if the installed version of ansible-test supports it.
|
||||
# Doing so allows this script to work unmodified for multiple Ansible versions.
|
||||
ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}"
|
||||
fi
|
||||
@@ -0,0 +1,60 @@
|
||||
#!/usr/bin/env python
|
||||
"""
|
||||
Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job.
|
||||
Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
|
||||
The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
|
||||
Keep in mind that Azure Pipelines does not enforce unique job display names (only names).
|
||||
It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
|
||||
"""
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
|
||||
|
||||
def main():
|
||||
"""Main program entry point."""
|
||||
source_directory = sys.argv[1]
|
||||
|
||||
if '/ansible_collections/' in os.getcwd():
|
||||
output_path = "tests/output"
|
||||
else:
|
||||
output_path = "test/results"
|
||||
|
||||
destination_directory = os.path.join(output_path, 'coverage')
|
||||
|
||||
if not os.path.exists(destination_directory):
|
||||
os.makedirs(destination_directory)
|
||||
|
||||
jobs = {}
|
||||
count = 0
|
||||
|
||||
for name in os.listdir(source_directory):
|
||||
match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
|
||||
label = match.group('label')
|
||||
attempt = int(match.group('attempt'))
|
||||
jobs[label] = max(attempt, jobs.get(label, 0))
|
||||
|
||||
for label, attempt in jobs.items():
|
||||
name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt)
|
||||
source = os.path.join(source_directory, name)
|
||||
source_files = os.listdir(source)
|
||||
|
||||
for source_file in source_files:
|
||||
source_path = os.path.join(source, source_file)
|
||||
destination_path = os.path.join(destination_directory, source_file + '.' + label)
|
||||
print('"%s" -> "%s"' % (source_path, destination_path))
|
||||
shutil.copyfile(source_path, destination_path)
|
||||
count += 1
|
||||
|
||||
print('Coverage file count: %d' % count)
|
||||
print('##vso[task.setVariable variable=coverageFileCount]%d' % count)
|
||||
print('##vso[task.setVariable variable=outputPath]%s' % output_path)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,24 @@
|
||||
#!/usr/bin/env bash
|
||||
# Check the test results and set variables for use in later steps.
|
||||
|
||||
set -o pipefail -eu
|
||||
|
||||
if [[ "$PWD" =~ /ansible_collections/ ]]; then
|
||||
output_path="tests/output"
|
||||
else
|
||||
output_path="test/results"
|
||||
fi
|
||||
|
||||
echo "##vso[task.setVariable variable=outputPath]${output_path}"
|
||||
|
||||
if compgen -G "${output_path}"'/junit/*.xml' > /dev/null; then
|
||||
echo "##vso[task.setVariable variable=haveTestResults]true"
|
||||
fi
|
||||
|
||||
if compgen -G "${output_path}"'/bot/ansible-test-*' > /dev/null; then
|
||||
echo "##vso[task.setVariable variable=haveBotResults]true"
|
||||
fi
|
||||
|
||||
if compgen -G "${output_path}"'/coverage/*' > /dev/null; then
|
||||
echo "##vso[task.setVariable variable=haveCoverageData]true"
|
||||
fi
|
||||
@@ -0,0 +1,101 @@
|
||||
#!/usr/bin/env python
|
||||
"""
|
||||
Upload code coverage reports to codecov.io.
|
||||
Multiple coverage files from multiple languages are accepted and aggregated after upload.
|
||||
Python coverage, as well as PowerShell and Python stubs can all be uploaded.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import dataclasses
|
||||
import pathlib
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
import typing as t
|
||||
import urllib.request
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class CoverageFile:
|
||||
name: str
|
||||
path: pathlib.Path
|
||||
flags: t.List[str]
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class Args:
|
||||
dry_run: bool
|
||||
path: pathlib.Path
|
||||
|
||||
|
||||
def parse_args() -> Args:
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('-n', '--dry-run', action='store_true')
|
||||
parser.add_argument('path', type=pathlib.Path)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Store arguments in a typed dataclass
|
||||
fields = dataclasses.fields(Args)
|
||||
kwargs = {field.name: getattr(args, field.name) for field in fields}
|
||||
|
||||
return Args(**kwargs)
|
||||
|
||||
|
||||
def process_files(directory: pathlib.Path) -> t.Tuple[CoverageFile, ...]:
|
||||
processed = []
|
||||
for file in directory.joinpath('reports').glob('coverage*.xml'):
|
||||
name = file.stem.replace('coverage=', '')
|
||||
|
||||
# Get flags from name
|
||||
flags = name.replace('-powershell', '').split('=') # Drop '-powershell' suffix
|
||||
flags = [flag if not flag.startswith('stub') else flag.split('-')[0] for flag in flags] # Remove "-01" from stub files
|
||||
|
||||
processed.append(CoverageFile(name, file, flags))
|
||||
|
||||
return tuple(processed)
|
||||
|
||||
|
||||
def upload_files(codecov_bin: pathlib.Path, files: t.Tuple[CoverageFile, ...], dry_run: bool = False) -> None:
|
||||
for file in files:
|
||||
cmd = [
|
||||
str(codecov_bin),
|
||||
'--name', file.name,
|
||||
'--file', str(file.path),
|
||||
]
|
||||
for flag in file.flags:
|
||||
cmd.extend(['--flags', flag])
|
||||
|
||||
if dry_run:
|
||||
print(f'DRY-RUN: Would run command: {cmd}')
|
||||
continue
|
||||
|
||||
subprocess.run(cmd, check=True)
|
||||
|
||||
|
||||
def download_file(url: str, dest: pathlib.Path, flags: int, dry_run: bool = False) -> None:
|
||||
if dry_run:
|
||||
print(f'DRY-RUN: Would download {url} to {dest} and set mode to {flags:o}')
|
||||
return
|
||||
|
||||
with urllib.request.urlopen(url) as resp:
|
||||
with dest.open('w+b') as f:
|
||||
# Read data in chunks rather than all at once
|
||||
shutil.copyfileobj(resp, f, 64 * 1024)
|
||||
|
||||
dest.chmod(flags)
|
||||
|
||||
|
||||
def main():
|
||||
args = parse_args()
|
||||
url = 'https://ansible-ci-files.s3.amazonaws.com/codecov/linux/codecov'
|
||||
with tempfile.TemporaryDirectory(prefix='codecov-') as tmpdir:
|
||||
codecov_bin = pathlib.Path(tmpdir) / 'codecov'
|
||||
download_file(url, codecov_bin, 0o755, args.dry_run)
|
||||
|
||||
files = process_files(args.path)
|
||||
upload_files(codecov_bin, files, args.dry_run)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,15 @@
|
||||
#!/usr/bin/env bash
|
||||
# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.
|
||||
|
||||
set -o pipefail -eu
|
||||
|
||||
PATH="${PWD}/bin:${PATH}"
|
||||
|
||||
if ! ansible-test --help >/dev/null 2>&1; then
|
||||
# Install the devel version of ansible-test for generating code coverage reports.
|
||||
# This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs).
|
||||
# Since a version of ansible-test is required that can work the output from multiple older releases, the devel version is used.
|
||||
pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
|
||||
fi
|
||||
|
||||
ansible-test coverage xml --group-by command --stub --venv --venv-system-site-packages --color -v
|
||||
@@ -0,0 +1,34 @@
|
||||
#!/usr/bin/env bash
|
||||
# Configure the test environment and run the tests.
|
||||
|
||||
set -o pipefail -eu
|
||||
|
||||
entry_point="$1"
|
||||
test="$2"
|
||||
read -r -a coverage_branches <<< "$3" # space separated list of branches to run code coverage on for scheduled builds
|
||||
|
||||
export COMMIT_MESSAGE
|
||||
export COMPLETE
|
||||
export COVERAGE
|
||||
export IS_PULL_REQUEST
|
||||
|
||||
if [ "${SYSTEM_PULLREQUEST_TARGETBRANCH:-}" ]; then
|
||||
IS_PULL_REQUEST=true
|
||||
COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD^2)
|
||||
else
|
||||
IS_PULL_REQUEST=
|
||||
COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD)
|
||||
fi
|
||||
|
||||
COMPLETE=
|
||||
COVERAGE=
|
||||
|
||||
if [ "${BUILD_REASON}" = "Schedule" ]; then
|
||||
COMPLETE=yes
|
||||
|
||||
if printf '%s\n' "${coverage_branches[@]}" | grep -q "^${BUILD_SOURCEBRANCHNAME}$"; then
|
||||
COVERAGE=yes
|
||||
fi
|
||||
fi
|
||||
|
||||
"${entry_point}" "${test}" 2>&1 | "$(dirname "$0")/time-command.py"
|
||||
@@ -0,0 +1,25 @@
|
||||
#!/usr/bin/env python
|
||||
"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import sys
|
||||
import time
|
||||
|
||||
|
||||
def main():
|
||||
"""Main program entry point."""
|
||||
start = time.time()
|
||||
|
||||
sys.stdin.reconfigure(errors='surrogateescape')
|
||||
sys.stdout.reconfigure(errors='surrogateescape')
|
||||
|
||||
for line in sys.stdin:
|
||||
seconds = time.time() - start
|
||||
sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
|
||||
sys.stdout.flush()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,39 @@
|
||||
# This template adds a job for processing code coverage data.
|
||||
# It will upload results to Azure Pipelines and codecov.io.
|
||||
# Use it from a job stage that completes after all other jobs have completed.
|
||||
# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed.
|
||||
|
||||
jobs:
|
||||
- job: Coverage
|
||||
displayName: Code Coverage
|
||||
container: default
|
||||
workspace:
|
||||
clean: all
|
||||
steps:
|
||||
- checkout: self
|
||||
fetchDepth: $(fetchDepth)
|
||||
path: $(checkoutPath)
|
||||
- task: DownloadPipelineArtifact@2
|
||||
displayName: Download Coverage Data
|
||||
inputs:
|
||||
path: coverage/
|
||||
patterns: "Coverage */*=coverage.combined"
|
||||
- bash: .azure-pipelines/scripts/combine-coverage.py coverage/
|
||||
displayName: Combine Coverage Data
|
||||
- bash: .azure-pipelines/scripts/report-coverage.sh
|
||||
displayName: Generate Coverage Report
|
||||
condition: gt(variables.coverageFileCount, 0)
|
||||
- task: PublishCodeCoverageResults@1
|
||||
inputs:
|
||||
codeCoverageTool: Cobertura
|
||||
# Azure Pipelines only accepts a single coverage data file.
|
||||
# That means only Python or PowerShell coverage can be uploaded, but not both.
|
||||
# Set the "pipelinesCoverage" variable to determine which type is uploaded.
|
||||
# Use "coverage" for Python and "coverage-powershell" for PowerShell.
|
||||
summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
|
||||
displayName: Publish to Azure Pipelines
|
||||
condition: gt(variables.coverageFileCount, 0)
|
||||
- bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)"
|
||||
displayName: Publish to codecov.io
|
||||
condition: gt(variables.coverageFileCount, 0)
|
||||
continueOnError: true
|
||||
@@ -0,0 +1,55 @@
|
||||
# This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template.
|
||||
# If this matrix template does not provide the required functionality, consider using the test template directly instead.
|
||||
|
||||
parameters:
|
||||
# A required list of dictionaries, one per test target.
|
||||
# Each item in the list must contain a "test" or "name" key.
|
||||
# Both may be provided. If one is omitted, the other will be used.
|
||||
- name: targets
|
||||
type: object
|
||||
|
||||
# An optional list of values which will be used to multiply the targets list into a matrix.
|
||||
# Values can be strings or numbers.
|
||||
- name: groups
|
||||
type: object
|
||||
default: []
|
||||
|
||||
# An optional format string used to generate the job name.
|
||||
# - {0} is the name of an item in the targets list.
|
||||
- name: nameFormat
|
||||
type: string
|
||||
default: "{0}"
|
||||
|
||||
# An optional format string used to generate the test name.
|
||||
# - {0} is the name of an item in the targets list.
|
||||
- name: testFormat
|
||||
type: string
|
||||
default: "{0}"
|
||||
|
||||
# An optional format string used to add the group to the job name.
|
||||
# {0} is the formatted name of an item in the targets list.
|
||||
# {{1}} is the group -- be sure to include the double "{{" and "}}".
|
||||
- name: nameGroupFormat
|
||||
type: string
|
||||
default: "{0} - {{1}}"
|
||||
|
||||
# An optional format string used to add the group to the test name.
|
||||
# {0} is the formatted test of an item in the targets list.
|
||||
# {{1}} is the group -- be sure to include the double "{{" and "}}".
|
||||
- name: testGroupFormat
|
||||
type: string
|
||||
default: "{0}/{{1}}"
|
||||
|
||||
jobs:
|
||||
- template: test.yml
|
||||
parameters:
|
||||
jobs:
|
||||
- ${{ if eq(length(parameters.groups), 0) }}:
|
||||
- ${{ each target in parameters.targets }}:
|
||||
- name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
|
||||
test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
|
||||
- ${{ if not(eq(length(parameters.groups), 0)) }}:
|
||||
- ${{ each group in parameters.groups }}:
|
||||
- ${{ each target in parameters.targets }}:
|
||||
- name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
|
||||
test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
|
||||
@@ -0,0 +1,45 @@
|
||||
# This template uses the provided list of jobs to create test one or more test jobs.
|
||||
# It can be used directly if needed, or through the matrix template.
|
||||
|
||||
parameters:
|
||||
# A required list of dictionaries, one per test job.
|
||||
# Each item in the list must contain a "job" and "name" key.
|
||||
- name: jobs
|
||||
type: object
|
||||
|
||||
jobs:
|
||||
- ${{ each job in parameters.jobs }}:
|
||||
- job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
|
||||
displayName: ${{ job.name }}
|
||||
container: default
|
||||
workspace:
|
||||
clean: all
|
||||
steps:
|
||||
- checkout: self
|
||||
fetchDepth: $(fetchDepth)
|
||||
path: $(checkoutPath)
|
||||
- bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
|
||||
displayName: Run Tests
|
||||
- bash: .azure-pipelines/scripts/process-results.sh
|
||||
condition: succeededOrFailed()
|
||||
displayName: Process Results
|
||||
- bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
|
||||
condition: eq(variables.haveCoverageData, 'true')
|
||||
displayName: Aggregate Coverage Data
|
||||
- task: PublishTestResults@2
|
||||
condition: eq(variables.haveTestResults, 'true')
|
||||
inputs:
|
||||
testResultsFiles: "$(outputPath)/junit/*.xml"
|
||||
displayName: Publish Test Results
|
||||
- task: PublishPipelineArtifact@1
|
||||
condition: eq(variables.haveBotResults, 'true')
|
||||
displayName: Publish Bot Results
|
||||
inputs:
|
||||
targetPath: "$(outputPath)/bot/"
|
||||
artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
|
||||
- task: PublishPipelineArtifact@1
|
||||
condition: eq(variables.haveCoverageData, 'true')
|
||||
displayName: Publish Coverage Data
|
||||
inputs:
|
||||
targetPath: "$(Agent.TempDirectory)/coverage/"
|
||||
artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
|
||||
5
collections/ansible_collections/community/docker/.github/patchback.yml
vendored
Normal file
5
collections/ansible_collections/community/docker/.github/patchback.yml
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
backport_branch_prefix: patchback/backports/
|
||||
backport_label_prefix: backport-
|
||||
target_branch_prefix: stable-
|
||||
...
|
||||
51
collections/ansible_collections/community/docker/.github/workflows/docs-pr.yml
vendored
Normal file
51
collections/ansible_collections/community/docker/.github/workflows/docs-pr.yml
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
name: Collection Docs
|
||||
concurrency:
|
||||
group: docs-${{ github.head_ref }}
|
||||
cancel-in-progress: true
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [opened, synchronize, reopened, closed]
|
||||
|
||||
jobs:
|
||||
build-docs:
|
||||
permissions:
|
||||
contents: read
|
||||
name: Build Ansible Docs
|
||||
uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-pr.yml@main
|
||||
|
||||
comment:
|
||||
permissions:
|
||||
pull-requests: write
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-docs
|
||||
name: PR comments
|
||||
steps:
|
||||
- name: PR comment
|
||||
uses: ansible-community/github-docs-build/actions/ansible-docs-build-comment@main
|
||||
with:
|
||||
body-includes: '## Docs Build'
|
||||
reactions: heart
|
||||
action: ${{ needs.build-docs.outputs.changed != 'true' && 'remove' || '' }}
|
||||
on-closed-body: |
|
||||
## Docs Build 📝
|
||||
|
||||
This PR is closed and any previously published docsite has been unpublished.
|
||||
on-merged-body: |
|
||||
## Docs Build 📝
|
||||
|
||||
Thank you for contribution!✨
|
||||
|
||||
This PR has been merged and your docs changes will be incorporated when they are next published.
|
||||
body: |
|
||||
## Docs Build 📝
|
||||
|
||||
Thank you for contribution!✨
|
||||
|
||||
The docsite for **this PR** is available for download as an artifact from this run:
|
||||
${{ needs.build-docs.outputs.artifact-url }}
|
||||
|
||||
File changes:
|
||||
|
||||
${{ needs.build-docs.outputs.diff-files-rendered }}
|
||||
|
||||
${{ needs.build-docs.outputs.diff-rendered }}
|
||||
114
collections/ansible_collections/community/docker/.github/workflows/ee.yml
vendored
Normal file
114
collections/ansible_collections/community/docker/.github/workflows/ee.yml
vendored
Normal file
@@ -0,0 +1,114 @@
|
||||
---
|
||||
name: execution environment
|
||||
on:
|
||||
# Run CI against all pushes (direct commits, also merged PRs), Pull Requests
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- stable-*
|
||||
pull_request:
|
||||
# Run CI once per day (at 04:30 UTC)
|
||||
# This ensures that even if there haven't been commits that we are still testing against latest version of ansible-builder
|
||||
schedule:
|
||||
- cron: '30 4 * * *'
|
||||
|
||||
env:
|
||||
NAMESPACE: community
|
||||
COLLECTION_NAME: docker
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build and test EE (Ⓐ${{ matrix.runner_tag }})
|
||||
strategy:
|
||||
matrix:
|
||||
runner_tag:
|
||||
- devel
|
||||
- stable-2.12-latest
|
||||
- stable-2.11-latest
|
||||
- stable-2.9-latest
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
path: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: '3.10'
|
||||
|
||||
- name: Install ansible-builder and ansible-navigator
|
||||
run: pip install ansible-builder ansible-navigator
|
||||
|
||||
- name: Verify requirements
|
||||
run: ansible-builder introspect --sanitize .
|
||||
|
||||
- name: Make sure galaxy.yml has version entry
|
||||
run: >-
|
||||
python -c
|
||||
'import yaml ;
|
||||
f = open("galaxy.yml", "rb") ;
|
||||
data = yaml.safe_load(f) ;
|
||||
f.close() ;
|
||||
data["version"] = data.get("version") or "0.0.1" ;
|
||||
f = open("galaxy.yml", "wb") ;
|
||||
f.write(yaml.dump(data).encode("utf-8")) ;
|
||||
f.close() ;
|
||||
'
|
||||
working-directory: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}
|
||||
|
||||
- name: Build collection
|
||||
run: |
|
||||
ansible-galaxy collection build --output-path ../../../
|
||||
working-directory: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}
|
||||
|
||||
- name: Create files for building execution environment
|
||||
run: |
|
||||
COLLECTION_FILENAME="$(ls "${{ env.NAMESPACE }}-${{ env.COLLECTION_NAME }}"-*.tar.gz)"
|
||||
|
||||
# EE config
|
||||
cat > execution-environment.yml <<EOF
|
||||
---
|
||||
version: 1
|
||||
build_arg_defaults:
|
||||
EE_BASE_IMAGE: 'quay.io/ansible/ansible-runner:${{ matrix.runner_tag }}'
|
||||
dependencies:
|
||||
galaxy: requirements.yml
|
||||
EOF
|
||||
echo "::group::execution-environment.yml"
|
||||
cat execution-environment.yml
|
||||
echo "::endgroup::"
|
||||
|
||||
# Requirements
|
||||
cat > requirements.yml <<EOF
|
||||
---
|
||||
collections:
|
||||
- name: ${COLLECTION_FILENAME}
|
||||
type: file
|
||||
EOF
|
||||
echo "::group::requirements.yml"
|
||||
cat requirements.yml
|
||||
echo "::endgroup::"
|
||||
|
||||
- name: Build image based on ${{ matrix.runner_tag }}
|
||||
run: |
|
||||
mkdir -p context/_build/
|
||||
cp "${{ env.NAMESPACE }}-${{ env.COLLECTION_NAME }}"-*.tar.gz context/_build/
|
||||
ansible-builder build -v 3 -t test-ee:latest --container-runtime=docker
|
||||
|
||||
- name: Make /var/run/docker.sock accessible by everyone
|
||||
run: sudo chmod a+rw /var/run/docker.sock
|
||||
|
||||
- name: Run basic tests
|
||||
run: >
|
||||
ansible-navigator run
|
||||
--mode stdout
|
||||
--pull-policy never
|
||||
--set-environment-variable ANSIBLE_PRIVATE_ROLE_VARS=true
|
||||
--container-engine docker
|
||||
--container-options=-v --container-options=/var/run/docker.sock:/var/run/docker.sock
|
||||
--execution-environment-image test-ee:latest
|
||||
-v
|
||||
all.yml
|
||||
working-directory: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}/tests/ee
|
||||
609
collections/ansible_collections/community/docker/CHANGELOG.rst
Normal file
609
collections/ansible_collections/community/docker/CHANGELOG.rst
Normal file
@@ -0,0 +1,609 @@
|
||||
=========================================
|
||||
Docker Community Collection Release Notes
|
||||
=========================================
|
||||
|
||||
.. contents:: Topics
|
||||
|
||||
|
||||
v2.6.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Bugfix and feature release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- docker_container - added ``image_label_mismatch`` parameter (https://github.com/ansible-collections/community.docker/issues/314, https://github.com/ansible-collections/community.docker/pull/370).
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- Support for Ansible 2.9 and ansible-base 2.10 is deprecated, and will be removed in the next major release (community.docker 3.0.0). Some modules might still work with these versions afterwards, but we will no longer keep compatibility code that was needed to support them (https://github.com/ansible-collections/community.docker/pull/361).
|
||||
- The dependency on docker-compose for Execution Environments is deprecated and will be removed in community.docker 3.0.0. The `Python docker-compose library <https://pypi.org/project/docker-compose/>`__ is unmaintained and can cause dependency issues. You can manually still install it in an Execution Environment when needed (https://github.com/ansible-collections/community.docker/pull/373).
|
||||
- Various modules - the default of ``tls_hostname`` that was supposed to be removed in community.docker 2.0.0 will now be removed in version 3.0.0 (https://github.com/ansible-collections/community.docker/pull/362).
|
||||
- docker_stack - the return values ``out`` and ``err`` that were supposed to be removed in community.docker 2.0.0 will now be removed in version 3.0.0 (https://github.com/ansible-collections/community.docker/pull/362).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker_container - fail with a meaningful message instead of crashing if a port is specified with more than three colon-separated parts (https://github.com/ansible-collections/community.docker/pull/367, https://github.com/ansible-collections/community.docker/issues/365).
|
||||
- docker_container - remove unused code that will cause problems with Python 3.13 (https://github.com/ansible-collections/community.docker/pull/354).
|
||||
|
||||
v2.5.1
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Maintenance release.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Include ``PSF-license.txt`` file for ``plugins/module_utils/_version.py``.
|
||||
|
||||
v2.5.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- docker_config - add support for ``template_driver`` with one option ``golang`` (https://github.com/ansible-collections/community.docker/issues/332, https://github.com/ansible-collections/community.docker/pull/345).
|
||||
- docker_swarm - adds ``data_path_addr`` parameter during swarm initialization or when joining (https://github.com/ansible-collections/community.docker/issues/339).
|
||||
|
||||
v2.4.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- Prepare collection for inclusion in an Execution Environment by declaring its dependencies. The ``docker_stack*`` modules are not supported (https://github.com/ansible-collections/community.docker/pull/336).
|
||||
- current_container_facts - add detection for GitHub Actions (https://github.com/ansible-collections/community.docker/pull/336).
|
||||
- docker_container - support returning Docker container log output when using Docker's ``local`` logging driver, an optimized local logging driver introduced in Docker 18.09 (https://github.com/ansible-collections/community.docker/pull/337).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker connection plugin - make sure that ``docker_extra_args`` is used for querying the Docker version. Also ensures that the Docker version is only queried when needed. This is currently the case if a remote user is specified (https://github.com/ansible-collections/community.docker/issues/325, https://github.com/ansible-collections/community.docker/pull/327).
|
||||
|
||||
v2.3.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- docker connection plugin - implement connection reset by clearing internal container user cache (https://github.com/ansible-collections/community.docker/pull/312).
|
||||
- docker connection plugin - simplify ``actual_user`` handling code (https://github.com/ansible-collections/community.docker/pull/311).
|
||||
- docker connection plugin - the plugin supports new ways to define the timeout. These are the ``ANSIBLE_DOCKER_TIMEOUT`` environment variable, the ``timeout`` setting in the ``docker_connection`` section of ``ansible.cfg``, and the ``ansible_docker_timeout`` variable (https://github.com/ansible-collections/community.docker/pull/297).
|
||||
- docker_api connection plugin - implement connection reset by clearing internal container user/group ID cache (https://github.com/ansible-collections/community.docker/pull/312).
|
||||
- docker_api connection plugin - the plugin supports new ways to define the timeout. These are the ``ANSIBLE_DOCKER_TIMEOUT`` environment variable, the ``timeout`` setting in the ``docker_connection`` section of ``ansible.cfg``, and the ``ansible_docker_timeout`` variable (https://github.com/ansible-collections/community.docker/pull/308).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker connection plugin - fix option handling to be compatible with ansible-core 2.13 (https://github.com/ansible-collections/community.docker/pull/297, https://github.com/ansible-collections/community.docker/issues/307).
|
||||
- docker_api connection plugin - fix option handling to be compatible with ansible-core 2.13 (https://github.com/ansible-collections/community.docker/pull/308).
|
||||
|
||||
v2.2.1
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix release.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker_compose - fix Python 3 type error when extracting warnings or errors from docker-compose's output (https://github.com/ansible-collections/community.docker/pull/305).
|
||||
|
||||
v2.2.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- docker_config - add support for rolling update, set ``rolling_versions`` to ``true`` to enable (https://github.com/ansible-collections/community.docker/pull/295, https://github.com/ansible-collections/community.docker/issues/109).
|
||||
- docker_secret - add support for rolling update, set ``rolling_versions`` to ``true`` to enable (https://github.com/ansible-collections/community.docker/pull/293, https://github.com/ansible-collections/community.docker/issues/21).
|
||||
- docker_swarm_service - add support for setting capabilities with the ``cap_add`` and ``cap_drop`` parameters. Usage is the same as with the ``capabilities`` and ``cap_drop`` parameters for ``docker_container`` (https://github.com/ansible-collections/community.docker/pull/294).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker_container, docker_image - adjust image finding code to peculiarities of ``podman-docker``'s API emulation when Docker short names like ``redis`` are used (https://github.com/ansible-collections/community.docker/issues/292).
|
||||
|
||||
v2.1.1
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Emergency release to amend breaking change in previous release.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fix unintended breaking change caused by `an earlier fix <https://github.com/ansible-collections/community.docker/pull/258>`_ by vendoring the deprecated Python standard library ``distutils.version`` until this collection stops supporting Ansible 2.9 and ansible-base 2.10 (https://github.com/ansible-collections/community.docker/issues/267, https://github.com/ansible-collections/community.docker/pull/269).
|
||||
|
||||
v2.1.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- docker_container_exec - add ``detach`` parameter (https://github.com/ansible-collections/community.docker/issues/250, https://github.com/ansible-collections/community.docker/pull/255).
|
||||
- docker_container_exec - add ``env`` option (https://github.com/ansible-collections/community.docker/issues/248, https://github.com/ansible-collections/community.docker/pull/254).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Various modules and plugins - use vendored version of ``distutils.version`` included in ansible-core 2.12 if available. This avoids breakage when ``distutils`` is removed from the standard library of Python 3.12. Note that ansible-core 2.11, ansible-base 2.10 and Ansible 2.9 are right now not compatible with Python 3.12, hence this fix does not target these ansible-core/-base/2.9 versions (https://github.com/ansible-collections/community.docker/pull/258).
|
||||
- docker connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the ``docker`` executable (https://github.com/ansible-collections/community.docker/pull/257).
|
||||
- docker_container_exec - disallow using the ``chdir`` option for Docker API before 1.35 (https://github.com/ansible-collections/community.docker/pull/253).
|
||||
|
||||
v2.0.2
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Bugfix release.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker_api connection plugin - avoid passing an unnecessary argument to a Docker SDK for Python call that is only supported by version 3.0.0 or later (https://github.com/ansible-collections/community.docker/pull/243).
|
||||
- docker_container_exec - ``chdir`` is only supported since Docker SDK for Python 3.0.0. Make sure that this option can only be used when 3.0.0 or later is installed, and prevent passing this parameter on when ``chdir`` is not provided to this module (https://github.com/ansible-collections/community.docker/pull/243, https://github.com/ansible-collections/community.docker/issues/242).
|
||||
- nsenter connection plugin - ensure the ``nsenter_pid`` option is retrieved in ``_connect`` instead of ``__init__`` to prevent a crasher due to bad initialization order (https://github.com/ansible-collections/community.docker/pull/249).
|
||||
- nsenter connection plugin - replace the use of ``--all-namespaces`` with specific namespaces to support compatibility with Busybox nsenter (used on, for example, Alpine containers) (https://github.com/ansible-collections/community.docker/pull/249).
|
||||
|
||||
v2.0.1
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Maintenance release with some documentation fixes.
|
||||
|
||||
v2.0.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
New major release with some deprecations removed and a breaking change in the ``docker_compose`` module regarding the ``timeout`` parameter.
|
||||
|
||||
Breaking Changes / Porting Guide
|
||||
--------------------------------
|
||||
|
||||
- docker_compose - fixed ``timeout`` defaulting behavior so that ``stop_grace_period``, if defined in the compose file, will be used if ``timeout`` is not specified (https://github.com/ansible-collections/community.docker/pull/163).
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- docker_container - using the special value ``all`` in ``published_ports`` has been deprecated. Use ``publish_all_ports=true`` instead (https://github.com/ansible-collections/community.docker/pull/210).
|
||||
|
||||
Removed Features (previously deprecated)
|
||||
----------------------------------------
|
||||
|
||||
- docker_container - the default value of ``container_default_behavior`` changed to ``no_defaults`` (https://github.com/ansible-collections/community.docker/pull/210).
|
||||
- docker_container - the default value of ``network_mode`` is now the name of the first network specified in ``networks`` if such are specified and ``networks_cli_compatible=true`` (https://github.com/ansible-collections/community.docker/pull/210).
|
||||
- docker_container - the special value ``all`` can no longer be used in ``published_ports`` next to other values. Please use ``publish_all_ports=true`` instead (https://github.com/ansible-collections/community.docker/pull/210).
|
||||
- docker_login - removed the ``email`` option (https://github.com/ansible-collections/community.docker/pull/210).
|
||||
|
||||
v1.10.0
|
||||
=======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- Add the modules docker_container_exec, docker_image_load and docker_plugin to the ``docker`` module defaults group (https://github.com/ansible-collections/community.docker/pull/209).
|
||||
- docker_config - add option ``data_src`` to read configuration data from target (https://github.com/ansible-collections/community.docker/issues/64, https://github.com/ansible-collections/community.docker/pull/203).
|
||||
- docker_secret - add option ``data_src`` to read secret data from target (https://github.com/ansible-collections/community.docker/issues/64, https://github.com/ansible-collections/community.docker/pull/203).
|
||||
|
||||
v1.9.1
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix release.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker_compose - fixed incorrect ``changed`` status for services with ``profiles`` defined, but none enabled (https://github.com/ansible-collections/community.docker/pull/192).
|
||||
|
||||
v1.9.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
New bugfixes and features release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- docker_* modules - include ``ImportError`` traceback when reporting that Docker SDK for Python could not be found (https://github.com/ansible-collections/community.docker/pull/188).
|
||||
- docker_compose - added ``env_file`` option for specifying custom environment files (https://github.com/ansible-collections/community.docker/pull/174).
|
||||
- docker_container - added ``publish_all_ports`` option to publish all exposed ports to random ports except those explicitly bound with ``published_ports`` (this was already added in community.docker 1.8.0) (https://github.com/ansible-collections/community.docker/pull/162).
|
||||
- docker_container - added new ``command_handling`` option with current deprecated default value ``compatibility`` which allows to control how the module handles shell quoting when interpreting lists, and how the module handles empty lists/strings. The default will switch to ``correct`` in community.docker 3.0.0 (https://github.com/ansible-collections/community.docker/pull/186).
|
||||
- docker_container - lifted restriction preventing the creation of anonymous volumes with the ``mounts`` option (https://github.com/ansible-collections/community.docker/pull/181).
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- docker_container - the new ``command_handling``'s default value, ``compatibility``, is deprecated and will change to ``correct`` in community.docker 3.0.0. A deprecation warning is emitted by the module in cases where the behavior will change. Please note that ansible-core will output a deprecation warning only once, so if it is shown for an earlier task, there could be more tasks with this warning where it is not shown (https://github.com/ansible-collections/community.docker/pull/186).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker_compose - fixes task failures when bringing up services while using ``docker-compose <1.17.0`` (https://github.com/ansible-collections/community.docker/issues/180).
|
||||
- docker_container - make sure to also return ``container`` on ``detached=false`` when status code is non-zero (https://github.com/ansible-collections/community.docker/pull/178).
|
||||
- docker_stack_info - make sure that module isn't skipped in check mode (https://github.com/ansible-collections/community.docker/pull/183).
|
||||
- docker_stack_task_info - make sure that module isn't skipped in check mode (https://github.com/ansible-collections/community.docker/pull/183).
|
||||
|
||||
New Plugins
|
||||
-----------
|
||||
|
||||
Connection
|
||||
~~~~~~~~~~
|
||||
|
||||
- nsenter - execute on host running controller container
|
||||
|
||||
v1.8.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix and feature release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- Avoid internal ansible-core module_utils in favor of equivalent public API available since at least Ansible 2.9 (https://github.com/ansible-collections/community.docker/pull/164).
|
||||
- docker_compose - added ``profiles`` option to specify service profiles when starting services (https://github.com/ansible-collections/community.docker/pull/167).
|
||||
- docker_containers inventory plugin - when ``connection_type=docker-api``, now pass Docker daemon connection options from inventory plugin to connection plugin. This can be disabled by setting ``configure_docker_daemon=false`` (https://github.com/ansible-collections/community.docker/pull/157).
|
||||
- docker_host_info - allow values for keys in ``containers_filters``, ``images_filters``, ``networks_filters``, and ``volumes_filters`` to be passed as YAML lists (https://github.com/ansible-collections/community.docker/pull/160).
|
||||
- docker_plugin - added ``alias`` option to specify local names for docker plugins (https://github.com/ansible-collections/community.docker/pull/161).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker_compose - fix idempotence bug when using ``stopped: true`` (https://github.com/ansible-collections/community.docker/issues/142, https://github.com/ansible-collections/community.docker/pull/159).
|
||||
|
||||
v1.7.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Small feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- docker_image - allow to tag images by ID (https://github.com/ansible-collections/community.docker/pull/149).
|
||||
|
||||
v1.6.1
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Bugfix release to reduce deprecation warning spam.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker_* modules and plugins, except ``docker_swarm`` connection plugin and ``docker_compose`` and ``docker_stack*`` modules - only emit ``tls_hostname`` deprecation message if TLS is actually used (https://github.com/ansible-collections/community.docker/pull/143).
|
||||
|
||||
v1.6.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular bugfix and feature release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- common module utils - correct error messages for guiding to install proper Docker SDK for Python module (https://github.com/ansible-collections/community.docker/pull/125).
|
||||
- docker_container - allow ``memory_swap: -1`` to set memory swap limit to unlimited. This is useful when the user cannot set memory swap limits due to cgroup limitations or other reasons, as by default Docker will try to set swap usage to two times the value of ``memory`` (https://github.com/ansible-collections/community.docker/pull/138).
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- docker_* modules and plugins, except ``docker_swarm`` connection plugin and ``docker_compose`` and ``docker_stack*`` modules - the current default ``localhost`` for ``tls_hostname`` is deprecated. In community.docker 2.0.0 it will be computed from ``docker_host`` instead (https://github.com/ansible-collections/community.docker/pull/134).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker-compose - fix not pulling when ``state: present`` and ``stopped: true`` (https://github.com/ansible-collections/community.docker/issues/12, https://github.com/ansible-collections/community.docker/pull/119).
|
||||
- docker_plugin - also configure plugin after installing (https://github.com/ansible-collections/community.docker/issues/118, https://github.com/ansible-collections/community.docker/pull/135).
|
||||
- docker_swarm_services - avoid crash during idempotence check if ``published_port`` is not specified (https://github.com/ansible-collections/community.docker/issues/107, https://github.com/ansible-collections/community.docker/pull/136).
|
||||
|
||||
v1.5.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- Add the ``use_ssh_client`` option to most docker modules and plugins (https://github.com/ansible-collections/community.docker/issues/108, https://github.com/ansible-collections/community.docker/pull/114).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- all modules - use ``to_native`` to convert exceptions to strings (https://github.com/ansible-collections/community.docker/pull/121).
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
- docker_container_exec - Execute command in a docker container
|
||||
|
||||
v1.4.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Security release to address another potential secret leak. Also includes regular bugfixes and features.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- docker_swarm_service - change ``publish.published_port`` option from mandatory to optional. Docker will assign random high port if not specified (https://github.com/ansible-collections/community.docker/issues/99).
|
||||
|
||||
Breaking Changes / Porting Guide
|
||||
--------------------------------
|
||||
|
||||
- docker_swarm - if ``join_token`` is specified, a returned join token with the same value will be replaced by ``VALUE_SPECIFIED_IN_NO_LOG_PARAMETER``. Make sure that you do not blindly use the join tokens from the return value of this module when the module is invoked with ``join_token`` specified! This breaking change appears in a minor release since it is necessary to fix a security issue (https://github.com/ansible-collections/community.docker/pull/103).
|
||||
|
||||
Security Fixes
|
||||
--------------
|
||||
|
||||
- docker_swarm - the ``join_token`` option is now marked as ``no_log`` so it is no longer written into logs (https://github.com/ansible-collections/community.docker/pull/103).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- ``docker_swarm_service`` - fix KeyError caused by reference to deprecated option ``update_failure_action`` (https://github.com/ansible-collections/community.docker/pull/100).
|
||||
- docker_swarm_service - mark ``secrets`` module option with ``no_log=False`` since it does not leak secrets (https://github.com/ansible-collections/community.general/pull/2001).
|
||||
|
||||
v1.3.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Regular feature and bugfix release.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- docker_container - add ``storage_opts`` option to specify storage options (https://github.com/ansible-collections/community.docker/issues/91, https://github.com/ansible-collections/community.docker/pull/93).
|
||||
- docker_image - allows to specify platform to pull for ``source=pull`` with new option ``pull_platform`` (https://github.com/ansible-collections/community.docker/issues/79, https://github.com/ansible-collections/community.docker/pull/89).
|
||||
- docker_image - properly support image IDs (hashes) for loading and tagging images (https://github.com/ansible-collections/community.docker/issues/86, https://github.com/ansible-collections/community.docker/pull/87).
|
||||
- docker_swarm_service - adding support for maximum number of tasks per node (``replicas_max_per_node``) when running swarm service in replicated mode. Introduced in API 1.40 (https://github.com/ansible-collections/community.docker/issues/7, https://github.com/ansible-collections/community.docker/pull/92).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker_container - fix healthcheck disabling idempotency issue with strict comparison (https://github.com/ansible-collections/community.docker/issues/85).
|
||||
- docker_image - prevent module failure when removing image that is removed between inspection and removal (https://github.com/ansible-collections/community.docker/pull/87).
|
||||
- docker_image - prevent module failure when removing non-existent image by ID (https://github.com/ansible-collections/community.docker/pull/87).
|
||||
- docker_image_info - prevent module failure when image vanishes between listing and inspection (https://github.com/ansible-collections/community.docker/pull/87).
|
||||
- docker_image_info - prevent module failure when querying non-existent image by ID (https://github.com/ansible-collections/community.docker/pull/87).
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
- docker_image_load - Load docker image(s) from archives
|
||||
- docker_plugin - Manage Docker plugins
|
||||
|
||||
v1.2.2
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Security bugfix release to address CVE-2021-20191.
|
||||
|
||||
Security Fixes
|
||||
--------------
|
||||
|
||||
- docker_swarm - enabled ``no_log`` for the option ``signing_ca_key`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.docker/pull/80).
|
||||
|
||||
v1.2.1
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Bugfix release.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker connection plugin - fix Docker version parsing, as some docker versions have a leading ``v`` in the output of the command ``docker version --format "{{.Server.Version}}"`` (https://github.com/ansible-collections/community.docker/pull/76).
|
||||
|
||||
v1.2.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Feature release with one new feature and two bugfixes.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- docker_container - added ``default_host_ip`` option which allows to explicitly set the default IP string for published ports without explicitly specified IPs. When using IPv6 binds with Docker 20.10.2 or newer, this needs to be set to an empty string (``""``) (https://github.com/ansible-collections/community.docker/issues/70, https://github.com/ansible-collections/community.docker/pull/71).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker_container - allow IPv6 zones (RFC 4007) in bind IPs (https://github.com/ansible-collections/community.docker/pull/66).
|
||||
- docker_image - fix crash on loading images with versions of Docker SDK for Python before 2.5.0 (https://github.com/ansible-collections/community.docker/issues/72, https://github.com/ansible-collections/community.docker/pull/73).
|
||||
|
||||
v1.1.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Feature release with three new plugins and modules.
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- docker_container - support specifying ``cgroup_parent`` (https://github.com/ansible-collections/community.docker/issues/6, https://github.com/ansible-collections/community.docker/pull/59).
|
||||
- docker_container - when a container is started with ``detached=false``, ``status`` is now also returned when it is 0 (https://github.com/ansible-collections/community.docker/issues/26, https://github.com/ansible-collections/community.docker/pull/58).
|
||||
- docker_image - support ``platform`` when building images (https://github.com/ansible-collections/community.docker/issues/22, https://github.com/ansible-collections/community.docker/pull/54).
|
||||
|
||||
Deprecated Features
|
||||
-------------------
|
||||
|
||||
- docker_container - currently ``published_ports`` can contain port mappings next to the special value ``all``, in which case the port mappings are ignored. This behavior is deprecated for community.docker 2.0.0, at which point it will either be forbidden, or this behavior will be properly implemented similar to how the Docker CLI tool handles this (https://github.com/ansible-collections/community.docker/issues/8, https://github.com/ansible-collections/community.docker/pull/60).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker_image - if ``push=true`` is used with ``repository``, and the image does not need to be tagged, still push. This can happen if ``repository`` and ``name`` are equal (https://github.com/ansible-collections/community.docker/issues/52, https://github.com/ansible-collections/community.docker/pull/53).
|
||||
- docker_image - report error when loading a broken archive that contains no image (https://github.com/ansible-collections/community.docker/issues/46, https://github.com/ansible-collections/community.docker/pull/55).
|
||||
- docker_image - report error when the loaded archive does not contain the specified image (https://github.com/ansible-collections/community.docker/issues/41, https://github.com/ansible-collections/community.docker/pull/55).
|
||||
|
||||
New Plugins
|
||||
-----------
|
||||
|
||||
Connection
|
||||
~~~~~~~~~~
|
||||
|
||||
- docker_api - Run tasks in docker containers
|
||||
|
||||
Inventory
|
||||
~~~~~~~~~
|
||||
|
||||
- docker_containers - Ansible dynamic inventory plugin for Docker containers.
|
||||
|
||||
New Modules
|
||||
-----------
|
||||
|
||||
- current_container_facts - Return facts about whether the module runs in a Docker container
|
||||
|
||||
v1.0.1
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
Maintenance release with a bugfix for ``docker_container``.
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker_container - the validation for ``capabilities`` in ``device_requests`` was incorrect (https://github.com/ansible-collections/community.docker/issues/42, https://github.com/ansible-collections/community.docker/pull/43).
|
||||
|
||||
v1.0.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
This is the first production (non-prerelease) release of ``community.docker``.
|
||||
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- Add collection-side support of the ``docker`` action group / module defaults group (https://github.com/ansible-collections/community.docker/pull/17).
|
||||
- docker_image - return docker build output (https://github.com/ansible-collections/community.general/pull/805).
|
||||
- docker_secret - add a warning when the secret does not have an ``ansible_key`` label but the ``force`` parameter is not set (https://github.com/ansible-collections/community.docker/issues/30, https://github.com/ansible-collections/community.docker/pull/31).
|
||||
|
||||
v0.1.0
|
||||
======
|
||||
|
||||
Release Summary
|
||||
---------------
|
||||
|
||||
The ``community.docker`` continues the work on the Ansible docker modules and plugins from their state in ``community.general`` 1.2.0. The changes listed here are thus relative to the modules and plugins ``community.general.docker*``.
|
||||
|
||||
All deprecation removals planned for ``community.general`` 2.0.0 have been applied. All deprecation removals scheduled for ``community.general`` 3.0.0 have been re-scheduled for ``community.docker`` 2.0.0.
|
||||
|
||||
|
||||
Minor Changes
|
||||
-------------
|
||||
|
||||
- docker_container - now supports the ``device_requests`` option, which allows to request additional resources such as GPUs (https://github.com/ansible/ansible/issues/65748, https://github.com/ansible-collections/community.general/pull/1119).
|
||||
|
||||
Removed Features (previously deprecated)
|
||||
----------------------------------------
|
||||
|
||||
- docker_container - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_container - the default of ``networks_cli_compatible`` changed to ``true`` (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_container - the unused option ``trust_image_content`` has been removed (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_image - ``state=build`` has been removed. Use ``present`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_image - the ``container_limits``, ``dockerfile``, ``http_timeout``, ``nocache``, ``rm``, ``path``, ``buildargs``, ``pull`` options have been removed. Use the corresponding suboptions of ``build`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_image - the ``force`` option has been removed. Use the more specific ``force_*`` options instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_image - the ``source`` option is now mandatory (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_image - the ``use_tls`` option has been removed. Use ``tls`` and ``validate_certs`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_image - the default of the ``build.pull`` option changed to ``false`` (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_image_facts - this alias is no longer available, use ``docker_image_info`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_network - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_network - the ``ipam_options`` option has been removed. Use ``ipam_config`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_service - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_swarm - ``state=inspect`` has been removed. Use ``docker_swarm_info`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_swarm_service - the ``constraints`` option has been removed. Use ``placement.constraints`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_swarm_service - the ``limit_cpu`` and ``limit_memory`` options have been removed. Use the corresponding suboptions in ``limits`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_swarm_service - the ``log_driver`` and ``log_driver_options`` options have been removed. Use the corresponding suboptions in ``logging`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_swarm_service - the ``reserve_cpu`` and ``reserve_memory`` options have been removed. Use the corresponding suboptions in ``reservations`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_swarm_service - the ``restart_policy``, ``restart_policy_attempts``, ``restart_policy_delay`` and ``restart_policy_window`` options have been removed. Use the corresponding suboptions in ``restart_config`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_swarm_service - the ``update_delay``, ``update_parallelism``, ``update_failure_action``, ``update_monitor``, ``update_max_failure_ratio`` and ``update_order`` options have been removed. Use the corresponding suboptions in ``update_config`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_volume - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_volume - the ``force`` option has been removed. Use ``recreate`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- docker_login - fix internal config file storage to handle credentials for more than one registry (https://github.com/ansible-collections/community.general/issues/1117).
|
||||
674
collections/ansible_collections/community/docker/COPYING
Normal file
674
collections/ansible_collections/community/docker/COPYING
Normal file
@@ -0,0 +1,674 @@
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU General Public License is a free, copyleft license for
|
||||
software and other kinds of works.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
the GNU General Public License is intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users. We, the Free Software Foundation, use the
|
||||
GNU General Public License for most of our software; it applies also to
|
||||
any other work released this way by its authors. You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to prevent others from denying you
|
||||
these rights or asking you to surrender the rights. Therefore, you have
|
||||
certain responsibilities if you distribute copies of the software, or if
|
||||
you modify it: responsibilities to respect the freedom of others.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must pass on to the recipients the same
|
||||
freedoms that you received. You must make sure that they, too, receive
|
||||
or can get the source code. And you must show them these terms so they
|
||||
know their rights.
|
||||
|
||||
Developers that use the GNU GPL protect your rights with two steps:
|
||||
(1) assert copyright on the software, and (2) offer you this License
|
||||
giving you legal permission to copy, distribute and/or modify it.
|
||||
|
||||
For the developers' and authors' protection, the GPL clearly explains
|
||||
that there is no warranty for this free software. For both users' and
|
||||
authors' sake, the GPL requires that modified versions be marked as
|
||||
changed, so that their problems will not be attributed erroneously to
|
||||
authors of previous versions.
|
||||
|
||||
Some devices are designed to deny users access to install or run
|
||||
modified versions of the software inside them, although the manufacturer
|
||||
can do so. This is fundamentally incompatible with the aim of
|
||||
protecting users' freedom to change the software. The systematic
|
||||
pattern of such abuse occurs in the area of products for individuals to
|
||||
use, which is precisely where it is most unacceptable. Therefore, we
|
||||
have designed this version of the GPL to prohibit the practice for those
|
||||
products. If such problems arise substantially in other domains, we
|
||||
stand ready to extend this provision to those domains in future versions
|
||||
of the GPL, as needed to protect the freedom of users.
|
||||
|
||||
Finally, every program is threatened constantly by software patents.
|
||||
States should not allow patents to restrict development and use of
|
||||
software on general-purpose computers, but in those that do, we wish to
|
||||
avoid the special danger that patents applied to a free program could
|
||||
make it effectively proprietary. To prevent this, the GPL assures that
|
||||
patents cannot be used to render the program non-free.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Use with the GNU Affero General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU Affero General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the special requirements of the GNU Affero General Public License,
|
||||
section 13, concerning interaction through a network will apply to the
|
||||
combination as such.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
||||
4205
collections/ansible_collections/community/docker/FILES.json
Normal file
4205
collections/ansible_collections/community/docker/FILES.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"collection_info": {
|
||||
"namespace": "community",
|
||||
"name": "docker",
|
||||
"version": "2.6.0",
|
||||
"authors": [
|
||||
"Ansible Docker Working Group"
|
||||
],
|
||||
"readme": "README.md",
|
||||
"tags": [
|
||||
"docker"
|
||||
],
|
||||
"description": "Modules and plugins for working with Docker",
|
||||
"license": [],
|
||||
"license_file": "COPYING",
|
||||
"dependencies": {},
|
||||
"repository": "https://github.com/ansible-collections/community.docker",
|
||||
"documentation": null,
|
||||
"homepage": "https://github.com/ansible-collections/community.docker",
|
||||
"issues": "https://github.com/ansible-collections/community.docker/issues"
|
||||
},
|
||||
"file_manifest_file": {
|
||||
"name": "FILES.json",
|
||||
"ftype": "file",
|
||||
"chksum_type": "sha256",
|
||||
"chksum_sha256": "9a706eb8776f7e40d4a5f7f4ab402a6eeb1c561d1f932304fa8a252ff5ceb897",
|
||||
"format": 1
|
||||
},
|
||||
"format": 1
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
|
||||
--------------------------------------------
|
||||
|
||||
1. This LICENSE AGREEMENT is between the Python Software Foundation
|
||||
("PSF"), and the Individual or Organization ("Licensee") accessing and
|
||||
otherwise using this software ("Python") in source or binary form and
|
||||
its associated documentation.
|
||||
|
||||
2. Subject to the terms and conditions of this License Agreement, PSF hereby
|
||||
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
|
||||
analyze, test, perform and/or display publicly, prepare derivative works,
|
||||
distribute, and otherwise use Python alone or in any derivative version,
|
||||
provided, however, that PSF's License Agreement and PSF's notice of copyright,
|
||||
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
|
||||
2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 Python Software Foundation;
|
||||
All Rights Reserved" are retained in Python alone or in any derivative version
|
||||
prepared by Licensee.
|
||||
|
||||
3. In the event Licensee prepares a derivative work that is based on
|
||||
or incorporates Python or any part thereof, and wants to make
|
||||
the derivative work available to others as provided herein, then
|
||||
Licensee hereby agrees to include in any such work a brief summary of
|
||||
the changes made to Python.
|
||||
|
||||
4. PSF is making Python available to Licensee on an "AS IS"
|
||||
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
|
||||
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
|
||||
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
|
||||
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
|
||||
INFRINGE ANY THIRD PARTY RIGHTS.
|
||||
|
||||
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
|
||||
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
|
||||
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
|
||||
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
|
||||
|
||||
6. This License Agreement will automatically terminate upon a material
|
||||
breach of its terms and conditions.
|
||||
|
||||
7. Nothing in this License Agreement shall be deemed to create any
|
||||
relationship of agency, partnership, or joint venture between PSF and
|
||||
Licensee. This License Agreement does not grant permission to use PSF
|
||||
trademarks or trade name in a trademark sense to endorse or promote
|
||||
products or services of Licensee, or any third party.
|
||||
|
||||
8. By copying, installing or otherwise using Python, Licensee
|
||||
agrees to be bound by the terms and conditions of this License
|
||||
Agreement.
|
||||
101
collections/ansible_collections/community/docker/README.md
Normal file
101
collections/ansible_collections/community/docker/README.md
Normal file
@@ -0,0 +1,101 @@
|
||||
# Docker Community Collection
|
||||
|
||||
[](https://docs.ansible.com/ansible/latest/collections/community/docker/)
|
||||
[](https://dev.azure.com/ansible/community.docker/_build?definitionId=25)
|
||||
[](https://codecov.io/gh/ansible-collections/community.docker)
|
||||
|
||||
This repo contains the `community.docker` Ansible Collection. The collection includes many modules and plugins to work with Docker.
|
||||
|
||||
Please note that this collection does **not** support Windows targets. The connection plugins included in this collection support Windows targets on a best-effort basis, but we are not testing this in CI.
|
||||
|
||||
## Tested with Ansible
|
||||
|
||||
Tested with the current Ansible 2.9, ansible-base 2.10, ansible-core 2.11, ansible-core 2.12 and ansible-core 2.13 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported.
|
||||
|
||||
## External requirements
|
||||
|
||||
Most modules and plugins require the [Docker SDK for Python](https://pypi.org/project/docker/). For Python 2.6 support, use [the deprecated docker-py library](https://pypi.org/project/docker-py/) instead.
|
||||
|
||||
Both libraries cannot be installed at the same time. If you accidentally did install them simultaneously, you have to uninstall *both* before re-installing one of them.
|
||||
|
||||
## Included content
|
||||
|
||||
* Connection plugins:
|
||||
- community.docker.docker: use Docker containers as remotes
|
||||
* Inventory plugins:
|
||||
- community.docker.docker_machine: collect Docker machines as inventory
|
||||
- community.docker.docker_swarm: collect Docker Swarm nodes as inventory
|
||||
* Modules:
|
||||
* Docker:
|
||||
- community.docker.docker_container: manage Docker containers
|
||||
- community.docker.docker_container_exec: run commands in Docker containers
|
||||
- community.docker.docker_container_info: retrieve information on Docker containers
|
||||
- community.docker.docker_host_info: retrieve information on the Docker daemon
|
||||
- community.docker.docker_image: manage Docker images
|
||||
- community.docker.docker_image_info: retrieve information on Docker images
|
||||
- community.docker.docker_image_load: load Docker images from archives
|
||||
- community.docker.docker_login: log in and out to/from registries
|
||||
- community.docker.docker_network: manage Docker networks
|
||||
- community.docker.docker_network_info: retrieve information on Docker networks
|
||||
- community.docker.docker_plugin: manage Docker plugins
|
||||
- community.docker.docker_prune: prune Docker containers, images, networks, volumes, and build data
|
||||
- community.docker.docker_volume: manage Docker volumes
|
||||
- community.docker.docker_volume_info: retrieve information on Docker volumes
|
||||
* Docker Compose:
|
||||
- community.docker.docker_compose: manage Docker Compose files
|
||||
* Docker Swarm:
|
||||
- community.docker.docker_config: manage configurations
|
||||
- community.docker.docker_node: manage Docker Swarm nodes
|
||||
- community.docker.docker_node_info: retrieve information on Docker Swarm nodes
|
||||
- community.docker.docker_secret: manage secrets
|
||||
- community.docker.docker_swarm: manage Docker Swarm
|
||||
- community.docker.docker_swarm_info: retrieve information on Docker Swarm
|
||||
- community.docker.docker_swarm_service: manage Docker Swarm services
|
||||
- community.docker.docker_swarm_service_info: retrieve information on Docker Swarm services
|
||||
* Docker Stack:
|
||||
- community.docker.docker_stack: manage Docker Stacks
|
||||
- community.docker.docker_stack_info: retrieve information on Docker Stacks
|
||||
- community.docker.docker_stack_task_info: retrieve information on tasks in Docker Stacks
|
||||
* Other:
|
||||
- current_container_facts: return facts about whether the module runs in a Docker container
|
||||
|
||||
## Using this collection
|
||||
|
||||
Before using the General community collection, you need to install the collection with the `ansible-galaxy` CLI:
|
||||
|
||||
ansible-galaxy collection install community.docker
|
||||
|
||||
You can also include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml` using the format:
|
||||
|
||||
```yaml
|
||||
collections:
|
||||
- name: community.docker
|
||||
```
|
||||
|
||||
See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.
|
||||
|
||||
## Contributing to this collection
|
||||
|
||||
If you want to develop new content for this collection or improve what is already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATH`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there.
|
||||
|
||||
You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
|
||||
|
||||
## Release notes
|
||||
|
||||
See the [changelog](https://github.com/ansible-collections/community.docker/tree/main/CHANGELOG.rst).
|
||||
|
||||
## More information
|
||||
|
||||
- [Ansible Collection overview](https://github.com/ansible-collections/overview)
|
||||
- [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html)
|
||||
- [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html)
|
||||
- [Ansible Collections Checklist](https://github.com/ansible-collections/overview/blob/master/collection_requirements.rst)
|
||||
- [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
|
||||
- [The Bullhorn (the Ansible Contributor newsletter)](https://us19.campaign-archive.com/home/?u=56d874e027110e35dea0e03c1&id=d6635f5420)
|
||||
- [Changes impacting Contributors](https://github.com/ansible-collections/overview/issues/45)
|
||||
|
||||
## Licensing
|
||||
|
||||
GNU General Public License v3.0 or later.
|
||||
|
||||
See [COPYING](https://www.gnu.org/licenses/gpl-3.0.txt) to see the full text.
|
||||
@@ -0,0 +1,669 @@
|
||||
ancestor: null
|
||||
releases:
|
||||
0.1.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- docker_login - fix internal config file storage to handle credentials for
|
||||
more than one registry (https://github.com/ansible-collections/community.general/issues/1117).
|
||||
minor_changes:
|
||||
- docker_container - now supports the ``device_requests`` option, which allows
|
||||
to request additional resources such as GPUs (https://github.com/ansible/ansible/issues/65748,
|
||||
https://github.com/ansible-collections/community.general/pull/1119).
|
||||
release_summary: 'The ``community.docker`` continues the work on the Ansible
|
||||
docker modules and plugins from their state in ``community.general`` 1.2.0.
|
||||
The changes listed here are thus relative to the modules and plugins ``community.general.docker*``.
|
||||
|
||||
|
||||
All deprecation removals planned for ``community.general`` 2.0.0 have been
|
||||
applied. All deprecation removals scheduled for ``community.general`` 3.0.0
|
||||
have been re-scheduled for ``community.docker`` 2.0.0.
|
||||
|
||||
'
|
||||
removed_features:
|
||||
- docker_container - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_container - the default of ``networks_cli_compatible`` changed to ``true``
|
||||
(https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_container - the unused option ``trust_image_content`` has been removed
|
||||
(https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_image - ``state=build`` has been removed. Use ``present`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_image - the ``container_limits``, ``dockerfile``, ``http_timeout``,
|
||||
``nocache``, ``rm``, ``path``, ``buildargs``, ``pull`` have been removed.
|
||||
Use the corresponding suboptions of ``build`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_image - the ``force`` option has been removed. Use the more specific
|
||||
``force_*`` options instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_image - the ``source`` option is now mandatory (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_image - the ``use_tls`` option has been removed. Use ``tls`` and ``validate_certs``
|
||||
instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_image - the default of the ``build.pull`` option changed to ``false``
|
||||
(https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_image_facts - this alias is no longer available, use ``docker_image_info``
|
||||
instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_network - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_network - the ``ipam_options`` option has been removed. Use ``ipam_config``
|
||||
instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_service - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_swarm - ``state=inspect`` has been removed. Use ``docker_swarm_info``
|
||||
instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_swarm_service - the ``constraints`` option has been removed. Use ``placement.constraints``
|
||||
instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_swarm_service - the ``limit_cpu`` and ``limit_memory`` options have
|
||||
been removed. Use the corresponding suboptions in ``limits`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_swarm_service - the ``log_driver`` and ``log_driver_options`` options
|
||||
have been removed. Use the corresponding suboptions in ``logging`` instead
|
||||
(https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_swarm_service - the ``reserve_cpu`` and ``reserve_memory`` options
|
||||
have been removed. Use the corresponding suboptions in ``reservations`` instead
|
||||
(https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_swarm_service - the ``restart_policy``, ``restart_policy_attempts``,
|
||||
``restart_policy_delay`` and ``restart_policy_window`` options have been removed.
|
||||
Use the corresponding suboptions in ``restart_config`` instead (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_swarm_service - the ``update_delay``, ``update_parallelism``, ``update_failure_action``,
|
||||
``update_monitor``, ``update_max_failure_ratio`` and ``update_order`` options
|
||||
have been removed. Use the corresponding suboptions in ``update_config`` instead
|
||||
(https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_volume - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
|
||||
- docker_volume - the ``force`` option has been removed. Use ``recreate`` instead
|
||||
(https://github.com/ansible-collections/community.docker/pull/1).
|
||||
fragments:
|
||||
- 0.1.0.yml
|
||||
- c.g-1118-docker_login-config-store.yml
|
||||
- c.g-1119-docker_container-device-reqests.yml
|
||||
- c.g-2.0.0-deprecations.yml
|
||||
release_date: '2020-10-30'
|
||||
1.0.0:
|
||||
changes:
|
||||
minor_changes:
|
||||
- Add collection-side support of the ``docker`` action group / module defaults
|
||||
group (https://github.com/ansible-collections/community.docker/pull/17).
|
||||
- docker_image - return docker build output (https://github.com/ansible-collections/community.general/pull/805).
|
||||
- docker_secret - add a warning when the secret does not have an ``ansible_key``
|
||||
label but the ``force`` parameter is not set (https://github.com/ansible-collections/community.docker/issues/30,
|
||||
https://github.com/ansible-collections/community.docker/pull/31).
|
||||
release_summary: 'This is the first production (non-prerelease) release of ``community.docker``.
|
||||
|
||||
'
|
||||
fragments:
|
||||
- 1.0.0.yml
|
||||
- 17-action-group.yml
|
||||
- 31-docker-secret.yml
|
||||
- community.general-805-docker_image-build-output.yml
|
||||
release_date: '2020-11-17'
|
||||
1.0.1:
|
||||
changes:
|
||||
bugfixes:
|
||||
- docker_container - the validation for ``capabilities`` in ``device_requests``
|
||||
was incorrect (https://github.com/ansible-collections/community.docker/issues/42,
|
||||
https://github.com/ansible-collections/community.docker/pull/43).
|
||||
release_summary: Maintenance release with a bugfix for ``docker_container``.
|
||||
fragments:
|
||||
- 1.0.1.yml
|
||||
- 43-docker_container-device_requests.yml
|
||||
release_date: '2020-12-11'
|
||||
1.1.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- docker_image - if ``push=true`` is used with ``repository``, and the image
|
||||
does not need to be tagged, still push. This can happen if ``repository``
|
||||
and ``name`` are equal (https://github.com/ansible-collections/community.docker/issues/52,
|
||||
https://github.com/ansible-collections/community.docker/pull/53).
|
||||
- docker_image - report error when loading a broken archive that contains no
|
||||
image (https://github.com/ansible-collections/community.docker/issues/46,
|
||||
https://github.com/ansible-collections/community.docker/pull/55).
|
||||
- docker_image - report error when the loaded archive does not contain the specified
|
||||
image (https://github.com/ansible-collections/community.docker/issues/41,
|
||||
https://github.com/ansible-collections/community.docker/pull/55).
|
||||
deprecated_features:
|
||||
- docker_container - currently ``published_ports`` can contain port mappings
|
||||
next to the special value ``all``, in which case the port mappings are ignored.
|
||||
This behavior is deprecated for community.docker 2.0.0, at which point it
|
||||
will either be forbidden, or this behavior will be properly implemented similar
|
||||
to how the Docker CLI tool handles this (https://github.com/ansible-collections/community.docker/issues/8,
|
||||
https://github.com/ansible-collections/community.docker/pull/60).
|
||||
minor_changes:
|
||||
- docker_container - support specifying ``cgroup_parent`` (https://github.com/ansible-collections/community.docker/issues/6,
|
||||
https://github.com/ansible-collections/community.docker/pull/59).
|
||||
- docker_container - when a container is started with ``detached=false``, ``status``
|
||||
is now also returned when it is 0 (https://github.com/ansible-collections/community.docker/issues/26,
|
||||
https://github.com/ansible-collections/community.docker/pull/58).
|
||||
- docker_image - support ``platform`` when building images (https://github.com/ansible-collections/community.docker/issues/22,
|
||||
https://github.com/ansible-collections/community.docker/pull/54).
|
||||
release_summary: Feature release with three new plugins and modules.
|
||||
fragments:
|
||||
- 1.1.0.yml
|
||||
- 53-docker_image-tag-push.yml
|
||||
- 54-docker_image-build-platform.yml
|
||||
- 55-docker_image-loading.yml
|
||||
- 58-docker_container-non-detached-status.yml
|
||||
- 59-docker_container-cgroup-parent.yml
|
||||
- 60-docker_container-publish-all.yml
|
||||
modules:
|
||||
- description: Return facts about whether the module runs in a Docker container
|
||||
name: current_container_facts
|
||||
namespace: ''
|
||||
plugins:
|
||||
connection:
|
||||
- description: Run tasks in docker containers
|
||||
name: docker_api
|
||||
namespace: null
|
||||
inventory:
|
||||
- description: Ansible dynamic inventory plugin for Docker containers.
|
||||
name: docker_containers
|
||||
namespace: null
|
||||
release_date: '2021-01-03'
|
||||
1.10.0:
|
||||
changes:
|
||||
minor_changes:
|
||||
- Add the modules docker_container_exec, docker_image_load and docker_plugin
|
||||
to the ``docker`` module defaults group (https://github.com/ansible-collections/community.docker/pull/209).
|
||||
- docker_config - add option ``data_src`` to read configuration data from target
|
||||
(https://github.com/ansible-collections/community.docker/issues/64, https://github.com/ansible-collections/community.docker/pull/203).
|
||||
- docker_secret - add option ``data_src`` to read secret data from target (https://github.com/ansible-collections/community.docker/issues/64,
|
||||
https://github.com/ansible-collections/community.docker/pull/203).
|
||||
release_summary: Regular feature and bugfix release.
|
||||
fragments:
|
||||
- 1.10.0.yml
|
||||
- 203-docker_secret-config-data_src.yml
|
||||
- 209-action-group.yml
|
||||
release_date: '2021-10-05'
|
||||
1.2.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- docker_container - allow IPv6 zones (RFC 4007) in bind IPs (https://github.com/ansible-collections/community.docker/pull/66).
|
||||
- docker_image - fix crash on loading images with versions of Docker SDK for
|
||||
Python before 2.5.0 (https://github.com/ansible-collections/community.docker/issues/72,
|
||||
https://github.com/ansible-collections/community.docker/pull/73).
|
||||
minor_changes:
|
||||
- docker_container - added ``default_host_ip`` option which allows to explicitly
|
||||
set the default IP string for published ports without explicitly specified
|
||||
IPs. When using IPv6 binds with Docker 20.10.2 or newer, this needs to be
|
||||
set to an empty string (``""``) (https://github.com/ansible-collections/community.docker/issues/70,
|
||||
https://github.com/ansible-collections/community.docker/pull/71).
|
||||
release_summary: Feature release with one new feature and two bugfixes.
|
||||
fragments:
|
||||
- 1.2.0.yml
|
||||
- 66-ipv6-zones.yml
|
||||
- 71-docker_container-default_host_ip.yml
|
||||
- 73-docker_image-fix-old-docker-py-version.yml
|
||||
release_date: '2021-01-25'
|
||||
1.2.1:
|
||||
changes:
|
||||
bugfixes:
|
||||
- docker connection plugin - fix Docker version parsing, as some docker versions
|
||||
have a leading ``v`` in the output of the command ``docker version --format
|
||||
"{{.Server.Version}}"`` (https://github.com/ansible-collections/community.docker/pull/76).
|
||||
release_summary: Bugfix release.
|
||||
fragments:
|
||||
- 1.2.1.yml
|
||||
- 76-leading-v-support-in-docker-version.yml
|
||||
release_date: '2021-01-28'
|
||||
1.2.2:
|
||||
changes:
|
||||
release_summary: Security bugfix release to address CVE-2021-20191.
|
||||
security_fixes:
|
||||
- docker_swarm - enabled ``no_log`` for the option ``signing_ca_key`` to prevent
|
||||
accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.docker/pull/80).
|
||||
fragments:
|
||||
- 1.2.2.yml
|
||||
- CVE-2021-20191_no_log.yml
|
||||
release_date: '2021-02-05'
|
||||
1.3.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- docker_container - fix healthcheck disabling idempotency issue with strict
|
||||
comparison (https://github.com/ansible-collections/community.docker/issues/85).
|
||||
- docker_image - prevent module failure when removing image that is removed
|
||||
between inspection and removal (https://github.com/ansible-collections/community.docker/pull/87).
|
||||
- docker_image - prevent module failure when removing non-existent image by
|
||||
ID (https://github.com/ansible-collections/community.docker/pull/87).
|
||||
- docker_image_info - prevent module failure when image vanishes between listing
|
||||
and inspection (https://github.com/ansible-collections/community.docker/pull/87).
|
||||
- docker_image_info - prevent module failure when querying non-existent image
|
||||
by ID (https://github.com/ansible-collections/community.docker/pull/87).
|
||||
minor_changes:
|
||||
- docker_container - add ``storage_opts`` option to specify storage options
|
||||
(https://github.com/ansible-collections/community.docker/issues/91, https://github.com/ansible-collections/community.docker/pull/93).
|
||||
- docker_image - allows to specify platform to pull for ``source=pull`` with
|
||||
new option ``pull_platform`` (https://github.com/ansible-collections/community.docker/issues/79,
|
||||
https://github.com/ansible-collections/community.docker/pull/89).
|
||||
- docker_image - properly support image IDs (hashes) for loading and tagging
|
||||
images (https://github.com/ansible-collections/community.docker/issues/86,
|
||||
https://github.com/ansible-collections/community.docker/pull/87).
|
||||
- docker_swarm_service - adding support for maximum number of tasks per node
|
||||
(``replicas_max_per_node``) when running swarm service in replicated mode.
|
||||
Introduced in API 1.40 (https://github.com/ansible-collections/community.docker/issues/7,
|
||||
https://github.com/ansible-collections/community.docker/pull/92).
|
||||
release_summary: Regular feature and bugfix release.
|
||||
fragments:
|
||||
- 1.3.0.yml
|
||||
- 87-docker_image-load-image-ids.yml
|
||||
- 88-docker_container-healthcheck.yml
|
||||
- 89-docker_image-pull-platform.yml
|
||||
- 92-replicas-max-per-node.yml
|
||||
- 93-docker_container-storage_opts.yml
|
||||
modules:
|
||||
- description: Load docker image(s) from archives
|
||||
name: docker_image_load
|
||||
namespace: ''
|
||||
- description: Manage Docker plugins
|
||||
name: docker_plugin
|
||||
namespace: ''
|
||||
release_date: '2021-03-08'
|
||||
1.4.0:
|
||||
changes:
|
||||
breaking_changes:
|
||||
- docker_swarm - if ``join_token`` is specified, a returned join token with
|
||||
the same value will be replaced by ``VALUE_SPECIFIED_IN_NO_LOG_PARAMETER``.
|
||||
Make sure that you do not blindly use the join tokens from the return value
|
||||
of this module when the module is invoked with ``join_token`` specified! This
|
||||
breaking change appears in a minor release since it is necessary to fix a
|
||||
security issue (https://github.com/ansible-collections/community.docker/pull/103).
|
||||
bugfixes:
|
||||
- '``docker_swarm_service`` - fix KeyError caused by reference to deprecated
|
||||
option ``update_failure_action`` (https://github.com/ansible-collections/community.docker/pull/100).'
|
||||
- docker_swarm_service - mark ``secrets`` module option with ``no_log=False``
|
||||
since it does not leak secrets (https://github.com/ansible-collections/community.general/pull/2001).
|
||||
minor_changes:
|
||||
- docker_swarm_service - change ``publish.published_port`` option from mandatory
|
||||
to optional. Docker will assign random high port if not specified (https://github.com/ansible-collections/community.docker/issues/99).
|
||||
release_summary: Security release to address another potential secret leak.
|
||||
Also includes regular bugfixes and features.
|
||||
security_fixes:
|
||||
- docker_swarm - the ``join_token`` option is now marked as ``no_log`` so it
|
||||
is no longer written into logs (https://github.com/ansible-collections/community.docker/pull/103).
|
||||
fragments:
|
||||
- 1.4.0.yml
|
||||
- 100-fix-update_failture_action-keyerror-in-docker_swarm_service.yaml
|
||||
- 101-make-service-published-port-optional.yaml
|
||||
- 102-no_log-false.yml
|
||||
- 103-docker_swarm-join_token.yml
|
||||
release_date: '2021-03-14'
|
||||
1.5.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- all modules - use ``to_native`` to convert exceptions to strings (https://github.com/ansible-collections/community.docker/pull/121).
|
||||
minor_changes:
|
||||
- Add the ``use_ssh_client`` option to most docker modules and plugins (https://github.com/ansible-collections/community.docker/issues/108,
|
||||
https://github.com/ansible-collections/community.docker/pull/114).
|
||||
release_summary: Regular feature release.
|
||||
fragments:
|
||||
- 1.5.0.yml
|
||||
- 114-use_ssh_client.yml
|
||||
- 121-exception-handling.yml
|
||||
modules:
|
||||
- description: Execute command in a docker container
|
||||
name: docker_container_exec
|
||||
namespace: ''
|
||||
release_date: '2021-04-11'
|
||||
1.6.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- 'docker-compose - fix not pulling when ``state: present`` and ``stopped: true``
|
||||
(https://github.com/ansible-collections/community.docker/issues/12, https://github.com/ansible-collections/community.docker/pull/119).'
|
||||
- docker_plugin - also configure plugin after installing (https://github.com/ansible-collections/community.docker/issues/118,
|
||||
https://github.com/ansible-collections/community.docker/pull/135).
|
||||
- docker_swarm_services - avoid crash during idempotence check if ``published_port``
|
||||
is not specified (https://github.com/ansible-collections/community.docker/issues/107,
|
||||
https://github.com/ansible-collections/community.docker/pull/136).
|
||||
deprecated_features:
|
||||
- docker_* modules and plugins, except ``docker_swarm`` connection plugin and
|
||||
``docker_compose`` and ``docker_stack*`` modules - the current default ``localhost``
|
||||
for ``tls_hostname`` is deprecated. In community.docker 2.0.0 it will be computed
|
||||
from ``docker_host`` instead (https://github.com/ansible-collections/community.docker/pull/134).
|
||||
minor_changes:
|
||||
- common module utils - correct error messages for guiding to install proper
|
||||
Docker SDK for Python module (https://github.com/ansible-collections/community.docker/pull/125).
|
||||
- 'docker_container - allow ``memory_swap: -1`` to set memory swap limit to
|
||||
unlimited. This is useful when the user cannot set memory swap limits due
|
||||
to cgroup limitations or other reasons, as by default Docker will try to set
|
||||
swap usage to two times the value of ``memory`` (https://github.com/ansible-collections/community.docker/pull/138).'
|
||||
release_summary: Regular bugfix and feature release.
|
||||
fragments:
|
||||
- 1.6.0.yml
|
||||
- 12-correct_pull_wo_starting.yaml
|
||||
- 125-correct-error-message-for-docker-sdk-version.yaml
|
||||
- 134-tls_hostname.yml
|
||||
- 135-docker_plugin-config.yml
|
||||
- 136-docker_swarm_service-fix-idempotence-bug.yml
|
||||
- 138-docker_container-allow-memory-swap-unlimited.yml
|
||||
release_date: '2021-05-11'
|
||||
1.6.1:
|
||||
changes:
|
||||
bugfixes:
|
||||
- docker_* modules and plugins, except ``docker_swarm`` connection plugin and
|
||||
``docker_compose`` and ``docker_stack*`` modules - only emit ``tls_hostname``
|
||||
deprecation message if TLS is actually used (https://github.com/ansible-collections/community.docker/pull/143).
|
||||
release_summary: Bugfix release to reduce deprecation warning spam.
|
||||
fragments:
|
||||
- 1.6.1.yml
|
||||
- 143-tls_hostname-deprecation.yml
|
||||
release_date: '2021-05-17'
|
||||
1.7.0:
|
||||
changes:
|
||||
minor_changes:
|
||||
- docker_image - allow to tag images by ID (https://github.com/ansible-collections/community.docker/pull/149).
|
||||
release_summary: Small feature and bugfix release.
|
||||
fragments:
|
||||
- 1.7.0.yml
|
||||
- 149-docker_image-tagging.yml
|
||||
release_date: '2021-06-08'
|
||||
1.8.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- 'docker_compose - fix idempotence bug when using ``stopped: true`` (https://github.com/ansible-collections/community.docker/issues/142,
|
||||
https://github.com/ansible-collections/community.docker/pull/159).'
|
||||
minor_changes:
|
||||
- Avoid internal ansible-core module_utils in favor of equivalent public API
|
||||
available since at least Ansible 2.9 (https://github.com/ansible-collections/community.docker/pull/164).
|
||||
- docker_compose - added ``profiles`` option to specify service profiles when
|
||||
starting services (https://github.com/ansible-collections/community.docker/pull/167).
|
||||
- docker_containers inventory plugin - when ``connection_type=docker-api``,
|
||||
now pass Docker daemon connection options from inventory plugin to connection
|
||||
plugin. This can be disabled by setting ``configure_docker_daemon=false``
|
||||
(https://github.com/ansible-collections/community.docker/pull/157).
|
||||
- docker_host_info - allow values for keys in ``containers_filters``, ``images_filters``,
|
||||
``networks_filters``, and ``volumes_filters`` to be passed as YAML lists (https://github.com/ansible-collections/community.docker/pull/160).
|
||||
- docker_plugin - added ``alias`` option to specify local names for docker plugins
|
||||
(https://github.com/ansible-collections/community.docker/pull/161).
|
||||
release_summary: Regular bugfix and feature release.
|
||||
fragments:
|
||||
- 1.8.0.yml
|
||||
- 157-inventory-connection-options.yml
|
||||
- 159-docker_compose-idempotence-fix.yml
|
||||
- 160-docker_host_info-label-fitler-lists.yml
|
||||
- 161-docker_plugin-alias-option.yml
|
||||
- 167-docker_compose-profiles-option.yml
|
||||
- ansible-core-_text.yml
|
||||
release_date: '2021-06-28'
|
||||
1.9.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- docker_compose - fixes task failures when bringing up services while using
|
||||
``docker-compose <1.17.0`` (https://github.com/ansible-collections/community.docker/issues/180).
|
||||
- docker_container - make sure to also return ``container`` on ``detached=false``
|
||||
when status code is non-zero (https://github.com/ansible-collections/community.docker/pull/178).
|
||||
- docker_stack_info - make sure that module isn't skipped in check mode (https://github.com/ansible-collections/community.docker/pull/183).
|
||||
- docker_stack_task_info - make sure that module isn't skipped in check mode
|
||||
(https://github.com/ansible-collections/community.docker/pull/183).
|
||||
deprecated_features:
|
||||
- docker_container - the new ``command_handling``'s default value, ``compatibility``,
|
||||
is deprecated and will change to ``correct`` in community.docker 3.0.0. A
|
||||
deprecation warning is emitted by the module in cases where the behavior will
|
||||
change. Please note that ansible-core will output a deprecation warning only
|
||||
once, so if it is shown for an earlier task, there could be more tasks with
|
||||
this warning where it is not shown (https://github.com/ansible-collections/community.docker/pull/186).
|
||||
minor_changes:
|
||||
- docker_* modules - include ``ImportError`` traceback when reporting that Docker
|
||||
SDK for Python could not be found (https://github.com/ansible-collections/community.docker/pull/188).
|
||||
- docker_compose - added ``env_file`` option for specifying custom environment
|
||||
files (https://github.com/ansible-collections/community.docker/pull/174).
|
||||
- docker_container - added ``publish_all_ports`` option to publish all exposed
|
||||
ports to random ports except those explicitly bound with ``published_ports``
|
||||
(this was already added in community.docker 1.8.0) (https://github.com/ansible-collections/community.docker/pull/162).
|
||||
- docker_container - added new ``command_handling`` option with current deprecated
|
||||
default value ``compatibility`` which allows to control how the module handles
|
||||
shell quoting when interpreting lists, and how the module handles empty lists/strings.
|
||||
The default will switch to ``correct`` in community.docker 3.0.0 (https://github.com/ansible-collections/community.docker/pull/186).
|
||||
- docker_container - lifted restriction preventing the creation of anonymous
|
||||
volumes with the ``mounts`` option (https://github.com/ansible-collections/community.docker/pull/181).
|
||||
release_summary: New bugfixes and features release.
|
||||
fragments:
|
||||
- 1.9.0.yml
|
||||
- 162-docker_container_publish_all_option.yml
|
||||
- 174-docker_compose-env_file.yml
|
||||
- 178-docker_container-container.yml
|
||||
- 181-docker_container-allow-anonymous-volume-mounts.yml
|
||||
- 182-docker_compose-fix-start-keyword-failures.yml
|
||||
- 183-info-check_mode.yml
|
||||
- 186-docker_container-command-entrypoint.yml
|
||||
- 188-improve-import-errors.yml
|
||||
plugins:
|
||||
connection:
|
||||
- description: execute on host running controller container
|
||||
name: nsenter
|
||||
namespace: null
|
||||
release_date: '2021-08-03'
|
||||
1.9.1:
|
||||
changes:
|
||||
bugfixes:
|
||||
- docker_compose - fixed incorrect ``changed`` status for services with ``profiles``
|
||||
defined, but none enabled (https://github.com/ansible-collections/community.docker/pull/192).
|
||||
release_summary: Regular bugfix release.
|
||||
fragments:
|
||||
- 1.9.1.yml
|
||||
- 192-docker_compose-profiles-idempotency-fix.yml
|
||||
release_date: '2021-08-29'
|
||||
2.0.0:
|
||||
changes:
|
||||
breaking_changes:
|
||||
- docker_compose - fixed ``timeout`` defaulting behavior so that ``stop_grace_period``,
|
||||
if defined in the compose file, will be used if ``timeout`` is not specified
|
||||
(https://github.com/ansible-collections/community.docker/pull/163).
|
||||
deprecated_features:
|
||||
- docker_container - using the special value ``all`` in ``published_ports``
|
||||
has been deprecated. Use ``publish_all_ports=true`` instead (https://github.com/ansible-collections/community.docker/pull/210).
|
||||
release_summary: New major release with some deprecations removed and a breaking
|
||||
change in the ``docker_compose`` module regarding the ``timeout`` parameter.
|
||||
removed_features:
|
||||
- docker_container - the default value of ``container_default_behavior`` changed
|
||||
to ``no_defaults`` (https://github.com/ansible-collections/community.docker/pull/210).
|
||||
- docker_container - the default value of ``network_mode`` is now the name of
|
||||
the first network specified in ``networks`` if such are specified and ``networks_cli_compatible=true``
|
||||
(https://github.com/ansible-collections/community.docker/pull/210).
|
||||
- docker_container - the special value ``all`` can no longer be used in ``published_ports``
|
||||
next to other values. Please use ``publish_all_ports=true`` instead (https://github.com/ansible-collections/community.docker/pull/210).
|
||||
- docker_login - removed the ``email`` option (https://github.com/ansible-collections/community.docker/pull/210).
|
||||
fragments:
|
||||
- 163-docker_compose-timeout-fix.yml
|
||||
- 2.0.0.yml
|
||||
- 210-deprecations.yml
|
||||
release_date: '2021-10-21'
|
||||
2.0.1:
|
||||
changes:
|
||||
release_summary: Maintenance release with some documentation fixes.
|
||||
fragments:
|
||||
- 2.0.1.yml
|
||||
release_date: '2021-11-13'
|
||||
2.0.2:
|
||||
changes:
|
||||
bugfixes:
|
||||
- docker_api connection plugin - avoid passing an unnecessary argument to a
|
||||
Docker SDK for Python call that is only supported by version 3.0.0 or later
|
||||
(https://github.com/ansible-collections/community.docker/pull/243).
|
||||
- docker_container_exec - ``chdir`` is only supported since Docker SDK for Python
|
||||
3.0.0. Make sure that this option can only use when 3.0.0 or later is installed,
|
||||
and prevent passing this parameter on when ``chdir`` is not provided to this
|
||||
module (https://github.com/ansible-collections/community.docker/pull/243,
|
||||
https://github.com/ansible-collections/community.docker/issues/242).
|
||||
- nsenter connection plugin - ensure the ``nsenter_pid`` option is retrieved
|
||||
in ``_connect`` instead of ``__init__`` to prevent a crash due to bad initialization
|
||||
order (https://github.com/ansible-collections/community.docker/pull/249).
|
||||
- nsenter connection plugin - replace the use of ``--all-namespaces`` with specific
|
||||
namespaces to support compatibility with Busybox nsenter (used on, for example,
|
||||
Alpine containers) (https://github.com/ansible-collections/community.docker/pull/249).
|
||||
release_summary: Bugfix release.
|
||||
fragments:
|
||||
- 2.0.2.yml
|
||||
- 243-docker_container_exec-chdir.yml
|
||||
- 249-nsenter-fixes.yml
|
||||
release_date: '2021-12-09'
|
||||
2.1.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- Various modules and plugins - use vendored version of ``distutils.version``
|
||||
included in ansible-core 2.12 if available. This avoids breakage when ``distutils``
|
||||
is removed from the standard library of Python 3.12. Note that ansible-core
|
||||
2.11, ansible-base 2.10 and Ansible 2.9 are right now not compatible with
|
||||
Python 3.12, hence this fix does not target these ansible-core/-base/2.9 versions
|
||||
(https://github.com/ansible-collections/community.docker/pull/258).
|
||||
- docker connection plugin - replace deprecated ``distutils.spawn.find_executable``
|
||||
with Ansible's ``get_bin_path`` to find the ``docker`` executable (https://github.com/ansible-collections/community.docker/pull/257).
|
||||
- docker_container_exec - disallow using the ``chdir`` option for Docker API
|
||||
before 1.35 (https://github.com/ansible-collections/community.docker/pull/253).
|
||||
minor_changes:
|
||||
- docker_container_exec - add ``detach`` parameter (https://github.com/ansible-collections/community.docker/issues/250,
|
||||
https://github.com/ansible-collections/community.docker/pull/255).
|
||||
- docker_container_exec - add ``env`` option (https://github.com/ansible-collections/community.docker/issues/248,
|
||||
https://github.com/ansible-collections/community.docker/pull/254).
|
||||
release_summary: Feature and bugfix release.
|
||||
fragments:
|
||||
- 2.1.0.yml
|
||||
- 253-chdir-min-version.yml
|
||||
- 254-docker_container_exec-env.yml
|
||||
- 255-docker_container_exec-detach.yml
|
||||
- 257-remove-distutils-spawn.yml
|
||||
- 258-distutils.version.yml
|
||||
release_date: '2022-01-04'
|
||||
2.1.1:
|
||||
changes:
|
||||
bugfixes:
|
||||
- Fix unintended breaking change caused by `an earlier fix <https://github.com/ansible-collections/community.docker/pull/258>`_
|
||||
by vendoring the deprecated Python standard library ``distutils.version``
|
||||
until this collection stops supporting Ansible 2.9 and ansible-base 2.10 (https://github.com/ansible-collections/community.docker/issues/267,
|
||||
https://github.com/ansible-collections/community.docker/pull/269).
|
||||
release_summary: Emergency release to amend breaking change in previous release.
|
||||
fragments:
|
||||
- 2.1.1.yml
|
||||
- 269-distutils-version-fix.yml
|
||||
release_date: '2022-01-05'
|
||||
2.2.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- docker_container, docker_image - adjust image finding code to peculiarities
|
||||
of ``podman-docker``'s API emulation when Docker short names like ``redis``
|
||||
are used (https://github.com/ansible-collections/community.docker/issues/292).
|
||||
minor_changes:
|
||||
- docker_config - add support for rolling update, set ``rolling_versions`` to
|
||||
``true`` to enable (https://github.com/ansible-collections/community.docker/pull/295,
|
||||
https://github.com/ansible-collections/community.docker/issues/109).
|
||||
- docker_secret - add support for rolling update, set ``rolling_versions`` to
|
||||
``true`` to enable (https://github.com/ansible-collections/community.docker/pull/293,
|
||||
https://github.com/ansible-collections/community.docker/issues/21).
|
||||
- docker_swarm_service - add support for setting capabilities with the ``cap_add``
|
||||
and ``cap_drop`` parameters. Usage is the same as with the ``capabilities``
|
||||
and ``cap_drop`` parameters for ``docker_container`` (https://github.com/ansible-collections/community.docker/pull/294).
|
||||
release_summary: Regular feature and bugfix release.
|
||||
fragments:
|
||||
- 2.2.0.yml
|
||||
- 270-rolling-secrets.yml
|
||||
- 271-swarm-service-capabilities.yml
|
||||
- 272-rolling-configs.yml
|
||||
- 292-docker-podman-compatibility.yml
|
||||
release_date: '2022-02-21'
|
||||
2.2.1:
|
||||
changes:
|
||||
bugfixes:
|
||||
- docker_compose - fix Python 3 type error when extracting warnings or errors
|
||||
from docker-compose's output (https://github.com/ansible-collections/community.docker/pull/305).
|
||||
release_summary: Regular bugfix release.
|
||||
fragments:
|
||||
- 2.2.1.yml
|
||||
- 305-docker_compose-errors-warnings.yml
|
||||
release_date: '2022-03-14'
|
||||
2.3.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- docker connection plugin - fix option handling to be compatible with ansible-core
|
||||
2.13 (https://github.com/ansible-collections/community.docker/pull/297, https://github.com/ansible-collections/community.docker/issues/307).
|
||||
- docker_api connection plugin - fix option handling to be compatible with ansible-core
|
||||
2.13 (https://github.com/ansible-collections/community.docker/pull/308).
|
||||
minor_changes:
|
||||
- docker connection plugin - implement connection reset by clearing internal
|
||||
container user cache (https://github.com/ansible-collections/community.docker/pull/312).
|
||||
- docker connection plugin - simplify ``actual_user`` handling code (https://github.com/ansible-collections/community.docker/pull/311).
|
||||
- docker connection plugin - the plugin supports new ways to define the timeout.
|
||||
These are the ``ANSIBLE_DOCKER_TIMEOUT`` environment variable, the ``timeout``
|
||||
setting in the ``docker_connection`` section of ``ansible.cfg``, and the ``ansible_docker_timeout``
|
||||
variable (https://github.com/ansible-collections/community.docker/pull/297).
|
||||
- docker_api connection plugin - implement connection reset by clearing internal
|
||||
container user/group ID cache (https://github.com/ansible-collections/community.docker/pull/312).
|
||||
- docker_api connection plugin - the plugin supports new ways to define the
|
||||
timeout. These are the ``ANSIBLE_DOCKER_TIMEOUT`` environment variable, the
|
||||
``timeout`` setting in the ``docker_connection`` section of ``ansible.cfg``,
|
||||
and the ``ansible_docker_timeout`` variable (https://github.com/ansible-collections/community.docker/pull/308).
|
||||
release_summary: Regular feature and bugfix release.
|
||||
fragments:
|
||||
- 2.3.0.yml
|
||||
- 297-docker-connection-config.yml
|
||||
- 308-docker_api-connection-config.yml
|
||||
- 311-docker-actual_user.yml
|
||||
- 312-docker-connection-reset.yml
|
||||
release_date: '2022-03-28'
|
||||
2.4.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- docker connection plugin - make sure that ``docker_extra_args`` is used for
|
||||
querying the Docker version. Also ensures that the Docker version is only
|
||||
queried when needed. This is currently the case if a remote user is specified
|
||||
(https://github.com/ansible-collections/community.docker/issues/325, https://github.com/ansible-collections/community.docker/pull/327).
|
||||
minor_changes:
|
||||
- Prepare collection for inclusion in an Execution Environment by declaring
|
||||
its dependencies. The ``docker_stack*`` modules are not supported (https://github.com/ansible-collections/community.docker/pull/336).
|
||||
- current_container_facts - add detection for GitHub Actions (https://github.com/ansible-collections/community.docker/pull/336).
|
||||
- docker_container - support returning Docker container log output when using
|
||||
Docker's ``local`` logging driver, an optimized local logging driver introduced
|
||||
in Docker 18.09 (https://github.com/ansible-collections/community.docker/pull/337).
|
||||
release_summary: Regular feature and bugfix release.
|
||||
fragments:
|
||||
- 2.4.0.yml
|
||||
- 327-connection-fix.yml
|
||||
- 336-ee.yml
|
||||
- 337-container-output-from-local-logging-driver.yml
|
||||
release_date: '2022-04-25'
|
||||
2.5.0:
|
||||
changes:
|
||||
minor_changes:
|
||||
- docker_config - add support for ``template_driver`` with one option ``golang``
|
||||
(https://github.com/ansible-collections/community.docker/issues/332, https://github.com/ansible-collections/community.docker/pull/345).
|
||||
- docker_swarm - adds ``data_path_addr`` parameter during swarm initialization
|
||||
or when joining (https://github.com/ansible-collections/community.docker/issues/339).
|
||||
release_summary: Regular feature release.
|
||||
fragments:
|
||||
- 2.5.0.yml
|
||||
- 344-adds-data-path-addr.yml
|
||||
- 345-docker_config-template-driver.yml
|
||||
release_date: '2022-05-14'
|
||||
2.5.1:
|
||||
changes:
|
||||
bugfixes:
|
||||
- Include ``PSF-license.txt`` file for ``plugins/module_utils/_version.py``.
|
||||
release_summary: Maintenance release.
|
||||
fragments:
|
||||
- 2.5.1.yml
|
||||
- psf-license.yml
|
||||
release_date: '2022-05-16'
|
||||
2.6.0:
|
||||
changes:
|
||||
bugfixes:
|
||||
- docker_container - fail with a meaningful message instead of crashing if a
|
||||
port is specified with more than three colon-separated parts (https://github.com/ansible-collections/community.docker/pull/367,
|
||||
https://github.com/ansible-collections/community.docker/issues/365).
|
||||
- docker_container - remove unused code that will cause problems with Python
|
||||
3.13 (https://github.com/ansible-collections/community.docker/pull/354).
|
||||
deprecated_features:
|
||||
- Support for Ansible 2.9 and ansible-base 2.10 is deprecated, and will be removed
|
||||
in the next major release (community.docker 3.0.0). Some modules might still
|
||||
work with these versions afterwards, but we will no longer keep compatibility
|
||||
code that was needed to support them (https://github.com/ansible-collections/community.docker/pull/361).
|
||||
- The dependency on docker-compose for Execution Environments is deprecated
|
||||
and will be removed in community.docker 3.0.0. The `Python docker-compose
|
||||
library <https://pypi.org/project/docker-compose/>`__ is unmaintained and
|
||||
can cause dependency issues. You can manually still install it in an Execution
|
||||
Environment when needed (https://github.com/ansible-collections/community.docker/pull/373).
|
||||
- Various modules - the default of ``tls_hostname`` that was supposed to be
|
||||
removed in community.docker 2.0.0 will now be removed in version 3.0.0 (https://github.com/ansible-collections/community.docker/pull/362).
|
||||
- docker_stack - the return values ``out`` and ``err`` that were supposed to
|
||||
be removed in community.docker 2.0.0 will now be removed in version 3.0.0
|
||||
(https://github.com/ansible-collections/community.docker/pull/362).
|
||||
minor_changes:
|
||||
- docker_container - added ``image_label_mismatch`` parameter (https://github.com/ansible-collections/community.docker/issues/314,
|
||||
https://github.com/ansible-collections/community.docker/pull/370).
|
||||
release_summary: Bugfix and feature release.
|
||||
fragments:
|
||||
- 2.6.0.yml
|
||||
- 354-remove-dead-code.yml
|
||||
- 362-deprecations.yml
|
||||
- 367-docker_container-ports-validation.yml
|
||||
- 370-add-image-label-mismatch.yml
|
||||
- 373-deprecate-docker-compose-dependency.yml
|
||||
- deprecate-ansible-2.9-2.10.yml
|
||||
release_date: '2022-05-24'
|
||||
@@ -0,0 +1,29 @@
|
||||
changelog_filename_template: ../CHANGELOG.rst
|
||||
changelog_filename_version_depth: 0
|
||||
changes_file: changelog.yaml
|
||||
changes_format: combined
|
||||
keep_fragments: false
|
||||
mention_ancestor: true
|
||||
new_plugins_after_name: removed_features
|
||||
notesdir: fragments
|
||||
prelude_section_name: release_summary
|
||||
prelude_section_title: Release Summary
|
||||
sections:
|
||||
- - major_changes
|
||||
- Major Changes
|
||||
- - minor_changes
|
||||
- Minor Changes
|
||||
- - breaking_changes
|
||||
- Breaking Changes / Porting Guide
|
||||
- - deprecated_features
|
||||
- Deprecated Features
|
||||
- - removed_features
|
||||
- Removed Features (previously deprecated)
|
||||
- - security_fixes
|
||||
- Security Fixes
|
||||
- - bugfixes
|
||||
- Bugfixes
|
||||
- - known_issues
|
||||
- Known Issues
|
||||
title: Docker Community Collection
|
||||
trivial_section_name: trivial
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
sections:
|
||||
- title: Scenario Guide
|
||||
toctree:
|
||||
- scenario_guide
|
||||
@@ -0,0 +1,23 @@
|
||||
---
|
||||
edit_on_github:
|
||||
repository: ansible-collections/community.docker
|
||||
branch: main
|
||||
path_prefix: ''
|
||||
|
||||
extra_links:
|
||||
- description: Submit a bug report
|
||||
url: https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&template=bug_report.md
|
||||
- description: Request a feature
|
||||
url: https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&template=feature_request.md
|
||||
|
||||
communication:
|
||||
matrix_rooms:
|
||||
- topic: General usage and support questions
|
||||
room: '#users:ansible.im'
|
||||
irc_channels:
|
||||
- topic: General usage and support questions
|
||||
network: Libera
|
||||
channel: '#ansible'
|
||||
mailing_lists:
|
||||
- topic: Ansible Project List
|
||||
url: https://groups.google.com/g/ansible-project
|
||||
@@ -0,0 +1,235 @@
|
||||
.. _ansible_collections.community.docker.docsite.scenario_guide:
|
||||
|
||||
Docker Guide
|
||||
============
|
||||
|
||||
The `community.docker collection <https://galaxy.ansible.com/community/docker>`_ offers several modules and plugins for orchestrating Docker containers and Docker Swarm.
|
||||
|
||||
.. contents::
|
||||
:local:
|
||||
:depth: 1
|
||||
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
Most of the modules and plugins in community.docker require the `Docker SDK for Python <https://docker-py.readthedocs.io/en/stable/>`_. The SDK needs to be installed on the machines where the modules and plugins are executed, and for the Python version(s) with which the modules and plugins are executed. You can use the :ref:`community.general.python_requirements_info module <ansible_collections.community.general.python_requirements_info_module>` to make sure that the Docker SDK for Python is installed on the correct machine and for the Python version used by Ansible.
|
||||
|
||||
Note that plugins (inventory plugins and connection plugins) are always executed in the context of Ansible itself. If you use a plugin that requires the Docker SDK for Python, you need to install it on the machine running ``ansible`` or ``ansible-playbook`` and for the same Python interpreter used by Ansible. To see which Python is used, run ``ansible --version``.
|
||||
|
||||
You can install the Docker SDK for Python for Python 3.6 or later as follows:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ pip install docker
|
||||
|
||||
For Python 2.7, you need to use a version between 2.0.0 and 4.4.4 since the Python package for Docker removed support for Python 2.7 on 5.0.0. You can install the specific version of the Docker SDK for Python as follows:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ pip install 'docker==4.4.4'
|
||||
|
||||
For Python 2.6, you need a version before 2.0.0. For these versions, the SDK was called ``docker-py``, so you need to install it as follows:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ pip install 'docker-py>=1.10.0'
|
||||
|
||||
Please install only one of ``docker`` or ``docker-py``. Installing both will result in a broken installation. If this happens, Ansible will detect it and inform you about it. If that happens, you must uninstall both and reinstall the correct version.
|
||||
|
||||
If in doubt, always install ``docker`` and never ``docker-py``.
|
||||
|
||||
|
||||
Connecting to the Docker API
|
||||
----------------------------
|
||||
|
||||
You can connect to a local or remote API using parameters passed to each task or by setting environment variables. The order of precedence is command line parameters and then environment variables. If neither a command line option nor an environment variable is found, Ansible uses the default value provided under `Parameters`_.
|
||||
|
||||
|
||||
Parameters
|
||||
..........
|
||||
|
||||
Most plugins and modules can be configured by the following parameters:
|
||||
|
||||
docker_host
|
||||
The URL or Unix socket path used to connect to the Docker API. Defaults to ``unix://var/run/docker.sock``. To connect to a remote host, provide the TCP connection string (for example: ``tcp://192.0.2.23:2376``). If TLS is used to encrypt the connection to the API, then the module will automatically replace 'tcp' in the connection URL with 'https'.
|
||||
|
||||
api_version
|
||||
The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported by the Docker SDK for Python installed.
|
||||
|
||||
timeout
|
||||
The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.
|
||||
|
||||
tls
|
||||
Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server. Defaults to ``false``.
|
||||
|
||||
validate_certs
|
||||
Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server. Default is ``false``.
|
||||
|
||||
cacert_path
|
||||
Use a CA certificate when performing server verification by providing the path to a CA certificate file.
|
||||
|
||||
cert_path
|
||||
Path to the client's TLS certificate file.
|
||||
|
||||
key_path
|
||||
Path to the client's TLS key file.
|
||||
|
||||
tls_hostname
|
||||
When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults to ``localhost``.
|
||||
|
||||
ssl_version
|
||||
Provide a valid SSL version number. The default value is determined by the Docker SDK for Python.
|
||||
|
||||
|
||||
Environment variables
|
||||
.....................
|
||||
|
||||
You can also control how the plugins and modules connect to the Docker API by setting the following environment variables.
|
||||
|
||||
For plugins, they have to be set for the environment Ansible itself runs in. For modules, they have to be set for the environment the modules are executed in. For modules running on remote machines, the environment variables have to be set on that machine for the user used to execute the modules with.
|
||||
|
||||
DOCKER_HOST
|
||||
The URL or Unix socket path used to connect to the Docker API.
|
||||
|
||||
DOCKER_API_VERSION
|
||||
The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
|
||||
by docker-py.
|
||||
|
||||
DOCKER_TIMEOUT
|
||||
The maximum amount of time in seconds to wait on a response from the API.
|
||||
|
||||
DOCKER_CERT_PATH
|
||||
Path to the directory containing the client certificate, client key and CA certificate.
|
||||
|
||||
DOCKER_SSL_VERSION
|
||||
Provide a valid SSL version number.
|
||||
|
||||
DOCKER_TLS
|
||||
Secure the connection to the API by using TLS without verifying the authenticity of the Docker Host.
|
||||
|
||||
DOCKER_TLS_VERIFY
|
||||
Secure the connection to the API by using TLS and verify the authenticity of the Docker Host.
|
||||
|
||||
|
||||
Plain Docker daemon: images, networks, volumes, and containers
|
||||
--------------------------------------------------------------
|
||||
|
||||
For working with a plain Docker daemon, that is without Swarm, there are connection plugins, an inventory plugin, and several modules available:
|
||||
|
||||
docker connection plugin
|
||||
The :ref:`community.docker.docker connection plugin <ansible_collections.community.docker.docker_connection>` uses the Docker CLI utility to connect to Docker containers and execute modules in them. It essentially wraps ``docker exec`` and ``docker cp``. This connection plugin is supported by the :ref:`ansible.posix.synchronize module <ansible_collections.ansible.posix.synchronize_module>`.
|
||||
|
||||
docker_api connection plugin
|
||||
The :ref:`community.docker.docker_api connection plugin <ansible_collections.community.docker.docker_api_connection>` talks directly to the Docker daemon to connect to Docker containers and execute modules in them.
|
||||
|
||||
docker_containers inventory plugin
|
||||
The :ref:`community.docker.docker_containers inventory plugin <ansible_collections.community.docker.docker_containers_inventory>` allows you to dynamically add Docker containers from a Docker Daemon to your Ansible inventory. See :ref:`dynamic_inventory` for details on dynamic inventories.
|
||||
|
||||
The `docker inventory script <https://github.com/ansible-community/contrib-scripts/blob/main/inventory/docker.py>`_ is deprecated. Please use the inventory plugin instead. The inventory plugin has several compatibility options. If you need to collect Docker containers from multiple Docker daemons, you need to add every Docker daemon as an individual inventory source.
|
||||
|
||||
docker_host_info module
|
||||
The :ref:`community.docker.docker_host_info module <ansible_collections.community.docker.docker_host_info_module>` allows you to retrieve information on a Docker daemon, such as all containers, images, volumes, networks and so on.
|
||||
|
||||
docker_login module
|
||||
The :ref:`community.docker.docker_login module <ansible_collections.community.docker.docker_login_module>` allows you to log in and out of a remote registry, such as Docker Hub or a private registry. It provides similar functionality to the ``docker login`` and ``docker logout`` CLI commands.
|
||||
|
||||
docker_prune module
|
||||
The :ref:`community.docker.docker_prune module <ansible_collections.community.docker.docker_prune_module>` allows you to prune no longer needed containers, images, volumes and so on. It provides similar functionality to the ``docker prune`` CLI command.
|
||||
|
||||
docker_image module
|
||||
The :ref:`community.docker.docker_image module <ansible_collections.community.docker.docker_image_module>` provides full control over images, including: build, pull, push, tag and remove.
|
||||
|
||||
docker_image_info module
|
||||
The :ref:`community.docker.docker_image_info module <ansible_collections.community.docker.docker_image_info_module>` allows you to list and inspect images.
|
||||
|
||||
docker_network module
|
||||
The :ref:`community.docker.docker_network module <ansible_collections.community.docker.docker_network_module>` provides full control over Docker networks.
|
||||
|
||||
docker_network_info module
|
||||
The :ref:`community.docker.docker_network_info module <ansible_collections.community.docker.docker_network_info_module>` allows you to inspect Docker networks.
|
||||
|
||||
docker_volume_info module
|
||||
The :ref:`community.docker.docker_volume_info module <ansible_collections.community.docker.docker_volume_info_module>` provides full control over Docker volumes.
|
||||
|
||||
docker_volume module
|
||||
The :ref:`community.docker.docker_volume module <ansible_collections.community.docker.docker_volume_module>` allows you to inspect Docker volumes.
|
||||
|
||||
docker_container module
|
||||
The :ref:`community.docker.docker_container module <ansible_collections.community.docker.docker_container_module>` manages the container lifecycle by providing the ability to create, update, stop, start and destroy a Docker container.
|
||||
|
||||
docker_container_info module
|
||||
The :ref:`community.docker.docker_container_info module <ansible_collections.community.docker.docker_container_info_module>` allows you to inspect a Docker container.
|
||||
|
||||
|
||||
Docker Compose
|
||||
--------------
|
||||
|
||||
The :ref:`community.docker.docker_compose module <ansible_collections.community.docker.docker_compose_module>`
|
||||
allows you to use your existing Docker compose files to orchestrate containers on a single Docker daemon or on Swarm.
|
||||
Supports compose versions 1 and 2.
|
||||
|
||||
Next to Docker SDK for Python, you need to install `docker-compose <https://github.com/docker/compose>`_ on the remote machines to use the module.
|
||||
|
||||
|
||||
Docker Machine
|
||||
--------------
|
||||
|
||||
The :ref:`community.docker.docker_machine inventory plugin <ansible_collections.community.docker.docker_machine_inventory>` allows you to dynamically add Docker Machine hosts to your Ansible inventory.
|
||||
|
||||
|
||||
Docker stack
|
||||
------------
|
||||
|
||||
The :ref:`community.docker.docker_stack module <ansible_collections.community.docker.docker_stack_module>` module allows you to control Docker stacks. Information on stacks can be retrieved by the :ref:`community.docker.docker_stack_info module <ansible_collections.community.docker.docker_stack_info_module>`, and information on stack tasks can be retrieved by the :ref:`community.docker.docker_stack_task_info module <ansible_collections.community.docker.docker_stack_task_info_module>`.
|
||||
|
||||
|
||||
Docker Swarm
|
||||
------------
|
||||
|
||||
The community.docker collection provides multiple plugins and modules for managing Docker Swarms.
|
||||
|
||||
Swarm management
|
||||
................
|
||||
|
||||
One inventory plugin and several modules are provided to manage Docker Swarms:
|
||||
|
||||
docker_swarm inventory plugin
|
||||
The :ref:`community.docker.docker_swarm inventory plugin <ansible_collections.community.docker.docker_swarm_inventory>` allows you to dynamically add all Docker Swarm nodes to your Ansible inventory.
|
||||
|
||||
docker_swarm module
|
||||
The :ref:`community.docker.docker_swarm module <ansible_collections.community.docker.docker_swarm_module>` allows you to globally configure Docker Swarm manager nodes to join and leave swarms, and to change the Docker Swarm configuration.
|
||||
|
||||
docker_swarm_info module
|
||||
The :ref:`community.docker.docker_swarm_info module <ansible_collections.community.docker.docker_swarm_info_module>` allows you to retrieve information on Docker Swarm.
|
||||
|
||||
docker_node module
|
||||
The :ref:`community.docker.docker_node module <ansible_collections.community.docker.docker_node_module>` allows you to manage Docker Swarm nodes.
|
||||
|
||||
docker_node_info module
|
||||
The :ref:`community.docker.docker_node_info module <ansible_collections.community.docker.docker_node_info_module>` allows you to retrieve information on Docker Swarm nodes.
|
||||
|
||||
Configuration management
|
||||
........................
|
||||
|
||||
The community.docker collection offers modules to manage Docker Swarm configurations and secrets:
|
||||
|
||||
docker_config module
|
||||
The :ref:`community.docker.docker_config module <ansible_collections.community.docker.docker_config_module>` allows you to create and modify Docker Swarm configs.
|
||||
|
||||
docker_secret module
|
||||
The :ref:`community.docker.docker_secret module <ansible_collections.community.docker.docker_secret_module>` allows you to create and modify Docker Swarm secrets.
|
||||
|
||||
|
||||
Swarm services
|
||||
..............
|
||||
|
||||
Docker Swarm services can be created and updated with the :ref:`community.docker.docker_swarm_service module <ansible_collections.community.docker.docker_swarm_service_module>`, and information on them can be queried by the :ref:`community.docker.docker_swarm_service_info module <ansible_collections.community.docker.docker_swarm_service_info_module>`.
|
||||
|
||||
|
||||
Helpful links
|
||||
-------------
|
||||
|
||||
Still using Dockerfile to build images? Check out `ansible-bender <https://github.com/ansible-community/ansible-bender>`_, and start building images from your Ansible playbooks.
|
||||
|
||||
Use `Ansible Operator <https://learn.openshift.com/ansibleop/ansible-operator-overview/>`_ to launch your docker-compose file on `OpenShift <https://www.okd.io/>`_. Go from an app on your laptop to a fully scalable app in the cloud with Kubernetes in just a few moments.
|
||||
@@ -0,0 +1,2 @@
|
||||
docker
|
||||
docker-compose
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
version: 1
|
||||
dependencies:
|
||||
python: meta/ee-requirements.txt
|
||||
system: meta/ee-bindep.txt
|
||||
@@ -0,0 +1,27 @@
|
||||
---
|
||||
requires_ansible: '>=2.9.10'
|
||||
action_groups:
|
||||
docker:
|
||||
- docker_compose
|
||||
- docker_config
|
||||
- docker_container
|
||||
- docker_container_exec
|
||||
- docker_container_info
|
||||
- docker_host_info
|
||||
- docker_image
|
||||
- docker_image_info
|
||||
- docker_image_load
|
||||
- docker_login
|
||||
- docker_network
|
||||
- docker_network_info
|
||||
- docker_node
|
||||
- docker_node_info
|
||||
- docker_plugin
|
||||
- docker_prune
|
||||
- docker_secret
|
||||
- docker_swarm
|
||||
- docker_swarm_info
|
||||
- docker_swarm_service
|
||||
- docker_swarm_service_info
|
||||
- docker_volume
|
||||
- docker_volume_info
|
||||
@@ -0,0 +1,451 @@
|
||||
# Based on the chroot connection plugin by Maykel Moya
|
||||
#
|
||||
# (c) 2014, Lorin Hochstein
|
||||
# (c) 2015, Leendert Brouwer (https://github.com/objectified)
|
||||
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
# Copyright (c) 2017 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author:
|
||||
- Lorin Hochestein (!UNKNOWN)
|
||||
- Leendert Brouwer (!UNKNOWN)
|
||||
name: docker
|
||||
short_description: Run tasks in docker containers
|
||||
description:
|
||||
- Run commands or put/fetch files to an existing docker container.
|
||||
- Uses the Docker CLI to execute commands in the container. If you prefer
|
||||
to directly connect to the Docker daemon, use the
|
||||
R(community.docker.docker_api,ansible_collections.community.docker.docker_api_connection)
|
||||
connection plugin.
|
||||
options:
|
||||
remote_addr:
|
||||
description:
|
||||
- The name of the container you want to access.
|
||||
default: inventory_hostname
|
||||
vars:
|
||||
- name: inventory_hostname
|
||||
- name: ansible_host
|
||||
- name: ansible_docker_host
|
||||
remote_user:
|
||||
description:
|
||||
- The user to execute as inside the container.
|
||||
- If Docker is too old to allow this (< 1.7), the one set by Docker itself will be used.
|
||||
vars:
|
||||
- name: ansible_user
|
||||
- name: ansible_docker_user
|
||||
ini:
|
||||
- section: defaults
|
||||
key: remote_user
|
||||
env:
|
||||
- name: ANSIBLE_REMOTE_USER
|
||||
cli:
|
||||
- name: user
|
||||
keyword:
|
||||
- name: remote_user
|
||||
docker_extra_args:
|
||||
description:
|
||||
- Extra arguments to pass to the docker command line.
|
||||
default: ''
|
||||
vars:
|
||||
- name: ansible_docker_extra_args
|
||||
ini:
|
||||
- section: docker_connection
|
||||
key: extra_cli_args
|
||||
container_timeout:
|
||||
default: 10
|
||||
description:
|
||||
- Controls how long we can wait to access reading output from the container once execution started.
|
||||
env:
|
||||
- name: ANSIBLE_TIMEOUT
|
||||
- name: ANSIBLE_DOCKER_TIMEOUT
|
||||
version_added: 2.2.0
|
||||
ini:
|
||||
- key: timeout
|
||||
section: defaults
|
||||
- key: timeout
|
||||
section: docker_connection
|
||||
version_added: 2.2.0
|
||||
vars:
|
||||
- name: ansible_docker_timeout
|
||||
version_added: 2.2.0
|
||||
cli:
|
||||
- name: timeout
|
||||
type: integer
|
||||
'''
|
||||
|
||||
import fcntl
|
||||
import os
|
||||
import os.path
|
||||
import subprocess
|
||||
import re
|
||||
|
||||
from ansible.compat import selectors
|
||||
from ansible.errors import AnsibleError, AnsibleFileNotFound
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
from ansible.module_utils.common.process import get_bin_path
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
|
||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||
from ansible.utils.display import Display
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
class Connection(ConnectionBase):
|
||||
''' Local docker based connections '''
|
||||
|
||||
transport = 'community.docker.docker'
|
||||
has_pipelining = True
|
||||
|
||||
def __init__(self, play_context, new_stdin, *args, **kwargs):
|
||||
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
|
||||
|
||||
# Note: docker supports running as non-root in some configurations.
|
||||
# (For instance, setting the UNIX socket file to be readable and
|
||||
# writable by a specific UNIX group and then putting users into that
|
||||
# group). Therefore we don't check that the user is root when using
|
||||
# this connection. But if the user is getting a permission denied
|
||||
# error it probably means that docker on their system is only
|
||||
# configured to be connected to by root and they are not running as
|
||||
# root.
|
||||
|
||||
self._docker_args = []
|
||||
self._container_user_cache = {}
|
||||
self._version = None
|
||||
|
||||
# Windows uses Powershell modules
|
||||
if getattr(self._shell, "_IS_WINDOWS", False):
|
||||
self.module_implementation_preferences = ('.ps1', '.exe', '')
|
||||
|
||||
if 'docker_command' in kwargs:
|
||||
self.docker_cmd = kwargs['docker_command']
|
||||
else:
|
||||
try:
|
||||
self.docker_cmd = get_bin_path('docker')
|
||||
except ValueError:
|
||||
raise AnsibleError("docker command not found in PATH")
|
||||
|
||||
@staticmethod
|
||||
def _sanitize_version(version):
|
||||
version = re.sub(u'[^0-9a-zA-Z.]', u'', version)
|
||||
version = re.sub(u'^v', u'', version)
|
||||
return version
|
||||
|
||||
def _old_docker_version(self):
|
||||
cmd_args = self._docker_args
|
||||
|
||||
old_version_subcommand = ['version']
|
||||
|
||||
old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
|
||||
p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
cmd_output, err = p.communicate()
|
||||
|
||||
return old_docker_cmd, to_native(cmd_output), err, p.returncode
|
||||
|
||||
def _new_docker_version(self):
|
||||
# no result yet, must be newer Docker version
|
||||
cmd_args = self._docker_args
|
||||
|
||||
new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"]
|
||||
|
||||
new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
|
||||
p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
cmd_output, err = p.communicate()
|
||||
return new_docker_cmd, to_native(cmd_output), err, p.returncode
|
||||
|
||||
def _get_docker_version(self):
|
||||
|
||||
cmd, cmd_output, err, returncode = self._old_docker_version()
|
||||
if returncode == 0:
|
||||
for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'):
|
||||
if line.startswith(u'Server version:'): # old docker versions
|
||||
return self._sanitize_version(line.split()[2])
|
||||
|
||||
cmd, cmd_output, err, returncode = self._new_docker_version()
|
||||
if returncode:
|
||||
raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))
|
||||
|
||||
return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))
|
||||
|
||||
def _get_docker_remote_user(self):
|
||||
""" Get the default user configured in the docker container """
|
||||
container = self.get_option('remote_addr')
|
||||
if container in self._container_user_cache:
|
||||
return self._container_user_cache[container]
|
||||
p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', container],
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
out, err = p.communicate()
|
||||
out = to_text(out, errors='surrogate_or_strict')
|
||||
|
||||
if p.returncode != 0:
|
||||
display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
|
||||
self._container_user_cache[container] = None
|
||||
return None
|
||||
|
||||
# The default exec user is root, unless it was changed in the Dockerfile with USER
|
||||
user = out.strip() or u'root'
|
||||
self._container_user_cache[container] = user
|
||||
return user
|
||||
|
||||
def _build_exec_cmd(self, cmd):
|
||||
""" Build the local docker exec command to run cmd on remote_host
|
||||
|
||||
If remote_user is available and is supported by the docker
|
||||
version we are using, it will be provided to docker exec.
|
||||
"""
|
||||
|
||||
local_cmd = [self.docker_cmd]
|
||||
|
||||
if self._docker_args:
|
||||
local_cmd += self._docker_args
|
||||
|
||||
local_cmd += [b'exec']
|
||||
|
||||
if self.remote_user is not None:
|
||||
local_cmd += [b'-u', self.remote_user]
|
||||
|
||||
# -i is needed to keep stdin open which allows pipelining to work
|
||||
local_cmd += [b'-i', self.get_option('remote_addr')] + cmd
|
||||
|
||||
return local_cmd
|
||||
|
||||
def _set_docker_args(self):
    # TODO: this is mostly for backwards compatibility, play_context is used as fallback for older versions
    # docker arguments
    # Rebuild the shared list in place (rather than rebinding) so any
    # existing references to self._docker_args stay valid.
    del self._docker_args[:]
    extra_args = self.get_option('docker_extra_args') or getattr(self._play_context, 'docker_extra_args', '')
    if extra_args:
        # NOTE(review): naive single-space split — quoted arguments
        # containing spaces are not supported here.
        self._docker_args += extra_args.split(' ')
|
||||
|
||||
def _set_conn_data(self):

    ''' initialize for the connection, cannot do only in init since all data is not ready at that point '''

    self._set_docker_args()

    # Prefer the plugin option; fall back to play_context for older
    # Ansible versions where the option is not populated.
    self.remote_user = self.get_option('remote_user')
    if self.remote_user is None and self._play_context.remote_user is not None:
        self.remote_user = self._play_context.remote_user

    # timeout, use unless default and pc is different, backwards compat
    # 10 is the container_timeout option's default; if the option is at its
    # default but play_context carries a different value, assume the
    # play_context value was set explicitly and honor it.
    self.timeout = self.get_option('container_timeout')
    if self.timeout == 10 and self.timeout != self._play_context.timeout:
        self.timeout = self._play_context.timeout
|
||||
|
||||
@property
def docker_version(self):
    # Lazily-determined Docker server version, cached in self._version.
    # Raises AnsibleError for servers older than 1.3; the special value
    # 'dev' (development builds) is accepted with a warning.
    if not self._version:
        self._set_docker_args()

        self._version = self._get_docker_version()
        if self._version == u'dev':
            display.warning(u'Docker version number is "dev". Will assume latest version.')
        if self._version != u'dev' and LooseVersion(self._version) < LooseVersion(u'1.3'):
            raise AnsibleError('docker connection type requires docker 1.3 or higher')
    return self._version
|
||||
|
||||
def _get_actual_user(self):
    """Return the user that commands will effectively run as in the container.

    If an explicit remote_user was requested and the docker CLI supports
    ``docker exec -u`` (docker >= 1.7, or a 'dev' build), that user is
    returned. On older docker the request is dropped with a warning and the
    container's configured default user is used instead. Returns None when
    no user was requested and verbosity is too low to make a lookup
    worthwhile.
    """
    if self.remote_user is not None:
        # An explicit user is provided
        if self.docker_version == u'dev' or LooseVersion(self.docker_version) >= LooseVersion(u'1.7'):
            # Support for specifying the exec user was added in docker 1.7
            return self.remote_user
        else:
            self.remote_user = None
            actual_user = self._get_docker_remote_user()
            if actual_user != self.get_option('remote_user'):
                # Fix: this previously formatted `self.actual_user`, an
                # attribute never assigned on this class — use the user we
                # just looked up instead.
                display.warning(u'docker {0} does not support remote_user, using container default: {1}'
                                .format(self.docker_version, actual_user or u'?'))
            return actual_user
    elif self._display.verbosity > 2:
        # Since we're not setting the actual_user, look it up so we have it for logging later
        # Only do this if display verbosity is high enough that we'll need the value
        # This saves overhead from calling into docker when we don't need to.
        return self._get_docker_remote_user()
    else:
        return None
|
||||
|
||||
def _connect(self, port=None):
    """ Connect to the container. Nothing to do """
    super(Connection, self)._connect()
    if not self._connected:
        # Resolve options/play_context data now — it is not all available
        # at __init__ time (see _set_conn_data).
        self._set_conn_data()
        actual_user = self._get_actual_user()
        display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
            actual_user or u'?'), host=self.get_option('remote_addr')
        )
        self._connected = True
|
||||
|
||||
def exec_command(self, cmd, in_data=None, sudoable=False):
    """ Run a command on the docker host """

    self._set_conn_data()

    super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

    # Wrap cmd in a shell invocation and prefix with `docker exec ...`.
    local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])

    display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self.get_option('remote_addr'))
    display.debug("opening command with Popen()")

    local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]

    p = subprocess.Popen(
        local_cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    display.debug("done running command with Popen()")

    # Privilege escalation: wait for the become plugin's password prompt on
    # the child's stdout/stderr, then feed the password on stdin.
    if self.become and self.become.expect_prompt() and sudoable:
        # Make both pipes non-blocking so reads after select() don't stall.
        fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
        fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
        selector = selectors.DefaultSelector()
        selector.register(p.stdout, selectors.EVENT_READ)
        selector.register(p.stderr, selectors.EVENT_READ)

        become_output = b''
        try:
            # Accumulate output until the become plugin reports either
            # success (no password needed) or the password prompt.
            while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
                events = selector.select(self.timeout)
                if not events:
                    stdout, stderr = p.communicate()
                    raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))

                for key, event in events:
                    if key.fileobj == p.stdout:
                        chunk = p.stdout.read()
                    elif key.fileobj == p.stderr:
                        chunk = p.stderr.read()

                if not chunk:
                    # EOF before the prompt appeared.
                    stdout, stderr = p.communicate()
                    raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
                become_output += chunk
        finally:
            selector.close()

        if not self.become.check_success(become_output):
            become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
            p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
        # Restore blocking mode before the final communicate().
        fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
        fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

    display.debug("getting output with communicate()")
    stdout, stderr = p.communicate(in_data)
    display.debug("done communicating")

    display.debug("done with docker.exec_command()")
    return (p.returncode, stdout, stderr)
|
||||
|
||||
def _prefix_login_path(self, remote_path):
|
||||
''' Make sure that we put files into a standard path
|
||||
|
||||
If a path is relative, then we need to choose where to put it.
|
||||
ssh chooses $HOME but we aren't guaranteed that a home dir will
|
||||
exist in any given chroot. So for now we're choosing "/" instead.
|
||||
This also happens to be the former default.
|
||||
|
||||
Can revisit using $HOME instead if it's a problem
|
||||
'''
|
||||
if getattr(self._shell, "_IS_WINDOWS", False):
|
||||
import ntpath
|
||||
return ntpath.normpath(remote_path)
|
||||
else:
|
||||
if not remote_path.startswith(os.path.sep):
|
||||
remote_path = os.path.join(os.path.sep, remote_path)
|
||||
return os.path.normpath(remote_path)
|
||||
|
||||
def put_file(self, in_path, out_path):
    """ Transfer a file from local to docker container """
    self._set_conn_data()
    super(Connection, self).put_file(in_path, out_path)
    display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))

    out_path = self._prefix_login_path(out_path)
    if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
        raise AnsibleFileNotFound(
            "file or module does not exist: %s" % to_native(in_path))

    # Quoted because out_path is interpolated into a shell command below.
    out_path = shlex_quote(out_path)
    # Older docker doesn't have native support for copying files into
    # running containers, so we use docker exec to implement this
    # Although docker version 1.8 and later provide support, the
    # owner and group of the files are always set to root
    with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
        if not os.fstat(in_file.fileno()).st_size:
            # Empty source file: tell dd to write zero blocks so it still
            # creates/truncates the destination file.
            count = ' count=0'
        else:
            count = ''
        # Stream the local file into `dd` running inside the container.
        args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
        args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
        try:
            p = subprocess.Popen(args, stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError:
            raise AnsibleError("docker connection requires dd command in the container to put files")
        stdout, stderr = p.communicate()

        if p.returncode != 0:
            raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
                               (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
|
||||
|
||||
def fetch_file(self, in_path, out_path):
    """ Fetch a file from container to local. """
    self._set_conn_data()
    super(Connection, self).fetch_file(in_path, out_path)
    display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))

    in_path = self._prefix_login_path(in_path)
    # out_path is the final file path, but docker takes a directory, not a
    # file path
    out_dir = os.path.dirname(out_path)

    # First attempt: `docker cp` the file into the destination directory.
    args = [self.docker_cmd, "cp", "%s:%s" % (self.get_option('remote_addr'), in_path), out_dir]
    args = [to_bytes(i, errors='surrogate_or_strict') for i in args]

    p = subprocess.Popen(args, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p.communicate()

    # `docker cp` writes <out_dir>/<basename of in_path>, which may differ
    # from the requested out_path; compute where the file actually landed.
    if getattr(self._shell, "_IS_WINDOWS", False):
        import ntpath
        actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
    else:
        actual_out_path = os.path.join(out_dir, os.path.basename(in_path))

    if p.returncode != 0:
        # Older docker doesn't have native support for fetching files command `cp`
        # If `cp` fails, try to use `dd` instead
        args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
        args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
        with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
            try:
                p = subprocess.Popen(args, stdin=subprocess.PIPE,
                                     stdout=out_file, stderr=subprocess.PIPE)
            except OSError:
                raise AnsibleError("docker connection requires dd command in the container to put files")
            stdout, stderr = p.communicate()

            if p.returncode != 0:
                raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))

    # Rename if needed
    if actual_out_path != out_path:
        os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
|
||||
|
||||
def close(self):
    """ Terminate the connection. Nothing to do for Docker"""
    super(Connection, self).close()
    # Mark disconnected so a later _connect() re-establishes state.
    self._connected = False
|
||||
|
||||
def reset(self):
    # Clear container user cache
    # Forces _get_docker_remote_user() to re-inspect containers on next use.
    self._container_user_cache = {}
|
||||
@@ -0,0 +1,388 @@
|
||||
# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
name: docker_api
|
||||
short_description: Run tasks in docker containers
|
||||
version_added: 1.1.0
|
||||
description:
|
||||
- Run commands or put/fetch files to an existing docker container.
|
||||
- Uses Docker SDK for Python to interact directly with the Docker daemon instead of
|
||||
using the Docker CLI. Use the
|
||||
R(community.docker.docker,ansible_collections.community.docker.docker_connection)
|
||||
connection plugin if you want to use the Docker CLI.
|
||||
options:
|
||||
remote_user:
|
||||
type: str
|
||||
description:
|
||||
- The user to execute as inside the container.
|
||||
vars:
|
||||
- name: ansible_user
|
||||
- name: ansible_docker_user
|
||||
ini:
|
||||
- section: defaults
|
||||
key: remote_user
|
||||
env:
|
||||
- name: ANSIBLE_REMOTE_USER
|
||||
cli:
|
||||
- name: user
|
||||
keyword:
|
||||
- name: remote_user
|
||||
remote_addr:
|
||||
type: str
|
||||
description:
|
||||
- The name of the container you want to access.
|
||||
default: inventory_hostname
|
||||
vars:
|
||||
- name: inventory_hostname
|
||||
- name: ansible_host
|
||||
- name: ansible_docker_host
|
||||
container_timeout:
|
||||
default: 10
|
||||
description:
|
||||
- Controls how long we can wait to access reading output from the container once execution started.
|
||||
env:
|
||||
- name: ANSIBLE_TIMEOUT
|
||||
- name: ANSIBLE_DOCKER_TIMEOUT
|
||||
version_added: 2.2.0
|
||||
ini:
|
||||
- key: timeout
|
||||
section: defaults
|
||||
- key: timeout
|
||||
section: docker_connection
|
||||
version_added: 2.2.0
|
||||
vars:
|
||||
- name: ansible_docker_timeout
|
||||
version_added: 2.2.0
|
||||
cli:
|
||||
- name: timeout
|
||||
type: integer
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.var_names
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
'''
|
||||
|
||||
import io
|
||||
import os
|
||||
import os.path
|
||||
import shutil
|
||||
import tarfile
|
||||
|
||||
from ansible.errors import AnsibleFileNotFound, AnsibleConnectionFailure
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
|
||||
from ansible.plugins.connection import ConnectionBase
|
||||
from ansible.utils.display import Display
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
RequestException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.plugin_utils.socket_handler import (
|
||||
DockerSocketHandler,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.plugin_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
)
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError, NotFound
|
||||
except Exception:
|
||||
# missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common
|
||||
pass
|
||||
|
||||
MIN_DOCKER_PY = '1.7.0'
|
||||
MIN_DOCKER_API = None
|
||||
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
class Connection(ConnectionBase):
    ''' Local docker based connections '''

    transport = 'community.docker.docker_api'
    has_pipelining = True

    def _call_client(self, callable, not_found_can_be_resource=False):
        # Invoke a Docker SDK call, translating SDK exceptions into
        # Ansible connection failures with the container name attached.
        # `not_found_can_be_resource` adjusts the NotFound message: the
        # missing thing may be a path inside the container, not the
        # container itself.
        try:
            return callable()
        except NotFound as e:
            if not_found_can_be_resource:
                raise AnsibleConnectionFailure('Could not find container "{1}" or resource in it ({0})'.format(e, self.get_option('remote_addr')))
            else:
                raise AnsibleConnectionFailure('Could not find container "{1}" ({0})'.format(e, self.get_option('remote_addr')))
        except APIError as e:
            # HTTP 409 from the daemon indicates the container is paused.
            if e.response and e.response.status_code == 409:
                raise AnsibleConnectionFailure('The container "{1}" has been paused ({0})'.format(e, self.get_option('remote_addr')))
            self.client.fail(
                'An unexpected docker error occurred for container "{1}": {0}'.format(e, self.get_option('remote_addr'))
            )
        except DockerException as e:
            self.client.fail(
                'An unexpected docker error occurred for container "{1}": {0}'.format(e, self.get_option('remote_addr'))
            )
        except RequestException as e:
            self.client.fail(
                'An unexpected requests error occurred for container "{1}" when docker-py tried to talk to the docker daemon: {0}'
                .format(e, self.get_option('remote_addr'))
            )

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        # Docker SDK client; created lazily in _connect().
        self.client = None
        # Cache: user (or None) -> (uid, gid) inside the container; filled
        # by put_file(), cleared by reset().
        self.ids = dict()

        # Windows uses Powershell modules
        if getattr(self._shell, "_IS_WINDOWS", False):
            self.module_implementation_preferences = ('.ps1', '.exe', '')

        # User commands effectively run as; resolved in _connect().
        self.actual_user = None

    def _connect(self, port=None):
        """ Connect to the container. Nothing to do """
        super(Connection, self)._connect()
        if not self._connected:
            self.actual_user = self.get_option('remote_user')
            display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
                self.actual_user or u'?'), host=self.get_option('remote_addr')
            )
            if self.client is None:
                self.client = AnsibleDockerClient(self, min_docker_version=MIN_DOCKER_PY, min_docker_api_version=MIN_DOCKER_API)
            self._connected = True

            if self.actual_user is None and display.verbosity > 2:
                # Since we're not setting the actual_user, look it up so we have it for logging later
                # Only do this if display verbosity is high enough that we'll need the value
                # This saves overhead from calling into docker when we don't need to
                display.vvv(u"Trying to determine actual user")
                result = self._call_client(lambda: self.client.inspect_container(self.get_option('remote_addr')))
                if result.get('Config'):
                    self.actual_user = result['Config'].get('User')
                    if self.actual_user is not None:
                        display.vvv(u"Actual user is '{0}'".format(self.actual_user))

    def exec_command(self, cmd, in_data=None, sudoable=False):
        """ Run a command on the docker host """
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        command = [self._play_context.executable, '-c', to_text(cmd)]

        do_become = self.become and self.become.expect_prompt() and sudoable

        display.vvv(
            u"EXEC {0}{1}{2}".format(
                to_text(command),
                ', with stdin ({0} bytes)'.format(len(in_data)) if in_data is not None else '',
                ', with become prompt' if do_become else '',
            ),
            host=self.get_option('remote_addr')
        )

        # We only need an attached stdin socket when piping data in or when
        # a become password must be written to the exec session.
        need_stdin = True if (in_data is not None) or do_become else False

        exec_data = self._call_client(lambda: self.client.exec_create(
            self.get_option('remote_addr'),
            command,
            stdout=True,
            stderr=True,
            stdin=need_stdin,
            user=self.get_option('remote_user') or '',
            # workdir=None, - only works for Docker SDK for Python 3.0.0 and later
        ))
        exec_id = exec_data['Id']

        if need_stdin:
            # Attach a raw socket so we can interleave reading output with
            # writing the become password / stdin payload.
            exec_socket = self._call_client(lambda: self.client.exec_start(
                exec_id,
                detach=False,
                socket=True,
            ))
            try:
                with DockerSocketHandler(display, exec_socket, container=self.get_option('remote_addr')) as exec_socket_handler:
                    if do_become:
                        # Single-element list so the nested callback can
                        # mutate the accumulated output.
                        become_output = [b'']

                        def append_become_output(stream_id, data):
                            become_output[0] += data

                        exec_socket_handler.set_block_done_callback(append_become_output)

                        # Wait until the become plugin reports success or a
                        # password prompt in the accumulated output.
                        while not self.become.check_success(become_output[0]) and not self.become.check_password_prompt(become_output[0]):
                            if not exec_socket_handler.select(self.get_option('container_timeout')):
                                stdout, stderr = exec_socket_handler.consume()
                                raise AnsibleConnectionFailure('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output[0]))

                            if exec_socket_handler.is_eof():
                                raise AnsibleConnectionFailure('privilege output closed while waiting for password prompt:\n' + to_native(become_output[0]))

                        if not self.become.check_success(become_output[0]):
                            become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
                            exec_socket_handler.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')

                    if in_data is not None:
                        exec_socket_handler.write(in_data)

                    stdout, stderr = exec_socket_handler.consume()
            finally:
                exec_socket.close()
        else:
            # No stdin needed: let the SDK collect demultiplexed output.
            stdout, stderr = self._call_client(lambda: self.client.exec_start(
                exec_id,
                detach=False,
                stream=False,
                socket=False,
                demux=True,
            ))

        result = self._call_client(lambda: self.client.exec_inspect(exec_id))

        # ExitCode may be absent/None; normalize streams to bytes.
        return result.get('ExitCode') or 0, stdout or b'', stderr or b''

    def _prefix_login_path(self, remote_path):
        ''' Make sure that we put files into a standard path

        If a path is relative, then we need to choose where to put it.
        ssh chooses $HOME but we aren't guaranteed that a home dir will
        exist in any given chroot. So for now we're choosing "/" instead.
        This also happens to be the former default.

        Can revisit using $HOME instead if it's a problem
        '''
        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath
            return ntpath.normpath(remote_path)
        else:
            if not remote_path.startswith(os.path.sep):
                remote_path = os.path.join(os.path.sep, remote_path)
            return os.path.normpath(remote_path)

    def put_file(self, in_path, out_path):
        """ Transfer a file from local to docker container """
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))

        out_path = self._prefix_login_path(out_path)
        if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
            raise AnsibleFileNotFound(
                "file or module does not exist: %s" % to_native(in_path))

        if self.actual_user not in self.ids:
            # Determine uid/gid of the exec user so the uploaded file gets
            # sensible ownership (put_archive keeps tar member ownership).
            dummy, ids, dummy = self.exec_command(b'id -u && id -g')
            try:
                user_id, group_id = ids.splitlines()
                self.ids[self.actual_user] = int(user_id), int(group_id)
                display.vvvv(
                    'PUT: Determined uid={0} and gid={1} for user "{2}"'.format(user_id, group_id, self.actual_user),
                    host=self.get_option('remote_addr')
                )
            except Exception as e:
                raise AnsibleConnectionFailure(
                    'Error while determining user and group ID of current user in container "{1}": {0}\nGot value: {2!r}'
                    .format(e, self.get_option('remote_addr'), ids)
                )

        b_in_path = to_bytes(in_path, errors='surrogate_or_strict')

        out_dir, out_file = os.path.split(out_path)

        # TODO: stream tar file, instead of creating it in-memory into a BytesIO

        bio = io.BytesIO()
        with tarfile.open(fileobj=bio, mode='w|', dereference=True, encoding='utf-8') as tar:
            # Note that without both name (bytes) and arcname (unicode), this either fails for
            # Python 2.6/2.7, Python 3.5/3.6, or Python 3.7+. Only when passing both (in this
            # form) it works with Python 2.6, 2.7, 3.5, 3.6, and 3.7 up to 3.9.
            tarinfo = tar.gettarinfo(b_in_path, arcname=to_text(out_file))
            user_id, group_id = self.ids[self.actual_user]
            tarinfo.uid = user_id
            tarinfo.uname = ''
            if self.actual_user:
                tarinfo.uname = self.actual_user
            tarinfo.gid = group_id
            tarinfo.gname = ''
            # Strip group/other bits; only the owner's permissions survive.
            tarinfo.mode &= 0o700
            with open(b_in_path, 'rb') as f:
                tar.addfile(tarinfo, fileobj=f)
        data = bio.getvalue()

        ok = self._call_client(lambda: self.client.put_archive(
            self.get_option('remote_addr'),
            out_dir,
            data,  # can also be file object for streaming; this is only clear from the
                   # implementation of put_archive(), which uses requests's put().
                   # See https://2.python-requests.org/en/master/user/advanced/#streaming-uploads
                   # WARNING: might not work with all transports!
        ), not_found_can_be_resource=True)
        if not ok:
            raise AnsibleConnectionFailure(
                'Unknown error while creating file "{0}" in container "{1}".'
                .format(out_path, self.get_option('remote_addr'))
            )

    def fetch_file(self, in_path, out_path):
        """ Fetch a file from container to local. """
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))

        in_path = self._prefix_login_path(in_path)
        b_out_path = to_bytes(out_path, errors='surrogate_or_strict')

        # Track every path we have tried so a symlink cycle terminates.
        considered_in_paths = set()

        while True:
            if in_path in considered_in_paths:
                raise AnsibleConnectionFailure('Found infinite symbolic link loop when trying to fetch "{0}"'.format(in_path))
            considered_in_paths.add(in_path)

            display.vvvv('FETCH: Fetching "%s"' % in_path, host=self.get_option('remote_addr'))
            stream, stats = self._call_client(lambda: self.client.get_archive(
                self.get_option('remote_addr'),
                in_path,
            ), not_found_can_be_resource=True)

            # TODO: stream tar file instead of downloading it into a BytesIO

            bio = io.BytesIO()
            for chunk in stream:
                bio.write(chunk)
            bio.seek(0)

            with tarfile.open(fileobj=bio, mode='r|') as tar:
                symlink_member = None
                first = True
                for member in tar:
                    if not first:
                        raise AnsibleConnectionFailure('Received tarfile contains more than one file!')
                    first = False
                    if member.issym():
                        # Defer symlink handling until after the loop.
                        symlink_member = member
                        continue
                    if not member.isfile():
                        raise AnsibleConnectionFailure('Remote file "%s" is not a regular file or a symbolic link' % in_path)
                    in_f = tar.extractfile(member)  # in Python 2, this *cannot* be used in `with`...
                    with open(b_out_path, 'wb') as out_f:
                        shutil.copyfileobj(in_f, out_f, member.size)
                if first:
                    raise AnsibleConnectionFailure('Received tarfile is empty!')
                # If the only member was a file, it's already extracted. If it is a symlink, process it now.
                if symlink_member is not None:
                    # Resolve the link relative to its directory and retry.
                    in_path = os.path.join(os.path.split(in_path)[0], symlink_member.linkname)
                    display.vvvv('FETCH: Following symbolic link to "%s"' % in_path, host=self.get_option('remote_addr'))
                    continue
                return

    def close(self):
        """ Terminate the connection. Nothing to do for Docker"""
        super(Connection, self).close()
        self._connected = False

    def reset(self):
        # Drop cached uid/gid lookups so they are re-determined on next use.
        self.ids.clear()
|
||||
@@ -0,0 +1,239 @@
|
||||
# (c) 2021 Jeff Goldschrafe <jeff@holyhandgrenade.org>
|
||||
# Based on Ansible local connection plugin by:
|
||||
# (c) 2012 Michael DeHaan <michael.dehaan@gmail.com>
|
||||
# (c) 2015, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: nsenter
|
||||
short_description: execute on host running controller container
|
||||
version_added: 1.9.0
|
||||
description:
|
||||
- This connection plugin allows Ansible, running in a privileged container, to execute tasks on the container
|
||||
host instead of in the container itself.
|
||||
- This is useful for running Ansible in a pull model, while still keeping the Ansible control node
|
||||
containerized.
|
||||
- It relies on having privileged access to run C(nsenter) in the host's PID namespace, allowing it to enter the
|
||||
namespaces of the provided PID (default PID 1, or init/systemd).
|
||||
author: Jeff Goldschrafe (@jgoldschrafe)
|
||||
options:
|
||||
nsenter_pid:
|
||||
description:
|
||||
- PID to attach with using nsenter.
|
||||
- The default should be fine unless you are attaching as a non-root user.
|
||||
type: int
|
||||
default: 1
|
||||
vars:
|
||||
- name: ansible_nsenter_pid
|
||||
env:
|
||||
- name: ANSIBLE_NSENTER_PID
|
||||
ini:
|
||||
- section: nsenter_connection
|
||||
key: nsenter_pid
|
||||
notes:
|
||||
- The remote user is ignored; this plugin always runs as root.
|
||||
- >-
|
||||
This plugin requires the Ansible controller container to be launched in the following way:
|
||||
(1) The container image contains the C(nsenter) program;
|
||||
(2) The container is launched in privileged mode;
|
||||
(3) The container is launched in the host's PID namespace (C(--pid host)).
|
||||
'''
|
||||
|
||||
import os
|
||||
import pty
|
||||
import shutil
|
||||
import subprocess
|
||||
import fcntl
|
||||
|
||||
import ansible.constants as C
|
||||
from ansible.errors import AnsibleError, AnsibleFileNotFound
|
||||
from ansible.module_utils.compat import selectors
|
||||
from ansible.module_utils.six import binary_type, text_type
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
|
||||
from ansible.plugins.connection import ConnectionBase
|
||||
from ansible.utils.display import Display
|
||||
from ansible.utils.path import unfrackpath
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
class Connection(ConnectionBase):
|
||||
'''Connections to a container host using nsenter
|
||||
'''
|
||||
|
||||
transport = 'community.docker.nsenter'
|
||||
has_pipelining = False
|
||||
|
||||
def __init__(self, *args, **kwargs):
    super(Connection, self).__init__(*args, **kwargs)
    # Working directory for executed commands; None until set elsewhere.
    self.cwd = None
|
||||
|
||||
def _connect(self):
    # Re-read the target PID option on every (re)connect.
    self._nsenter_pid = self.get_option("nsenter_pid")

    # Because nsenter requires very high privileges, our remote user
    # is always assumed to be root.
    self._play_context.remote_user = "root"

    if not self._connected:
        display.vvv(
            u"ESTABLISH NSENTER CONNECTION FOR USER: {0}".format(
                self._play_context.remote_user
            ),
            host=self._play_context.remote_addr,
        )
        self._connected = True
    return self
|
||||
|
||||
def exec_command(self, cmd, in_data=None, sudoable=True):
|
||||
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
|
||||
|
||||
display.debug("in nsenter.exec_command()")
|
||||
|
||||
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None
|
||||
|
||||
if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):
|
||||
raise AnsibleError("failed to find the executable specified %s."
|
||||
" Please verify if the executable exists and re-try." % executable)
|
||||
|
||||
# Rewrite the provided command to prefix it with nsenter
|
||||
nsenter_cmd_parts = [
|
||||
"nsenter",
|
||||
"--ipc",
|
||||
"--mount",
|
||||
"--net",
|
||||
"--pid",
|
||||
"--uts",
|
||||
"--preserve-credentials",
|
||||
"--target={0}".format(self._nsenter_pid),
|
||||
"--",
|
||||
]
|
||||
|
||||
if isinstance(cmd, (text_type, binary_type)):
|
||||
cmd_parts = nsenter_cmd_parts + [cmd]
|
||||
cmd = to_bytes(" ".join(cmd_parts))
|
||||
else:
|
||||
cmd_parts = nsenter_cmd_parts + cmd
|
||||
cmd = [to_bytes(arg) for arg in cmd_parts]
|
||||
|
||||
display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self._play_context.remote_addr)
|
||||
display.debug("opening command with Popen()")
|
||||
|
||||
master = None
|
||||
stdin = subprocess.PIPE
|
||||
|
||||
# This plugin does not support pipelining. This diverges from the behavior of
|
||||
# the core "local" connection plugin that this one derives from.
|
||||
if sudoable and self.become and self.become.expect_prompt():
|
||||
# Create a pty if sudoable for privlege escalation that needs it.
|
||||
# Falls back to using a standard pipe if this fails, which may
|
||||
# cause the command to fail in certain situations where we are escalating
|
||||
# privileges or the command otherwise needs a pty.
|
||||
try:
|
||||
master, stdin = pty.openpty()
|
||||
except (IOError, OSError) as e:
|
||||
display.debug("Unable to open pty: %s" % to_native(e))
|
||||
|
||||
p = subprocess.Popen(
|
||||
cmd,
|
||||
shell=isinstance(cmd, (text_type, binary_type)),
|
||||
executable=executable if isinstance(cmd, (text_type, binary_type)) else None,
|
||||
cwd=self.cwd,
|
||||
stdin=stdin,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
)
|
||||
|
||||
# if we created a master, we can close the other half of the pty now, otherwise master is stdin
|
||||
if master is not None:
|
||||
os.close(stdin)
|
||||
|
||||
display.debug("done running command with Popen()")
|
||||
|
||||
if self.become and self.become.expect_prompt() and sudoable:
|
||||
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
|
||||
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
|
||||
selector = selectors.DefaultSelector()
|
||||
selector.register(p.stdout, selectors.EVENT_READ)
|
||||
selector.register(p.stderr, selectors.EVENT_READ)
|
||||
|
||||
become_output = b''
|
||||
try:
|
||||
while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
|
||||
events = selector.select(self._play_context.timeout)
|
||||
if not events:
|
||||
stdout, stderr = p.communicate()
|
||||
raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
|
||||
|
||||
for key, event in events:
|
||||
if key.fileobj == p.stdout:
|
||||
chunk = p.stdout.read()
|
||||
elif key.fileobj == p.stderr:
|
||||
chunk = p.stderr.read()
|
||||
|
||||
if not chunk:
|
||||
stdout, stderr = p.communicate()
|
||||
raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
|
||||
become_output += chunk
|
||||
finally:
|
||||
selector.close()
|
||||
|
||||
if not self.become.check_success(become_output):
|
||||
become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
|
||||
if master is None:
|
||||
p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
|
||||
else:
|
||||
os.write(master, to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
|
||||
|
||||
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
|
||||
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
|
||||
|
||||
display.debug("getting output with communicate()")
|
||||
stdout, stderr = p.communicate(in_data)
|
||||
display.debug("done communicating")
|
||||
|
||||
# finally, close the other half of the pty, if it was created
|
||||
if master:
|
||||
os.close(master)
|
||||
|
||||
display.debug("done with nsenter.exec_command()")
|
||||
return (p.returncode, stdout, stderr)
|
||||
|
||||
def put_file(self, in_path, out_path):
|
||||
super(Connection, self).put_file(in_path, out_path)
|
||||
|
||||
in_path = unfrackpath(in_path, basedir=self.cwd)
|
||||
out_path = unfrackpath(out_path, basedir=self.cwd)
|
||||
|
||||
display.vvv(u"PUT {0} to {1}".format(in_path, out_path), host=self._play_context.remote_addr)
|
||||
try:
|
||||
with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file:
|
||||
in_data = in_file.read()
|
||||
rc, out, err = self.exec_command(cmd=["tee", out_path], in_data=in_data)
|
||||
if rc != 0:
|
||||
raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, err))
|
||||
except IOError as e:
|
||||
raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, to_native(e)))
|
||||
|
||||
def fetch_file(self, in_path, out_path):
|
||||
super(Connection, self).fetch_file(in_path, out_path)
|
||||
|
||||
in_path = unfrackpath(in_path, basedir=self.cwd)
|
||||
out_path = unfrackpath(out_path, basedir=self.cwd)
|
||||
|
||||
try:
|
||||
rc, out, err = self.exec_command(cmd=["cat", in_path])
|
||||
display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr)
|
||||
if rc != 0:
|
||||
raise AnsibleError("failed to transfer file to {0}: {1}".format(in_path, err))
|
||||
with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as out_file:
|
||||
out_file.write(out)
|
||||
except IOError as e:
|
||||
raise AnsibleError("failed to transfer file to {0}: {1}".format(to_native(out_path), to_native(e)))
|
||||
|
||||
def close(self):
|
||||
''' terminate the connection; nothing to do here '''
|
||||
self._connected = False
|
||||
@@ -0,0 +1,187 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class ModuleDocFragment(object):
|
||||
|
||||
# Docker doc fragment
|
||||
DOCUMENTATION = r'''
|
||||
|
||||
options:
|
||||
docker_host:
|
||||
description:
|
||||
- The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
|
||||
TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
|
||||
the module will automatically replace C(tcp) in the connection URL with C(https).
|
||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
|
||||
instead. If the environment variable is not set, the default value will be used.
|
||||
type: str
|
||||
default: unix://var/run/docker.sock
|
||||
aliases: [ docker_url ]
|
||||
tls_hostname:
|
||||
description:
|
||||
- When verifying the authenticity of the Docker Host server, provide the expected name of the server.
|
||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
|
||||
be used instead. If the environment variable is not set, the default value will be used.
|
||||
- The current default value is C(localhost). This default is deprecated and will change in community.docker
|
||||
2.0.0 to be a value computed from I(docker_host). Explicitly specify C(localhost) to make sure this value
|
||||
will still be used, and to disable the deprecation message which will be shown otherwise.
|
||||
type: str
|
||||
api_version:
|
||||
description:
|
||||
- The version of the Docker API running on the Docker Host.
|
||||
- Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
|
||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
|
||||
used instead. If the environment variable is not set, the default value will be used.
|
||||
type: str
|
||||
default: auto
|
||||
aliases: [ docker_api_version ]
|
||||
timeout:
|
||||
description:
|
||||
- The maximum amount of time in seconds to wait on a response from the API.
|
||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
|
||||
instead. If the environment variable is not set, the default value will be used.
|
||||
type: int
|
||||
default: 60
|
||||
ca_cert:
|
||||
description:
|
||||
- Use a CA certificate when performing server verification by providing the path to a CA certificate file.
|
||||
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
|
||||
the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
|
||||
type: path
|
||||
aliases: [ tls_ca_cert, cacert_path ]
|
||||
client_cert:
|
||||
description:
|
||||
- Path to the client's TLS certificate file.
|
||||
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
|
||||
the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
|
||||
type: path
|
||||
aliases: [ tls_client_cert, cert_path ]
|
||||
client_key:
|
||||
description:
|
||||
- Path to the client's TLS key file.
|
||||
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
|
||||
the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
|
||||
type: path
|
||||
aliases: [ tls_client_key, key_path ]
|
||||
ssl_version:
|
||||
description:
|
||||
- Provide a valid SSL version number. Default value determined by ssl.py module.
|
||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
|
||||
used instead.
|
||||
type: str
|
||||
tls:
|
||||
description:
|
||||
- Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
|
||||
server. Note that if I(validate_certs) is set to C(yes) as well, it will take precedence.
|
||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
|
||||
instead. If the environment variable is not set, the default value will be used.
|
||||
type: bool
|
||||
default: no
|
||||
use_ssh_client:
|
||||
description:
|
||||
- For SSH transports, use the C(ssh) CLI tool instead of paramiko.
|
||||
- Requires Docker SDK for Python 4.4.0 or newer.
|
||||
type: bool
|
||||
default: no
|
||||
version_added: 1.5.0
|
||||
validate_certs:
|
||||
description:
|
||||
- Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
|
||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
|
||||
used instead. If the environment variable is not set, the default value will be used.
|
||||
type: bool
|
||||
default: no
|
||||
aliases: [ tls_verify ]
|
||||
debug:
|
||||
description:
|
||||
- Debug mode
|
||||
type: bool
|
||||
default: no
|
||||
|
||||
notes:
|
||||
- Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
|
||||
You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
|
||||
C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
|
||||
with the product that sets up the environment. It will set these variables for you. See
|
||||
U(https://docs.docker.com/machine/reference/env/) for more details.
|
||||
- When connecting to Docker daemon with TLS, you might need to install additional Python packages.
|
||||
For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
|
||||
- Note that the Docker SDK for Python only allows specifying the path to the Docker configuration for very few functions.
|
||||
In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
|
||||
and use C($DOCKER_CONFIG/config.json) otherwise.
|
||||
'''
|
||||
|
||||
# For plugins: allow to define common options with Ansible variables
|
||||
|
||||
VAR_NAMES = r'''
|
||||
options:
|
||||
docker_host:
|
||||
vars:
|
||||
- name: ansible_docker_docker_host
|
||||
tls_hostname:
|
||||
vars:
|
||||
- name: ansible_docker_tls_hostname
|
||||
api_version:
|
||||
vars:
|
||||
- name: ansible_docker_api_version
|
||||
timeout:
|
||||
vars:
|
||||
- name: ansible_docker_timeout
|
||||
ca_cert:
|
||||
vars:
|
||||
- name: ansible_docker_ca_cert
|
||||
client_cert:
|
||||
vars:
|
||||
- name: ansible_docker_client_cert
|
||||
client_key:
|
||||
vars:
|
||||
- name: ansible_docker_client_key
|
||||
ssl_version:
|
||||
vars:
|
||||
- name: ansible_docker_ssl_version
|
||||
tls:
|
||||
vars:
|
||||
- name: ansible_docker_tls
|
||||
validate_certs:
|
||||
vars:
|
||||
- name: ansible_docker_validate_certs
|
||||
'''
|
||||
|
||||
# Additional, more specific stuff for minimal Docker SDK for Python version < 2.0
|
||||
|
||||
DOCKER_PY_1_DOCUMENTATION = r'''
|
||||
options: {}
|
||||
notes:
|
||||
- This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
|
||||
communicate with the Docker daemon.
|
||||
requirements:
|
||||
- "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
|
||||
Python module has been superseded by L(docker,https://pypi.org/project/docker/)
|
||||
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
|
||||
For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
|
||||
install the C(docker) Python module. Note that both modules should *not*
|
||||
be installed at the same time. Also note that when both modules are installed
|
||||
and one of them is uninstalled, the other might no longer function and a
|
||||
reinstall of it is required."
|
||||
'''
|
||||
|
||||
# Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
|
||||
# Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.
|
||||
|
||||
DOCKER_PY_2_DOCUMENTATION = r'''
|
||||
options: {}
|
||||
notes:
|
||||
- This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
|
||||
communicate with the Docker daemon.
|
||||
requirements:
|
||||
- "Python >= 2.7"
|
||||
- "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
|
||||
Python module has been superseded by L(docker,https://pypi.org/project/docker/)
|
||||
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
|
||||
This module does *not* work with docker-py."
|
||||
'''
|
||||
@@ -0,0 +1,346 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2020, Felix Fontein <felix@fontein.de>
|
||||
# For the parts taken from the docker inventory script:
|
||||
# Copyright (c) 2016, Paul Durivage <paul.durivage@gmail.com>
|
||||
# Copyright (c) 2016, Chris Houseknecht <house@redhat.com>
|
||||
# Copyright (c) 2016, James Tanner <jtanner@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: docker_containers
|
||||
short_description: Ansible dynamic inventory plugin for Docker containers.
|
||||
version_added: 1.1.0
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
requirements:
|
||||
- L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
|
||||
extends_documentation_fragment:
|
||||
- ansible.builtin.constructed
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
description:
|
||||
- Reads inventories from the Docker API.
|
||||
- Uses a YAML configuration file that ends with C(docker.[yml|yaml]).
|
||||
options:
|
||||
plugin:
|
||||
description:
|
||||
- The name of this plugin, it should always be set to C(community.docker.docker_containers)
|
||||
for this plugin to recognize it as its own.
|
||||
type: str
|
||||
required: true
|
||||
choices: [ community.docker.docker_containers ]
|
||||
|
||||
connection_type:
|
||||
description:
|
||||
- Which connection type to use for the containers.
|
||||
- One way to connect to containers is to use SSH (C(ssh)). For this, the options I(default_ip) and
|
||||
I(private_ssh_port) are used. This requires that a SSH daemon is running inside the containers.
|
||||
- Alternatively, C(docker-cli) selects the
|
||||
R(docker connection plugin,ansible_collections.community.docker.docker_connection),
|
||||
and C(docker-api) (default) selects the
|
||||
R(docker_api connection plugin,ansible_collections.community.docker.docker_api_connection).
|
||||
- When C(docker-api) is used, all Docker daemon configuration values are passed from the inventory plugin
|
||||
to the connection plugin. This can be controlled with I(configure_docker_daemon).
|
||||
type: str
|
||||
default: docker-api
|
||||
choices:
|
||||
- ssh
|
||||
- docker-cli
|
||||
- docker-api
|
||||
|
||||
configure_docker_daemon:
|
||||
description:
|
||||
- Whether to pass all Docker daemon configuration from the inventory plugin to the connection plugin.
|
||||
- Only used when I(connection_type=docker-api).
|
||||
type: bool
|
||||
default: true
|
||||
version_added: 1.8.0
|
||||
|
||||
verbose_output:
|
||||
description:
|
||||
- Toggle to (not) include all available inspection metadata.
|
||||
- Note that all top-level keys will be transformed to the format C(docker_xxx).
|
||||
For example, C(HostConfig) is converted to C(docker_hostconfig).
|
||||
- If this is C(false), these values can only be used during I(constructed), I(groups), and I(keyed_groups).
|
||||
- The C(docker) inventory script always added these variables, so for compatibility set this to C(true).
|
||||
type: bool
|
||||
default: false
|
||||
|
||||
default_ip:
|
||||
description:
|
||||
- The IP address to assign to ansible_host when the container's SSH port is mapped to interface
|
||||
'0.0.0.0'.
|
||||
- Only used if I(connection_type) is C(ssh).
|
||||
type: str
|
||||
default: 127.0.0.1
|
||||
|
||||
private_ssh_port:
|
||||
description:
|
||||
- The port containers use for SSH.
|
||||
- Only used if I(connection_type) is C(ssh).
|
||||
type: int
|
||||
default: 22
|
||||
|
||||
add_legacy_groups:
|
||||
description:
|
||||
- "Add the same groups as the C(docker) inventory script does. These are the following:"
|
||||
- "C(<container id>): contains the container of this ID."
|
||||
- "C(<container name>): contains the container that has this name."
|
||||
- "C(<container short id>): contains the containers that have this short ID (first 13 letters of ID)."
|
||||
- "C(image_<image name>): contains the containers that have the image C(<image name>)."
|
||||
- "C(stack_<stack name>): contains the containers that belong to the stack C(<stack name>)."
|
||||
- "C(service_<service name>): contains the containers that belong to the service C(<service name>)"
|
||||
- "C(<docker_host>): contains the containers which belong to the Docker daemon I(docker_host).
|
||||
Useful if you run this plugin against multiple Docker daemons."
|
||||
- "C(running): contains all containers that are running."
|
||||
- "C(stopped): contains all containers that are not running."
|
||||
- If this is not set to C(true), you should use keyed groups to add the containers to groups.
|
||||
See the examples for how to do that.
|
||||
type: bool
|
||||
default: false
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Minimal example using local Docker daemon
|
||||
plugin: community.docker.docker_containers
|
||||
docker_host: unix://var/run/docker.sock
|
||||
|
||||
# Minimal example using remote Docker daemon
|
||||
plugin: community.docker.docker_containers
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
|
||||
# Example using remote Docker daemon with unverified TLS
|
||||
plugin: community.docker.docker_containers
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
tls: true
|
||||
|
||||
# Example using remote Docker daemon with verified TLS and client certificate verification
|
||||
plugin: community.docker.docker_containers
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
validate_certs: true
|
||||
ca_cert: /somewhere/ca.pem
|
||||
client_key: /somewhere/key.pem
|
||||
client_cert: /somewhere/cert.pem
|
||||
|
||||
# Example using constructed features to create groups
|
||||
plugin: community.docker.docker_containers
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
strict: false
|
||||
keyed_groups:
|
||||
# Add containers with primary network foo to a network_foo group
|
||||
- prefix: network
|
||||
key: 'docker_hostconfig.NetworkMode'
|
||||
# Add Linux hosts to an os_linux group
|
||||
- prefix: os
|
||||
key: docker_platform
|
||||
|
||||
# Example using SSH connection with an explicit fallback for when port 22 has not been
|
||||
# exported: use container name as ansible_ssh_host and 22 as ansible_ssh_port
|
||||
plugin: community.docker.docker_containers
|
||||
connection_type: ssh
|
||||
compose:
|
||||
ansible_ssh_host: ansible_ssh_host | default(docker_name[1:], true)
|
||||
ansible_ssh_port: ansible_ssh_port | default(22, true)
|
||||
'''
|
||||
|
||||
import re
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
RequestException,
|
||||
DOCKER_COMMON_ARGS_VARS,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.plugin_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
)
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except Exception:
|
||||
# missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common
|
||||
pass
|
||||
|
||||
MIN_DOCKER_PY = '1.7.0'
|
||||
MIN_DOCKER_API = None
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
    ''' Host inventory parser for ansible using Docker daemon as source. '''

    NAME = 'community.docker.docker_containers'

    def _slugify(self, value):
        # Normalize an inspect key to a 'docker_'-prefixed fact name:
        # lowercase, non-word characters replaced by underscores, leading
        # underscores stripped (e.g. 'HostConfig' -> 'docker_hostconfig').
        return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))

    def _populate(self, client):
        '''Query the Docker daemon and fill the Ansible inventory.

        Lists all containers (including stopped ones), inspects each, sets
        connection variables according to the configured connection_type,
        applies constructed-inventory options (compose/groups/keyed_groups),
        and optionally adds the legacy group layout of the old docker script.

        :param client: Docker API client (AnsibleDockerClient).
        :raises AnsibleError: on Docker API errors while listing/inspecting.
        '''
        strict = self.get_option('strict')

        ssh_port = self.get_option('private_ssh_port')
        default_ip = self.get_option('default_ip')
        hostname = self.get_option('docker_host')
        verbose_output = self.get_option('verbose_output')
        connection_type = self.get_option('connection_type')
        add_legacy_groups = self.get_option('add_legacy_groups')

        try:
            containers = client.containers(all=True)
        except APIError as exc:
            raise AnsibleError("Error listing containers: %s" % to_native(exc))

        if add_legacy_groups:
            self.inventory.add_group('running')
            self.inventory.add_group('stopped')

        # Daemon connection options forwarded to the docker-api connection
        # plugin as host variables (only when configure_docker_daemon is set).
        extra_facts = {}
        if self.get_option('configure_docker_daemon'):
            for option_name, var_name in DOCKER_COMMON_ARGS_VARS.items():
                value = self.get_option(option_name)
                if value is not None:
                    extra_facts[var_name] = value

        for container in containers:
            id = container.get('Id')
            short_id = id[:13]

            # Prefer the container's first name; fall back to IDs when the
            # container has no name.
            try:
                name = container.get('Names', list())[0].lstrip('/')
                full_name = name
            except IndexError:
                name = short_id
                full_name = id

            self.inventory.add_host(name)
            # 'facts' is what gets set on the host; 'full_facts' additionally
            # holds all inspect data for constructed options (and is merged
            # into 'facts' when verbose_output is enabled).
            facts = dict(
                docker_name=name,
                docker_short_id=short_id
            )
            full_facts = dict()

            try:
                inspect = client.inspect_container(id)
            except APIError as exc:
                raise AnsibleError("Error inspecting container %s - %s" % (name, str(exc)))

            state = inspect.get('State') or dict()
            config = inspect.get('Config') or dict()
            labels = config.get('Labels') or dict()

            running = state.get('Running')

            # Add container to groups
            image_name = config.get('Image')
            if image_name and add_legacy_groups:
                self.inventory.add_group('image_{0}'.format(image_name))
                self.inventory.add_host(name, group='image_{0}'.format(image_name))

            stack_name = labels.get('com.docker.stack.namespace')
            if stack_name:
                full_facts['docker_stack'] = stack_name
                if add_legacy_groups:
                    self.inventory.add_group('stack_{0}'.format(stack_name))
                    self.inventory.add_host(name, group='stack_{0}'.format(stack_name))

            service_name = labels.get('com.docker.swarm.service.name')
            if service_name:
                full_facts['docker_service'] = service_name
                if add_legacy_groups:
                    self.inventory.add_group('service_{0}'.format(service_name))
                    self.inventory.add_host(name, group='service_{0}'.format(service_name))

            if connection_type == 'ssh':
                # Figure out ssh IP and Port
                try:
                    # Lookup the public facing port Nat'ed to ssh port.
                    port = client.port(container, ssh_port)[0]
                except (IndexError, AttributeError, TypeError):
                    port = dict()

                try:
                    ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
                except KeyError:
                    ip = ''

                facts.update(dict(
                    ansible_ssh_host=ip,
                    ansible_ssh_port=port.get('HostPort', 0),
                ))
            elif connection_type == 'docker-cli':
                facts.update(dict(
                    ansible_host=full_name,
                    ansible_connection='community.docker.docker',
                ))
            elif connection_type == 'docker-api':
                facts.update(dict(
                    ansible_host=full_name,
                    ansible_connection='community.docker.docker_api',
                ))
                facts.update(extra_facts)

            full_facts.update(facts)
            for key, value in inspect.items():
                fact_key = self._slugify(key)
                full_facts[fact_key] = value

            if verbose_output:
                facts.update(full_facts)

            for key, value in facts.items():
                self.inventory.set_variable(name, key, value)

            # Use constructed if applicable
            # Composed variables
            self._set_composite_vars(self.get_option('compose'), full_facts, name, strict=strict)
            # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
            self._add_host_to_composed_groups(self.get_option('groups'), full_facts, name, strict=strict)
            # Create groups based on variable values and add the corresponding hosts to it
            self._add_host_to_keyed_groups(self.get_option('keyed_groups'), full_facts, name, strict=strict)

            # We need to do this last since we also add a group called `name`.
            # When we do this before a set_variable() call, the variables are assigned
            # to the group, and not to the host.
            if add_legacy_groups:
                self.inventory.add_group(id)
                self.inventory.add_host(name, group=id)
                self.inventory.add_group(name)
                self.inventory.add_host(name, group=name)
                self.inventory.add_group(short_id)
                self.inventory.add_host(name, group=short_id)
                self.inventory.add_group(hostname)
                self.inventory.add_host(name, group=hostname)

                if running is True:
                    self.inventory.add_host(name, group='running')
                else:
                    self.inventory.add_host(name, group='stopped')

    def verify_file(self, path):
        """Return the possibility of a file being consumable by this plugin."""
        # Only accept inventory sources whose filename ends in docker.yaml/yml.
        return (
            super(InventoryModule, self).verify_file(path) and
            path.endswith(('docker.yaml', 'docker.yml')))

    def _create_client(self):
        # Build the Docker API client with this plugin's minimum version pins.
        return AnsibleDockerClient(self, min_docker_version=MIN_DOCKER_PY, min_docker_api_version=MIN_DOCKER_API)

    def parse(self, inventory, loader, path, cache=True):
        '''Entry point: read the config file and populate the inventory.

        :raises AnsibleError: wrapping DockerException / RequestException
            raised while talking to the Docker daemon.
        '''
        super(InventoryModule, self).parse(inventory, loader, path, cache)
        self._read_config_data(path)
        client = self._create_client()
        try:
            self._populate(client)
        except DockerException as e:
            raise AnsibleError(
                'An unexpected docker error occurred: {0}'.format(e)
            )
        except RequestException as e:
            raise AnsibleError(
                'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e)
            )
|
||||
@@ -0,0 +1,274 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: docker_machine
|
||||
author: Ximon Eighteen (@ximon18)
|
||||
short_description: Docker Machine inventory source
|
||||
requirements:
|
||||
- L(Docker Machine,https://docs.docker.com/machine/)
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
description:
|
||||
- Get inventory hosts from Docker Machine.
|
||||
- Uses a YAML configuration file that ends with docker_machine.(yml|yaml).
|
||||
- The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key).
|
||||
- The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables.
|
||||
|
||||
options:
|
||||
plugin:
|
||||
description: token that ensures this is a source file for the C(docker_machine) plugin.
|
||||
required: yes
|
||||
choices: ['docker_machine', 'community.docker.docker_machine']
|
||||
daemon_env:
|
||||
description:
|
||||
- Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
|
||||
- With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched.
|
||||
A warning will be issued for any skipped host if the choice is C(require).
|
||||
- With C(optional) and C(optional-silently), fetch them and not skip hosts for which they cannot be fetched.
|
||||
A warning will be issued for hosts where they cannot be fetched if the choice is C(optional).
|
||||
- With C(skip), do not attempt to fetch the docker daemon connection environment variables.
|
||||
- If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables.
|
||||
type: str
|
||||
choices:
|
||||
- require
|
||||
- require-silently
|
||||
- optional
|
||||
- optional-silently
|
||||
- skip
|
||||
default: require
|
||||
running_required:
|
||||
description:
|
||||
- When C(true), hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
|
||||
type: bool
|
||||
default: yes
|
||||
verbose_output:
|
||||
description:
|
||||
- When C(true), include all available node metadata (for example C(Image), C(Region), C(Size)) as a JSON object
|
||||
named C(docker_machine_node_attributes).
|
||||
type: bool
|
||||
default: yes
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Minimal example
|
||||
plugin: community.docker.docker_machine
|
||||
|
||||
# Example using constructed features to create a group per Docker Machine driver
|
||||
# (https://docs.docker.com/machine/drivers/), for example:
|
||||
# $ docker-machine create --driver digitalocean ... mymachine
|
||||
# $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
|
||||
# {
|
||||
# ...
|
||||
# "digitalocean": {
|
||||
# "hosts": [
|
||||
# "mymachine"
|
||||
# ]
|
||||
# ...
|
||||
# }
|
||||
strict: no
|
||||
keyed_groups:
|
||||
- separator: ''
|
||||
key: docker_machine_node_attributes.DriverName
|
||||
|
||||
# Example grouping hosts by Digital Machine tag
|
||||
strict: no
|
||||
keyed_groups:
|
||||
- prefix: tag
|
||||
key: 'dm_tags'
|
||||
|
||||
# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
|
||||
compose:
|
||||
ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
|
||||
'''
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
from ansible.module_utils.common.process import get_bin_path
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||
from ansible.utils.display import Display
|
||||
|
||||
import json
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
''' Host inventory parser for ansible using Docker machine as source. '''
|
||||
|
||||
NAME = 'community.docker.docker_machine'
|
||||
|
||||
DOCKER_MACHINE_PATH = None
|
||||
|
||||
    def _run_command(self, args):
        """Run the docker-machine CLI with the given argument list.

        Lazily locates the docker-machine binary on first use and caches the
        path in the class attribute DOCKER_MACHINE_PATH.

        :param args: list of arguments passed to the docker-machine binary
        :returns: the command's stdout, decoded and stripped of whitespace
        :raises AnsibleError: if the docker-machine binary cannot be found
        :raises subprocess.CalledProcessError: if the command exits non-zero
            (a warning is logged before re-raising)
        """
        if not self.DOCKER_MACHINE_PATH:
            try:
                self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
            except ValueError as e:
                raise AnsibleError(to_native(e))

        command = [self.DOCKER_MACHINE_PATH]
        command.extend(args)
        display.debug('Executing command {0}'.format(command))
        try:
            result = subprocess.check_output(command)
        except subprocess.CalledProcessError as e:
            # Callers decide how to react to failures, so only warn here and re-raise.
            display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
            raise e

        return to_text(result).strip()
|
||||
|
||||
def _get_docker_daemon_variables(self, machine_name):
|
||||
'''
|
||||
Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
|
||||
the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
|
||||
'''
|
||||
try:
|
||||
env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
|
||||
except subprocess.CalledProcessError:
|
||||
# This can happen when the machine is created but provisioning is incomplete
|
||||
return []
|
||||
|
||||
# example output of docker-machine env --shell=sh:
|
||||
# export DOCKER_TLS_VERIFY="1"
|
||||
# export DOCKER_HOST="tcp://134.209.204.160:2376"
|
||||
# export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
|
||||
# export DOCKER_MACHINE_NAME="routinator"
|
||||
# # Run this command to configure your shell:
|
||||
# # eval $(docker-machine env --shell=bash routinator)
|
||||
|
||||
# capture any of the DOCKER_xxx variables that were output and create Ansible host vars
|
||||
# with the same name and value but with a dm_ name prefix.
|
||||
vars = []
|
||||
for line in env_lines:
|
||||
match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
|
||||
if match:
|
||||
env_var_name = match.group(1)
|
||||
env_var_value = match.group(2)
|
||||
vars.append((env_var_name, env_var_value))
|
||||
|
||||
return vars
|
||||
|
||||
def _get_machine_names(self):
|
||||
# Filter out machines that are not in the Running state as we probably can't do anything useful actions
|
||||
# with them.
|
||||
ls_command = ['ls', '-q']
|
||||
if self.get_option('running_required'):
|
||||
ls_command.extend(['--filter', 'state=Running'])
|
||||
|
||||
try:
|
||||
ls_lines = self._run_command(ls_command)
|
||||
except subprocess.CalledProcessError:
|
||||
return []
|
||||
|
||||
return ls_lines.splitlines()
|
||||
|
||||
def _inspect_docker_machine_host(self, node):
|
||||
try:
|
||||
inspect_lines = self._run_command(['inspect', self.node])
|
||||
except subprocess.CalledProcessError:
|
||||
return None
|
||||
|
||||
return json.loads(inspect_lines)
|
||||
|
||||
def _ip_addr_docker_machine_host(self, node):
|
||||
try:
|
||||
ip_addr = self._run_command(['ip', self.node])
|
||||
except subprocess.CalledProcessError:
|
||||
return None
|
||||
|
||||
return ip_addr
|
||||
|
||||
def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
|
||||
if not env_var_tuples:
|
||||
warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
|
||||
if daemon_env in ('require', 'require-silently'):
|
||||
if daemon_env == 'require':
|
||||
display.warning('{0}: host will be skipped'.format(warning_prefix))
|
||||
return True
|
||||
else: # 'optional', 'optional-silently'
|
||||
if daemon_env == 'optional':
|
||||
display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
|
||||
return False
|
||||
|
||||
    def _populate(self):
        """Build the inventory from docker-machine: add one host per machine,
        set connection variables from inspect/env output, and apply the
        'constructed' features (compose, groups, keyed_groups).

        :raises AnsibleError: wrapping any exception raised while querying
            docker-machine or populating the inventory
        """
        daemon_env = self.get_option('daemon_env')
        try:
            for self.node in self._get_machine_names():
                self.node_attrs = self._inspect_docker_machine_host(self.node)
                if not self.node_attrs:
                    # Inspection failed (machine may be mid-provisioning); skip it.
                    continue

                machine_name = self.node_attrs['Driver']['MachineName']

                # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
                # that could be used to set environment variables to influence a local Docker client:
                if daemon_env == 'skip':
                    env_var_tuples = []
                else:
                    env_var_tuples = self._get_docker_daemon_variables(machine_name)
                    if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
                        continue

                # add an entry in the inventory for this host
                self.inventory.add_host(machine_name)

                # check for valid ip address from inspect output, else explicitly use ip command to find host ip address
                # this works around an issue seen with Google Compute Platform where the IP address was not available
                # via the 'inspect' subcommand but was via the 'ip' subcommand.
                if self.node_attrs['Driver']['IPAddress']:
                    ip_addr = self.node_attrs['Driver']['IPAddress']
                else:
                    ip_addr = self._ip_addr_docker_machine_host(self.node)

                # set standard Ansible remote host connection settings to details captured from `docker-machine`
                # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
                self.inventory.set_variable(machine_name, 'ansible_host', ip_addr)
                self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort'])
                self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser'])
                self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath'])

                # set variables based on Docker Machine tags
                tags = self.node_attrs['Driver'].get('Tags') or ''
                self.inventory.set_variable(machine_name, 'dm_tags', tags)

                # set variables based on Docker Machine env variables
                for kv in env_var_tuples:
                    self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1])

                if self.get_option('verbose_output'):
                    self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs)

                # Use constructed if applicable
                strict = self.get_option('strict')

                # Composed variables
                self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict)

                # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
                self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict)

                # Create groups based on variable values and add the corresponding hosts to it
                self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict)

        except Exception as e:
            raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
                               to_native(e), orig_exc=e)
|
||||
|
||||
def verify_file(self, path):
|
||||
"""Return the possibility of a file being consumable by this plugin."""
|
||||
return (
|
||||
super(InventoryModule, self).verify_file(path) and
|
||||
path.endswith(('docker_machine.yaml', 'docker_machine.yml')))
|
||||
|
||||
    def parse(self, inventory, loader, path, cache=True):
        """Read the plugin configuration from ``path`` and populate the inventory.

        :param inventory: inventory object to populate
        :param loader: Ansible DataLoader
        :param path: path to the inventory configuration file
        :param cache: whether caching may be used (passed to the base class)
        """
        super(InventoryModule, self).parse(inventory, loader, path, cache)
        self._read_config_data(path)
        self._populate()
|
||||
@@ -0,0 +1,262 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
|
||||
# Copyright (c) 2018 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: docker_swarm
|
||||
author:
|
||||
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
|
||||
short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
|
||||
requirements:
|
||||
- python >= 2.7
|
||||
- L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
description:
|
||||
- Reads inventories from the Docker swarm API.
|
||||
- Uses a YAML configuration file docker_swarm.[yml|yaml].
|
||||
- "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
|
||||
I(managers) - all manager nodes; I(leader) - the swarm leader node;
|
||||
I(nonleaders) - all nodes except the swarm leader."
|
||||
options:
|
||||
plugin:
|
||||
description: The name of this plugin, it should always be set to C(community.docker.docker_swarm)
|
||||
for this plugin to recognize it as it's own.
|
||||
type: str
|
||||
required: true
|
||||
choices: [ docker_swarm, community.docker.docker_swarm ]
|
||||
docker_host:
|
||||
description:
|
||||
- Socket of a Docker swarm manager node (C(tcp), C(unix)).
|
||||
- "Use C(unix://var/run/docker.sock) to connect via local socket."
|
||||
type: str
|
||||
required: true
|
||||
aliases: [ docker_url ]
|
||||
verbose_output:
|
||||
description: Toggle to (not) include all available nodes metadata (for example C(Platform), C(Architecture), C(OS),
|
||||
C(EngineVersion))
|
||||
type: bool
|
||||
default: yes
|
||||
tls:
|
||||
description: Connect using TLS without verifying the authenticity of the Docker host server.
|
||||
type: bool
|
||||
default: no
|
||||
validate_certs:
|
||||
description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
|
||||
host server.
|
||||
type: bool
|
||||
default: no
|
||||
aliases: [ tls_verify ]
|
||||
client_key:
|
||||
description: Path to the client's TLS key file.
|
||||
type: path
|
||||
aliases: [ tls_client_key, key_path ]
|
||||
ca_cert:
|
||||
description: Use a CA certificate when performing server verification by providing the path to a CA
|
||||
certificate file.
|
||||
type: path
|
||||
aliases: [ tls_ca_cert, cacert_path ]
|
||||
client_cert:
|
||||
description: Path to the client's TLS certificate file.
|
||||
type: path
|
||||
aliases: [ tls_client_cert, cert_path ]
|
||||
tls_hostname:
|
||||
description: When verifying the authenticity of the Docker host server, provide the expected name of
|
||||
the server.
|
||||
type: str
|
||||
ssl_version:
|
||||
description: Provide a valid SSL version number. Default value determined by ssl.py module.
|
||||
type: str
|
||||
api_version:
|
||||
description:
|
||||
- The version of the Docker API running on the Docker Host.
|
||||
- Defaults to the latest version of the API supported by docker-py.
|
||||
type: str
|
||||
aliases: [ docker_api_version ]
|
||||
timeout:
|
||||
description:
|
||||
- The maximum amount of time in seconds to wait on a response from the API.
|
||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
|
||||
will be used instead. If the environment variable is not set, the default value will be used.
|
||||
type: int
|
||||
default: 60
|
||||
aliases: [ time_out ]
|
||||
use_ssh_client:
|
||||
description:
|
||||
- For SSH transports, use the C(ssh) CLI tool instead of paramiko.
|
||||
- Requires Docker SDK for Python 4.4.0 or newer.
|
||||
type: bool
|
||||
default: no
|
||||
version_added: 1.5.0
|
||||
include_host_uri:
|
||||
description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
|
||||
swarm leader in format of C(tcp://172.16.0.1:2376). This value may be used without additional
|
||||
modification as value of option I(docker_host) in Docker Swarm modules when connecting via API.
|
||||
The port always defaults to C(2376).
|
||||
type: bool
|
||||
default: no
|
||||
include_host_uri_port:
|
||||
description: Override the detected port number included in I(ansible_host_uri)
|
||||
type: int
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Minimal example using local docker
|
||||
plugin: community.docker.docker_swarm
|
||||
docker_host: unix://var/run/docker.sock
|
||||
|
||||
# Minimal example using remote docker
|
||||
plugin: community.docker.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
|
||||
# Example using remote docker with unverified TLS
|
||||
plugin: community.docker.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
tls: yes
|
||||
|
||||
# Example using remote docker with verified TLS and client certificate verification
|
||||
plugin: community.docker.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
validate_certs: yes
|
||||
ca_cert: /somewhere/ca.pem
|
||||
client_key: /somewhere/key.pem
|
||||
client_cert: /somewhere/cert.pem
|
||||
|
||||
# Example using constructed features to create groups and set ansible_host
|
||||
plugin: community.docker.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
strict: False
|
||||
keyed_groups:
|
||||
# add for example x86_64 hosts to an arch_x86_64 group
|
||||
- prefix: arch
|
||||
key: 'Description.Platform.Architecture'
|
||||
# add for example linux hosts to an os_linux group
|
||||
- prefix: os
|
||||
key: 'Description.Platform.OS'
|
||||
# create a group per node label
|
||||
# for example a node labeled w/ "production" ends up in group "label_production"
|
||||
# hint: labels containing special characters will be converted to safe names
|
||||
- key: 'Spec.Labels'
|
||||
prefix: label
|
||||
'''
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import update_tls_hostname, get_connect_params
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible.parsing.utils.addresses import parse_address
|
||||
|
||||
try:
|
||||
import docker
|
||||
HAS_DOCKER = True
|
||||
except ImportError:
|
||||
HAS_DOCKER = False
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
''' Host inventory parser for ansible using Docker swarm as source. '''
|
||||
|
||||
NAME = 'community.docker.docker_swarm'
|
||||
|
||||
    def _fail(self, msg):
        # Adapter used as fail_function for get_connect_params(): converts a
        # plain error message into an AnsibleError.
        raise AnsibleError(msg)
|
||||
|
||||
    def _populate(self):
        """Connect to the Docker swarm API and build the inventory.

        Creates the groups 'all', 'manager', 'worker', 'leader' and
        'nonleaders', adds every swarm node (keyed by node ID), sets
        connection variables, and applies the 'constructed' features.

        NOTE(review): the plugin docs describe groups 'workers'/'managers',
        but the code adds hosts to the singular Spec.Role group names
        'worker'/'manager' -- confirm which naming is intended.

        :raises AnsibleError: wrapping any exception raised while talking to
            the swarm API or populating the inventory
        """
        raw_params = dict(
            docker_host=self.get_option('docker_host'),
            tls=self.get_option('tls'),
            tls_verify=self.get_option('validate_certs'),
            key_path=self.get_option('client_key'),
            cacert_path=self.get_option('ca_cert'),
            cert_path=self.get_option('client_cert'),
            tls_hostname=self.get_option('tls_hostname'),
            api_version=self.get_option('api_version'),
            timeout=self.get_option('timeout'),
            ssl_version=self.get_option('ssl_version'),
            use_ssh_client=self.get_option('use_ssh_client'),
            debug=None,
        )
        update_tls_hostname(raw_params)
        connect_params = get_connect_params(raw_params, fail_function=self._fail)
        self.client = docker.DockerClient(**connect_params)
        self.inventory.add_group('all')
        self.inventory.add_group('manager')
        self.inventory.add_group('worker')
        self.inventory.add_group('leader')
        self.inventory.add_group('nonleaders')

        if self.get_option('include_host_uri'):
            # Port priority: explicit option > 2376 when TLS is in play > 2375.
            if self.get_option('include_host_uri_port'):
                host_uri_port = str(self.get_option('include_host_uri_port'))
            elif self.get_option('tls') or self.get_option('validate_certs'):
                host_uri_port = '2376'
            else:
                host_uri_port = '2375'

        try:
            self.nodes = self.client.nodes.list()
            for self.node in self.nodes:
                self.node_attrs = self.client.nodes.get(self.node.id).attrs
                self.inventory.add_host(self.node_attrs['ID'])
                # Spec.Role is also used as a group name ('manager' or 'worker').
                self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
                self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
                                            self.node_attrs['Status']['Addr'])
                if self.get_option('include_host_uri'):
                    self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
                                                'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
                if self.get_option('verbose_output'):
                    self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
                if 'ManagerStatus' in self.node_attrs:
                    if self.node_attrs['ManagerStatus'].get('Leader'):
                        # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
                        # Check moby/moby#35437 for details
                        swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
                            self.node_attrs['Status']['Addr']
                        if self.get_option('include_host_uri'):
                            self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
                                                        'tcp://' + swarm_leader_ip + ':' + host_uri_port)
                        self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
                        self.inventory.add_host(self.node_attrs['ID'], group='leader')
                    else:
                        self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
                else:
                    self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
                # Use constructed if applicable
                strict = self.get_option('strict')
                # Composed variables
                self._set_composite_vars(self.get_option('compose'),
                                         self.node_attrs,
                                         self.node_attrs['ID'],
                                         strict=strict)
                # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
                self._add_host_to_composed_groups(self.get_option('groups'),
                                                  self.node_attrs,
                                                  self.node_attrs['ID'],
                                                  strict=strict)
                # Create groups based on variable values and add the corresponding hosts to it
                self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
                                               self.node_attrs,
                                               self.node_attrs['ID'],
                                               strict=strict)
        except Exception as e:
            raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
                               to_native(e))
|
||||
|
||||
    def verify_file(self, path):
        """Return the possibility of a file being consumable by this plugin."""
        return (
            super(InventoryModule, self).verify_file(path) and
            path.endswith(('docker_swarm.yaml', 'docker_swarm.yml')))
|
||||
|
||||
    def parse(self, inventory, loader, path, cache=True):
        """Read the plugin configuration from ``path`` and populate the inventory.

        :param inventory: inventory object to populate
        :param loader: Ansible DataLoader
        :param path: path to the inventory configuration file
        :param cache: whether caching may be used (passed to the base class)
        :raises AnsibleError: if the Docker SDK for Python is not installed
        """
        if not HAS_DOCKER:
            raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
                               'https://github.com/docker/docker-py.')
        super(InventoryModule, self).parse(inventory, loader, path, cache)
        self._read_config_data(path)
        self._populate()
|
||||
@@ -0,0 +1,343 @@
|
||||
# Vendored copy of distutils/version.py from CPython 3.9.5
|
||||
#
|
||||
# Implements multiple version numbering conventions for the
|
||||
# Python Module Distribution Utilities.
|
||||
#
|
||||
# PSF License (see PSF-license.txt or https://opensource.org/licenses/Python-2.0)
|
||||
#
|
||||
|
||||
"""Provides classes to represent module version numbers (one class for
|
||||
each style of version numbering). There are currently two such classes
|
||||
implemented: StrictVersion and LooseVersion.
|
||||
|
||||
Every version number class implements the following interface:
|
||||
* the 'parse' method takes a string and parses it to some internal
|
||||
representation; if the string is an invalid version number,
|
||||
'parse' raises a ValueError exception
|
||||
* the class constructor takes an optional string argument which,
|
||||
if supplied, is passed to 'parse'
|
||||
* __str__ reconstructs the string that was passed to 'parse' (or
|
||||
an equivalent string -- ie. one that will generate an equivalent
|
||||
version number instance)
|
||||
* __repr__ generates Python code to recreate the version number instance
|
||||
* _cmp compares the current instance with either another instance
|
||||
of the same class or a string (which will be parsed to an instance
|
||||
of the same class, thus must follow the same rules)
|
||||
"""
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import re
|
||||
|
||||
try:
    # re.ASCII restricts \d, \w etc. to ASCII; it does not exist on Python 2,
    # where ASCII matching is already the default for str patterns.
    RE_FLAGS = re.VERBOSE | re.ASCII
except AttributeError:
    RE_FLAGS = re.VERBOSE
|
||||
|
||||
|
||||
class Version:
    """Abstract base class for version numbering classes.  Just provides
    constructor (__init__) and reproducer (__repr__), because those
    seem to be the same for all version numbering classes; and route
    rich comparisons to _cmp.

    NOTE: this is a vendored copy of distutils/version.py from CPython
    3.9.5 -- keep changes minimal so it can be diffed against upstream.
    """

    def __init__(self, vstring=None):
        # Parsing is delegated to the subclass-provided parse().
        if vstring:
            self.parse(vstring)

    def __repr__(self):
        return "%s ('%s')" % (self.__class__.__name__, str(self))

    def __eq__(self, other):
        c = self._cmp(other)
        if c is NotImplemented:
            # Propagate NotImplemented so Python can try the reflected operation.
            return c
        return c == 0

    def __lt__(self, other):
        c = self._cmp(other)
        if c is NotImplemented:
            return c
        return c < 0

    def __le__(self, other):
        c = self._cmp(other)
        if c is NotImplemented:
            return c
        return c <= 0

    def __gt__(self, other):
        c = self._cmp(other)
        if c is NotImplemented:
            return c
        return c > 0

    def __ge__(self, other):
        c = self._cmp(other)
        if c is NotImplemented:
            return c
        return c >= 0
|
||||
|
||||
|
||||
# Interface for version-number classes -- must be implemented
|
||||
# by the following classes (the concrete ones -- Version should
|
||||
# be treated as an abstract class).
|
||||
# __init__ (string) - create and take same action as 'parse'
|
||||
# (string parameter is optional)
|
||||
# parse (string) - convert a string representation to whatever
|
||||
# internal representation is appropriate for
|
||||
# this style of version numbering
|
||||
# __str__ (self) - convert back to a string; should be very similar
|
||||
# (if not identical to) the string supplied to parse
|
||||
# __repr__ (self) - generate Python code to recreate
|
||||
# the instance
|
||||
# _cmp (self, other) - compare two version numbers ('other' may
|
||||
# be an unparsed version string, or another
|
||||
# instance of your version class)
|
||||
|
||||
|
||||
class StrictVersion(Version):

    """Version numbering for anal retentives and software idealists.
    Implements the standard interface for version number classes as
    described above.  A version number consists of two or three
    dot-separated numeric components, with an optional "pre-release" tag
    on the end.  The pre-release tag consists of the letter 'a' or 'b'
    followed by a number.  If the numeric components of two version
    numbers are equal, then one with a pre-release tag will always
    be deemed earlier (lesser) than one without.

    The following are valid version numbers (shown in the order that
    would be obtained by sorting according to the supplied cmp function):

        0.4       0.4.0  (these two are equivalent)
        0.4.1
        0.5a1
        0.5b3
        0.5
        0.9.6
        1.0
        1.0.4a3
        1.0.4b1
        1.0.4

    The following are examples of invalid version numbers:

        1
        2.7.2.2
        1.3.a4
        1.3pl1
        1.3c4

    The rationale for this version numbering system will be explained
    in the distutils documentation.

    NOTE: vendored from CPython 3.9.5 distutils -- keep in sync with upstream.
    """

    # major '.' minor ['.' patch] [('a'|'b') prerelease_num]
    version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
                            RE_FLAGS)

    def parse(self, vstring):
        match = self.version_re.match(vstring)
        if not match:
            raise ValueError("invalid version number '%s'" % vstring)

        (major, minor, patch, prerelease, prerelease_num) = \
            match.group(1, 2, 4, 5, 6)

        # self.version is always a 3-tuple of ints; a missing patch level is 0.
        if patch:
            self.version = tuple(map(int, [major, minor, patch]))
        else:
            self.version = tuple(map(int, [major, minor])) + (0,)

        # self.prerelease is ('a'|'b', int) or None.
        if prerelease:
            self.prerelease = (prerelease[0], int(prerelease_num))
        else:
            self.prerelease = None

    def __str__(self):
        if self.version[2] == 0:
            # Omit a zero patch level, matching the "0.4 == 0.4.0" equivalence.
            vstring = '.'.join(map(str, self.version[0:2]))
        else:
            vstring = '.'.join(map(str, self.version))

        if self.prerelease:
            vstring = vstring + self.prerelease[0] + str(self.prerelease[1])

        return vstring

    def _cmp(self, other):
        # Returns -1, 0, 1, or NotImplemented for foreign types.
        if isinstance(other, str):
            other = StrictVersion(other)
        elif not isinstance(other, StrictVersion):
            return NotImplemented

        if self.version != other.version:
            # numeric versions don't match
            # prerelease stuff doesn't matter
            if self.version < other.version:
                return -1
            else:
                return 1

        # have to compare prerelease
        # case 1: neither has prerelease; they're equal
        # case 2: self has prerelease, other doesn't; other is greater
        # case 3: self doesn't have prerelease, other does: self is greater
        # case 4: both have prerelease: must compare them!

        if (not self.prerelease and not other.prerelease):
            return 0
        elif (self.prerelease and not other.prerelease):
            return -1
        elif (not self.prerelease and other.prerelease):
            return 1
        elif (self.prerelease and other.prerelease):
            if self.prerelease == other.prerelease:
                return 0
            elif self.prerelease < other.prerelease:
                return -1
            else:
                return 1
        else:
            raise AssertionError("never get here")

# end class StrictVersion
|
||||
|
||||
# end class StrictVersion
|
||||
|
||||
# The rules according to Greg Stein:
|
||||
# 1) a version number has 1 or more numbers separated by a period or by
|
||||
# sequences of letters. If only periods, then these are compared
|
||||
# left-to-right to determine an ordering.
|
||||
# 2) sequences of letters are part of the tuple for comparison and are
|
||||
# compared lexicographically
|
||||
# 3) recognize the numeric components may have leading zeroes
|
||||
#
|
||||
# The LooseVersion class below implements these rules: a version number
|
||||
# string is split up into a tuple of integer and string components, and
|
||||
# comparison is a simple tuple comparison. This means that version
|
||||
# numbers behave in a predictable and obvious way, but a way that might
|
||||
# not necessarily be how people *want* version numbers to behave. There
|
||||
# wouldn't be a problem if people could stick to purely numeric version
|
||||
# numbers: just split on period and compare the numbers as tuples.
|
||||
# However, people insist on putting letters into their version numbers;
|
||||
# the most common purpose seems to be:
|
||||
# - indicating a "pre-release" version
|
||||
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
|
||||
# - indicating a post-release patch ('p', 'pl', 'patch')
|
||||
# but of course this can't cover all version number schemes, and there's
|
||||
# no way to know what a programmer means without asking him.
|
||||
#
|
||||
# The problem is what to do with letters (and other non-numeric
|
||||
# characters) in a version number. The current implementation does the
|
||||
# obvious and predictable thing: keep them as strings and compare
|
||||
# lexically within a tuple comparison. This has the desired effect if
|
||||
# an appended letter sequence implies something "post-release":
|
||||
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
|
||||
#
|
||||
# However, if letters in a version number imply a pre-release version,
|
||||
# the "obvious" thing isn't correct. Eg. you would expect that
|
||||
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
|
||||
# implemented here, this just isn't so.
|
||||
#
|
||||
# Two possible solutions come to mind. The first is to tie the
|
||||
# comparison algorithm to a particular set of semantic rules, as has
|
||||
# been done in the StrictVersion class above. This works great as long
|
||||
# as everyone can go along with bondage and discipline. Hopefully a
|
||||
# (large) subset of Python module programmers will agree that the
|
||||
# particular flavour of bondage and discipline provided by StrictVersion
|
||||
# provides enough benefit to be worth using, and will submit their
|
||||
# version numbering scheme to its domination. The free-thinking
|
||||
# anarchists in the lot will never give in, though, and something needs
|
||||
# to be done to accommodate them.
|
||||
#
|
||||
# Perhaps a "moderately strict" version class could be implemented that
|
||||
# lets almost anything slide (syntactically), and makes some heuristic
|
||||
# assumptions about non-digits in version number strings. This could
|
||||
# sink into special-case-hell, though; if I was as talented and
|
||||
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
|
||||
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
|
||||
# just as happy dealing with things like "2g6" and "1.13++". I don't
|
||||
# think I'm smart enough to do it right though.
|
||||
#
|
||||
# In any case, I've coded the test suite for this module (see
|
||||
# ../test/test_version.py) specifically to fail on things like comparing
|
||||
# "1.2a2" and "1.2". That's not because the *code* is doing anything
|
||||
# wrong, it's because the simple, obvious design doesn't match my
|
||||
# complicated, hairy expectations for real-world version numbers. It
|
||||
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
|
||||
# the Right Thing" (ie. the code matches the conception). But I'd rather
|
||||
# have a conception that matches common notions about version numbers.
|
||||
|
||||
|
||||
class LooseVersion(Version):

    """Version numbering for anarchists and software realists.
    Implements the standard interface for version number classes as
    described above.  A version number consists of a series of numbers,
    separated by either periods or strings of letters.  When comparing
    version numbers, the numeric components will be compared
    numerically, and the alphabetic components lexically.  The following
    are all valid version numbers, in no particular order:

        1.5.1
        1.5.2b2
        161
        3.10a
        8.02
        3.4j
        1996.07.12
        3.2.pl0
        3.1.1.6
        2g6
        11g
        0.960923
        2.2beta29
        1.13++
        5.5.kw
        2.0b1pl0

    In fact, there is no such thing as an invalid version number under
    this scheme; the rules for comparison are simple and predictable,
    but may not always give the results you want (for some definition
    of "want").

    NOTE: vendored from CPython 3.9.5 distutils -- keep in sync with upstream.
    NOTE(review): comparing versions whose corresponding components mix ints
    and strings (e.g. '1.0' vs '1.a') presumably raises TypeError on
    Python 3, as in upstream distutils -- confirm callers avoid that case.
    """

    # Splits a version string into numeric runs, alphabetic runs, and dots.
    component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)

    def __init__(self, vstring=None):
        if vstring:
            self.parse(vstring)

    def parse(self, vstring):
        # I've given up on thinking I can reconstruct the version string
        # from the parsed tuple -- so I just store the string here for
        # use by __str__
        self.vstring = vstring
        components = [x for x in self.component_re.split(vstring) if x and x != '.']
        for i, obj in enumerate(components):
            try:
                components[i] = int(obj)
            except ValueError:
                # Alphabetic components stay as strings.
                pass

        self.version = components

    def __str__(self):
        return self.vstring

    def __repr__(self):
        return "LooseVersion ('%s')" % str(self)

    def _cmp(self, other):
        # Returns -1, 0, 1, or NotImplemented for foreign types.
        if isinstance(other, str):
            other = LooseVersion(other)
        elif not isinstance(other, LooseVersion):
            return NotImplemented

        # Plain list comparison: ints numerically, strings lexically.
        if self.version == other.version:
            return 0
        if self.version < other.version:
            return -1
        if self.version > other.version:
            return 1

# end class LooseVersion
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,232 @@
|
||||
# Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import socket as pysocket
|
||||
|
||||
from ansible.module_utils.basic import missing_required_lib
|
||||
from ansible.module_utils.six import PY3
|
||||
|
||||
try:
|
||||
from docker.utils import socket as docker_socket
|
||||
import struct
|
||||
except Exception:
|
||||
# missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.socket_helper import (
|
||||
make_unblocking,
|
||||
shutdown_writing,
|
||||
write_to_socket,
|
||||
)
|
||||
|
||||
|
||||
PARAMIKO_POLL_TIMEOUT = 0.01 # 10 milliseconds
|
||||
|
||||
|
||||
class DockerSocketHandlerBase(object):
    """Non-blocking duplex handler for a Docker attach/exec socket.

    Incoming bytes are decoded according to Docker's stream-multiplexing
    framing (an 8-byte header ``>BxxxL``: stream ID, three padding bytes,
    big-endian payload length, followed by the payload); complete payload
    blocks are handed to a callback.  Outgoing data is buffered and flushed
    whenever the socket becomes writable.
    """

    def __init__(self, sock, selectors, log=None):
        """
        :param sock: socket-like object connected to the Docker daemon
        :param selectors: module compatible with the stdlib ``selectors``
            module (injected so callers can supply a backport)
        :param log: optional callable accepting a single message string
        """
        make_unblocking(sock)

        self._selectors = selectors
        if log is not None:
            self._log = log
        else:
            self._log = lambda msg: True
        # Paramiko's Channel supports select() only for reading
        # (https://github.com/paramiko/paramiko/issues/695), so for such
        # sockets writability must be polled via send_ready() instead.
        self._paramiko_read_workaround = hasattr(sock, 'send_ready') and 'paramiko' in str(type(sock))

        self._sock = sock
        self._block_done_callback = None
        self._block_buffer = []
        self._eof = False
        self._read_buffer = b''
        self._write_buffer = b''
        self._end_of_writing = False

        # Decoding state of the frame currently in flight.
        self._current_stream = None
        self._current_missing = 0
        self._current_buffer = b''

        self._selector = self._selectors.DefaultSelector()
        self._selector.register(self._sock, self._selectors.EVENT_READ)

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self._selector.close()

    def set_block_done_callback(self, block_done_callback):
        """Install the block callback and replay any blocks that were
        buffered before a callback was available, in arrival order."""
        self._block_done_callback = block_done_callback
        if self._block_done_callback is not None:
            while self._block_buffer:
                # BUGFIX: was ``self._block_buffer.remove(0)`` -- list.remove()
                # deletes by *value* (raising ValueError for tuple entries) and
                # returns None; pop(0) removes and returns the oldest block.
                elt = self._block_buffer.pop(0)
                self._block_done_callback(*elt)

    def _add_block(self, stream_id, data):
        # Deliver a complete payload block, or buffer it until a callback
        # has been installed.
        if self._block_done_callback is not None:
            self._block_done_callback(stream_id, data)
        else:
            self._block_buffer.append((stream_id, data))

    def _read(self):
        """Pull available bytes off the socket and decode complete frames."""
        if self._eof:
            return
        if hasattr(self._sock, 'recv'):
            try:
                data = self._sock.recv(262144)
            except Exception as e:
                # After calling self._sock.shutdown(), OpenSSL's/urllib3's
                # WrappedSocket seems to eventually raise ZeroReturnError in
                # case of EOF
                if 'OpenSSL.SSL.ZeroReturnError' in str(type(e)):
                    self._eof = True
                    return
                else:
                    raise
        elif PY3 and isinstance(self._sock, getattr(pysocket, 'SocketIO')):
            data = self._sock.read()
        else:
            # BUGFIX: os.read() requires a byte count as second argument;
            # it was previously called with only the file descriptor, which
            # raises TypeError at runtime.
            data = os.read(self._sock.fileno(), 262144)
        if data is None:
            # no data available
            return
        self._log('read {0} bytes'.format(len(data)))
        if len(data) == 0:
            # Stream EOF
            self._eof = True
            return
        self._read_buffer += data
        while len(self._read_buffer) > 0:
            if self._current_missing > 0:
                # Continue filling the payload of the frame in flight.
                n = min(len(self._read_buffer), self._current_missing)
                self._current_buffer += self._read_buffer[:n]
                self._read_buffer = self._read_buffer[n:]
                self._current_missing -= n
                if self._current_missing == 0:
                    self._add_block(self._current_stream, self._current_buffer)
                    self._current_buffer = b''
            if len(self._read_buffer) < 8:
                # Not enough data for the next 8-byte frame header yet.
                break
            self._current_stream, self._current_missing = struct.unpack('>BxxxL', self._read_buffer[:8])
            self._read_buffer = self._read_buffer[8:]
            if self._current_missing < 0:
                # Stream EOF (as reported by docker daemon)
                self._eof = True
                break

    def _handle_end_of_writing(self):
        # Once the caller signalled end-of-writing and the buffer drained,
        # half-close the socket for writing.
        if self._end_of_writing and len(self._write_buffer) == 0:
            self._end_of_writing = False
            self._log('Shutting socket down for writing')
            shutdown_writing(self._sock, self._log)

    def _write(self):
        """Flush as much of the write buffer as the socket accepts."""
        if len(self._write_buffer) > 0:
            written = write_to_socket(self._sock, self._write_buffer)
            self._write_buffer = self._write_buffer[written:]
            self._log('wrote {0} bytes, {1} are left'.format(written, len(self._write_buffer)))
            if len(self._write_buffer) > 0:
                # Still data pending: ask the selector to report writability.
                self._selector.modify(self._sock, self._selectors.EVENT_READ | self._selectors.EVENT_WRITE)
            else:
                self._selector.modify(self._sock, self._selectors.EVENT_READ)
            self._handle_end_of_writing()

    def select(self, timeout=None, _internal_recursion=False):
        """Wait (up to ``timeout`` seconds) for readiness and perform the
        corresponding reads/writes.

        :return: True if at least one read or write happened
        """
        if not _internal_recursion and self._paramiko_read_workaround and len(self._write_buffer) > 0:
            # When the SSH transport is used, docker-py internally uses Paramiko, whose
            # Channel object supports select(), but only for reading
            # (https://github.com/paramiko/paramiko/issues/695).
            if self._sock.send_ready():
                self._write()
                return True
            while timeout is None or timeout > PARAMIKO_POLL_TIMEOUT:
                result = self.select(PARAMIKO_POLL_TIMEOUT, _internal_recursion=True)
                if self._sock.send_ready():
                    # BUGFIX: send_ready() reports *writability*; flush the
                    # write buffer. This branch mistakenly called self._read(),
                    # which could stall pending writes (compare the identical
                    # workaround after the selector loop below).
                    self._write()
                    result += 1
                if result > 0:
                    return True
                if timeout is not None:
                    timeout -= PARAMIKO_POLL_TIMEOUT
        self._log('select... ({0})'.format(timeout))
        events = self._selector.select(timeout)
        for key, event in events:
            if key.fileobj == self._sock:
                self._log(
                    'select event read:{0} write:{1}'.format(
                        event & self._selectors.EVENT_READ != 0,
                        event & self._selectors.EVENT_WRITE != 0))
                if event & self._selectors.EVENT_READ != 0:
                    self._read()
                if event & self._selectors.EVENT_WRITE != 0:
                    self._write()
        result = len(events)
        if self._paramiko_read_workaround and len(self._write_buffer) > 0:
            if self._sock.send_ready():
                self._write()
                result += 1
        return result > 0

    def is_eof(self):
        """Return True once end-of-stream has been observed."""
        return self._eof

    def end_of_writing(self):
        """Signal that no more data will be written; the socket is
        half-closed for writing as soon as the write buffer drains."""
        self._end_of_writing = True
        self._handle_end_of_writing()

    def consume(self):
        """Drive the socket until EOF and return collected output.

        :return: tuple ``(stdout_bytes, stderr_bytes)``
        :raises ValueError: if the daemon sends an unknown stream ID
        """
        stdout = []
        stderr = []

        def append_block(stream_id, data):
            if stream_id == docker_socket.STDOUT:
                stdout.append(data)
            elif stream_id == docker_socket.STDERR:
                stderr.append(data)
            else:
                raise ValueError('{0} is not a valid stream ID'.format(stream_id))

        self.end_of_writing()

        self.set_block_done_callback(append_block)
        while not self._eof:
            self.select()
        return b''.join(stdout), b''.join(stderr)

    def write(self, str):
        """Queue ``str`` (bytes) for sending; if the buffer was previously
        empty, attempt an immediate flush."""
        self._write_buffer += str
        if len(self._write_buffer) == len(str):
            self._write()
|
||||
|
||||
|
||||
class DockerSocketHandlerModule(DockerSocketHandlerBase):
    # Convenience wrapper that wires the handler's logging to the Ansible
    # module's debug() method.
    def __init__(self, sock, module, selectors):
        super(DockerSocketHandlerModule, self).__init__(sock, selectors, module.debug)
|
||||
|
||||
|
||||
def find_selectors(module):
    """Locate a usable ``selectors`` implementation.

    Tries, in order: the compat version shipped with ansible-base 2.10+
    (which has a bundled fallback), the Python 3.4+ standard library
    module, and the ``selectors2`` backport package. Fails the module via
    ``module.fail_json`` if none is available.

    :param module: AnsibleModule instance (used only to fail the task)
    :return: the first importable selectors-compatible module
    """
    def _ansible_compat():
        # ansible-base 2.10+ ships a compat version of selectors with a
        # bundled fallback.
        from ansible.module_utils.compat import selectors
        return selectors

    def _stdlib():
        # Python 3.4+
        import selectors
        return selectors

    def _backport():
        # backport package installed in the system
        import selectors2
        return selectors2

    for candidate in (_ansible_compat, _stdlib, _backport):
        try:
            return candidate()
        except ImportError:
            pass
    module.fail_json(msg=missing_required_lib('selectors2', reason='for handling stdin'))
|
||||
@@ -0,0 +1,53 @@
|
||||
# Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
import fcntl
|
||||
import os
|
||||
import os.path
|
||||
import socket as pysocket
|
||||
|
||||
from ansible.module_utils.six import PY3
|
||||
|
||||
|
||||
def make_unblocking(sock):
    """Switch ``sock`` into non-blocking mode.

    Handles three flavors of socket-like objects: wrappers exposing a raw
    socket as ``_sock`` (Python 2 ssl-wrapped sockets), objects with their
    own ``setblocking()``, and anything else via fcntl on the underlying
    file descriptor.
    """
    if hasattr(sock, '_sock'):
        sock._sock.setblocking(0)
        return
    if hasattr(sock, 'setblocking'):
        sock.setblocking(0)
        return
    # Last resort: flip O_NONBLOCK on the file descriptor directly.
    fd = sock.fileno()
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
|
||||
|
||||
|
||||
def _empty_writer(msg):
    # Default no-op logger used by shutdown_writing() when no log callable
    # is supplied.
    pass
|
||||
|
||||
|
||||
def shutdown_writing(sock, log=_empty_writer):
    """Signal end-of-writing (half-close) on ``sock``.

    Tries the duck-typed options in order of specificity; if none applies,
    only logs a message instead of raising.

    :param sock: socket-like object
    :param log: callable accepting a single message string
    """
    # Some wrappers (e.g. Paramiko channels) offer a dedicated half-close.
    if hasattr(sock, 'shutdown_write'):
        sock.shutdown_write()
        return
    if hasattr(sock, 'shutdown'):
        try:
            sock.shutdown(pysocket.SHUT_WR)
        except TypeError as e:
            # probably: "TypeError: shutdown() takes 1 positional argument but 2 were given"
            log('Shutting down for writing not possible; trying shutdown instead: {0}'.format(e))
            sock.shutdown()
        return
    if PY3 and isinstance(sock, getattr(pysocket, 'SocketIO')):
        sock._sock.shutdown(pysocket.SHUT_WR)
        return
    log('No idea how to signal end of writing')
|
||||
|
||||
|
||||
def write_to_socket(sock, data):
    """Write ``data`` (bytes) to ``sock``.

    :return: number of bytes actually written
    """
    # WrappedSocket (urllib3/contrib/pyopenssl) doesn't have `send`, but
    # only `sendall`, which uses `_send_until_done` under the hood.
    if hasattr(sock, '_send_until_done'):
        return sock._send_until_done(data)
    if hasattr(sock, 'send'):
        return sock.send(data)
    # Fall back to writing to the raw file descriptor.
    return os.write(sock.fileno(), data)
|
||||
@@ -0,0 +1,280 @@
|
||||
# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
|
||||
# (c) Thierry Bouvet (@tbouvet)
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
import json
|
||||
from time import sleep
|
||||
|
||||
try:
|
||||
from docker.errors import APIError, NotFound
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import AnsibleDockerClient
|
||||
|
||||
|
||||
class AnsibleDockerSwarmClient(AnsibleDockerClient):
    # Docker client specialised for Swarm: membership/role checks and
    # node/service inspection helpers used by the swarm-related modules.
    # Error handling deliberately distinguishes HTTP 503 (not a manager)
    # and 404 (object missing) from other API errors.

    def __init__(self, **kwargs):
        super(AnsibleDockerSwarmClient, self).__init__(**kwargs)

    def get_swarm_node_id(self):
        """
        Get the 'NodeID' of the Swarm node or 'None' if host is not in Swarm. It returns the NodeID
        of Docker host the module is executed on
        :return:
            NodeID of host or 'None' if not part of Swarm
        """

        try:
            info = self.info()
        except APIError as exc:
            self.fail("Failed to get node information for %s" % to_native(exc))

        if info:
            # Round-trip through JSON to normalise the response into plain
            # built-in types.
            json_str = json.dumps(info, ensure_ascii=False)
            swarm_info = json.loads(json_str)
            if swarm_info['Swarm']['NodeID']:
                return swarm_info['Swarm']['NodeID']
        return None

    def check_if_swarm_node(self, node_id=None):
        """
        Checking if host is part of Docker Swarm. If 'node_id' is not provided it reads the Docker host
        system information looking if specific key in output exists. If 'node_id' is provided then it tries to
        read node information assuming it is run on Swarm manager. The get_node_inspect() method handles exception if
        it is not executed on Swarm manager

        :param node_id: Node identifier
        :return:
            bool: True if node is part of Swarm, False otherwise
        """

        if node_id is None:
            try:
                info = self.info()
            except APIError:
                self.fail("Failed to get host information.")

            if info:
                json_str = json.dumps(info, ensure_ascii=False)
                swarm_info = json.loads(json_str)
                if swarm_info['Swarm']['NodeID']:
                    return True
                # A node can be in Swarm without a NodeID yet (joining or
                # locked); treat those states as membership too.
                if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
                    return True
            return False
        else:
            try:
                node_info = self.get_node_inspect(node_id=node_id)
            except APIError:
                # NOTE(review): returns None (not False) on API errors --
                # callers that test truthiness are unaffected, but the
                # documented bool contract is not strictly met here.
                return

            if node_info['ID'] is not None:
                return True
            return False

    def check_if_swarm_manager(self):
        """
        Checks if node role is set as Manager in Swarm. The node is the docker host on which module action
        is performed. The inspect_swarm() will fail if node is not a manager

        :return: True if node is Swarm Manager, False otherwise
        """

        try:
            self.inspect_swarm()
            return True
        except APIError:
            return False

    def fail_task_if_not_swarm_manager(self):
        """
        If host is not a swarm manager then Ansible task on this host should end with 'failed' state
        """
        if not self.check_if_swarm_manager():
            self.fail("Error running docker swarm module: must run on swarm manager node")

    def check_if_swarm_worker(self):
        """
        Checks if node role is set as Worker in Swarm. The node is the docker host on which module action
        is performed. Will fail if run on host that is not part of Swarm via check_if_swarm_node()

        :return: True if node is Swarm Worker, False otherwise
        """

        # A worker is any Swarm member that is not a manager.
        if self.check_if_swarm_node() and not self.check_if_swarm_manager():
            return True
        return False

    def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
        """
        Checks if node status on Swarm manager is 'down'. If node_id is provided it query manager about
        node specified in parameter, otherwise it query manager itself. If run on Swarm Worker node or
        host that is not part of Swarm it will fail the playbook

        :param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once
        :param node_id: node ID or name, if None then method will try to get node_id of host module run on
        :return:
            True if node is part of swarm but its state is down, False otherwise
        """

        if repeat_check < 1:
            repeat_check = 1

        if node_id is None:
            node_id = self.get_swarm_node_id()

        for retry in range(0, repeat_check):
            if retry > 0:
                # Wait between attempts to give the node time to go down.
                sleep(5)
            node_info = self.get_node_inspect(node_id=node_id)
            if node_info['Status']['State'] == 'down':
                return True
        return False

    def get_node_inspect(self, node_id=None, skip_missing=False):
        """
        Returns Swarm node info as in 'docker node inspect' command about single node

        :param skip_missing: if True then function will return None instead of failing the task
        :param node_id: node ID or name, if None then method will try to get node_id of host module run on
        :return:
            Single node information structure
        """

        if node_id is None:
            node_id = self.get_swarm_node_id()

        if node_id is None:
            self.fail("Failed to get node information.")

        try:
            node_info = self.inspect_node(node_id=node_id)
        except APIError as exc:
            # 503: we are not talking to a Swarm manager.
            if exc.status_code == 503:
                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
            # 404: the node does not exist; optionally tolerated.
            if exc.status_code == 404:
                if skip_missing:
                    return None
            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
        except Exception as exc:
            self.fail("Error inspecting swarm node: %s" % exc)

        # Normalise to plain built-in types via a JSON round-trip.
        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)

        if 'ManagerStatus' in node_info:
            if node_info['ManagerStatus'].get('Leader'):
                # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
                # Check moby/moby#35437 for details
                count_colons = node_info['ManagerStatus']['Addr'].count(":")
                if count_colons == 1:
                    swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
                else:
                    swarm_leader_ip = node_info['Status']['Addr']
                node_info['Status']['Addr'] = swarm_leader_ip
        return node_info

    def get_all_nodes_inspect(self):
        """
        Returns Swarm node info as in 'docker node inspect' command about all registered nodes

        :return:
            Structure with information about all nodes
        """
        try:
            node_info = self.nodes()
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
        except Exception as exc:
            self.fail("Error inspecting swarm node: %s" % exc)

        # Normalise to plain built-in types via a JSON round-trip.
        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)
        return node_info

    def get_all_nodes_list(self, output='short'):
        """
        Returns list of nodes registered in Swarm

        :param output: Defines format of returned data
        :return:
            If 'output' is 'short' then return data is list of nodes hostnames registered in Swarm,
            if 'output' is 'long' then returns data is list of dict containing the attributes as in
            output of command 'docker node ls'
        """
        nodes_list = []

        nodes_inspect = self.get_all_nodes_inspect()
        if nodes_inspect is None:
            return None

        if output == 'short':
            for node in nodes_inspect:
                nodes_list.append(node['Description']['Hostname'])
        elif output == 'long':
            for node in nodes_inspect:
                node_property = {}

                node_property.update({'ID': node['ID']})
                node_property.update({'Hostname': node['Description']['Hostname']})
                node_property.update({'Status': node['Status']['State']})
                node_property.update({'Availability': node['Spec']['Availability']})
                # Only manager nodes carry ManagerStatus.
                # NOTE(review): assumes 'Leader' is present whenever
                # 'ManagerStatus' is -- TODO confirm against the API schema.
                if 'ManagerStatus' in node:
                    if node['ManagerStatus']['Leader'] is True:
                        node_property.update({'Leader': True})
                    node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
                node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})

                nodes_list.append(node_property)
        else:
            # Unknown output format requested.
            return None

        return nodes_list

    def get_node_name_by_id(self, nodeid):
        # Resolve a node ID to its hostname via node inspection.
        return self.get_node_inspect(nodeid)['Description']['Hostname']

    def get_unlock_key(self):
        # The unlock-key API is only available in Docker SDK for Python >= 2.7.0.
        if self.docker_py_version < LooseVersion('2.7.0'):
            return None
        return super(AnsibleDockerSwarmClient, self).get_unlock_key()

    def get_service_inspect(self, service_id, skip_missing=False):
        """
        Returns Swarm service info as in 'docker service inspect' command about single service

        :param service_id: service ID or name
        :param skip_missing: if True then function will return None instead of failing the task
        :return:
            Single service information structure
        """
        try:
            service_info = self.inspect_service(service_id)
        except NotFound as exc:
            if skip_missing is False:
                self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
            else:
                return None
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
            self.fail("Error inspecting swarm service: %s" % exc)
        except Exception as exc:
            self.fail("Error inspecting swarm service: %s" % exc)

        # Normalise to plain built-in types via a JSON round-trip.
        json_str = json.dumps(service_info, ensure_ascii=False)
        service_info = json.loads(json_str)
        return service_info
|
||||
@@ -0,0 +1,17 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2021, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
"""Provide version object to compare version numbers."""
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
# Once we drop support for Ansible 2.9, ansible-base 2.10, and ansible-core 2.11, we can
|
||||
# remove the _version.py file, and replace the following import by
|
||||
#
|
||||
# from ansible.module_utils.compat.version import LooseVersion
|
||||
|
||||
from ._version import LooseVersion
|
||||
@@ -0,0 +1,107 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2020 Matt Clay <mclay@redhat.com>
|
||||
# (c) 2020 Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: current_container_facts
|
||||
short_description: Return facts about whether the module runs in a Docker container
|
||||
version_added: 1.1.0
|
||||
description:
|
||||
- Return facts about whether the module runs in a Docker container.
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get facts on current container
|
||||
community.docker.current_container_facts:
|
||||
|
||||
- name: Print information on current container when running in a container
|
||||
ansible.builtin.debug:
|
||||
msg: "Container ID is {{ ansible_module_container_id }}"
|
||||
when: ansible_module_running_in_container
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
ansible_facts:
|
||||
description: Ansible facts returned by the module
|
||||
type: dict
|
||||
returned: always
|
||||
contains:
|
||||
ansible_module_running_in_container:
|
||||
description:
|
||||
- Whether the module was able to detect that it runs in a container or not.
|
||||
returned: always
|
||||
type: bool
|
||||
ansible_module_container_id:
|
||||
description:
|
||||
- The detected container ID.
|
||||
- Contains an empty string if no container was detected.
|
||||
returned: always
|
||||
type: str
|
||||
ansible_module_container_type:
|
||||
description:
|
||||
- The detected container environment.
|
||||
- Contains an empty string if no container was detected.
|
||||
- Otherwise, will be one of C(docker), C(azure_pipelines), or C(github_actions).
|
||||
- C(github_actions) is supported since community.docker 2.4.0.
|
||||
returned: always
|
||||
type: str
|
||||
choices:
|
||||
- ''
|
||||
- docker
|
||||
- azure_pipelines
|
||||
- github_actions
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def main():
    """Entry point: detect whether the module runs inside a container and
    report the result as Ansible facts (container flag, ID, and type)."""
    module = AnsibleModule(dict(), supports_check_mode=True)

    cpuset_path = '/proc/self/cpuset'
    container_id = ''
    container_type = ''

    # File content varies based on the environment:
    #   No Container: /
    #   Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507
    #   Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891
    #   Podman: /../../../../../..
    if os.path.exists(cpuset_path):
        with open(cpuset_path, 'rb') as f:
            cpuset = f.read().decode('utf-8')

        parent, name = os.path.split(cpuset.strip())

        # The parent directory of the cgroup entry identifies the environment.
        detected_type = {
            '/docker': 'docker',
            '/azpl_job': 'azure_pipelines',
            '/actions_job': 'github_actions',
        }.get(parent)
        if detected_type is not None:
            container_id = name
            container_type = detected_type

    module.exit_json(ansible_facts=dict(
        ansible_module_running_in_container=container_id != '',
        ansible_module_container_id=container_id,
        ansible_module_container_type=container_type,
    ))


if __name__ == '__main__':
    main()
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,424 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_config
|
||||
|
||||
short_description: Manage docker configs.
|
||||
|
||||
|
||||
description:
|
||||
- Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm).
|
||||
- Adds to the metadata of new configs 'ansible_key', an encrypted hash representation of the data, which is then used
|
||||
in future runs to test if a config has changed. If 'ansible_key' is not present, then a config will not be updated
|
||||
unless the I(force) option is set.
|
||||
- Updates to configs are performed by removing the config and creating it again.
|
||||
options:
|
||||
data:
|
||||
description:
|
||||
- The value of the config.
|
||||
- Mutually exclusive with I(data_src). One of I(data) and I(data_src) is required if I(state=present).
|
||||
type: str
|
||||
data_is_b64:
|
||||
description:
|
||||
- If set to C(true), the data is assumed to be Base64 encoded and will be
|
||||
decoded before being used.
|
||||
- To use binary I(data), it is better to keep it Base64 encoded and let it
|
||||
be decoded by this option.
|
||||
type: bool
|
||||
default: no
|
||||
data_src:
|
||||
description:
|
||||
- The file on the target from which to read the config.
|
||||
- Mutually exclusive with I(data). One of I(data) and I(data_src) is required if I(state=present).
|
||||
type: path
|
||||
version_added: 1.10.0
|
||||
labels:
|
||||
description:
|
||||
- "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string."
|
||||
- If new meta data is provided, or existing meta data is modified, the config will be updated by removing it and creating it again.
|
||||
type: dict
|
||||
force:
|
||||
description:
|
||||
- Use with state C(present) to always remove and recreate an existing config.
|
||||
- If C(true), an existing config will be replaced, even if it has not been changed.
|
||||
type: bool
|
||||
default: no
|
||||
rolling_versions:
|
||||
description:
|
||||
- If set to C(true), configs are created with an increasing version number appended to their name.
|
||||
- Adds a label containing the version number to the managed configs with the name C(ansible_version).
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 2.2.0
|
||||
versions_to_keep:
|
||||
description:
|
||||
- When using I(rolling_versions), the number of old versions of the config to keep.
|
||||
- Extraneous old configs are deleted after the new one is created.
|
||||
- Set to C(-1) to keep everything or to C(0) or C(1) to keep only the current one.
|
||||
type: int
|
||||
default: 5
|
||||
version_added: 2.2.0
|
||||
name:
|
||||
description:
|
||||
- The name of the config.
|
||||
type: str
|
||||
required: yes
|
||||
state:
|
||||
description:
|
||||
- Set to C(present), if the config should exist, and C(absent), if it should not.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
template_driver:
|
||||
description:
|
||||
- Set to C(golang) to use a Go template in I(data) or a Go template file in I(data_src).
|
||||
type: str
|
||||
choices:
|
||||
- golang
|
||||
version_added: 2.5.0
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_2_documentation
|
||||
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0"
|
||||
- "Docker API >= 1.30"
|
||||
|
||||
author:
|
||||
- Chris Houseknecht (@chouseknecht)
|
||||
- John Hu (@ushuz)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
- name: Create config foo (from a file on the control machine)
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
# If the file is JSON or binary, Ansible might modify it (because
|
||||
# it is first decoded and later re-encoded). Base64-encoding the
|
||||
# file directly after reading it prevents this to happen.
|
||||
data: "{{ lookup('file', '/path/to/config/file') | b64encode }}"
|
||||
data_is_b64: true
|
||||
state: present
|
||||
|
||||
- name: Create config foo (from a file on the target machine)
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
data_src: /path/to/config/file
|
||||
state: present
|
||||
|
||||
- name: Change the config data
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: baz
|
||||
one: '1'
|
||||
state: present
|
||||
|
||||
- name: Add a new label
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: baz
|
||||
one: '1'
|
||||
# Adding a new label will cause a remove/create of the config
|
||||
two: '2'
|
||||
state: present
|
||||
|
||||
- name: No change
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: baz
|
||||
one: '1'
|
||||
# Even though 'two' is missing, there is no change to the existing config
|
||||
state: present
|
||||
|
||||
- name: Update an existing label
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: monkey # Changing a label will cause a remove/create of the config
|
||||
one: '1'
|
||||
state: present
|
||||
|
||||
- name: Force the (re-)creation of the config
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
force: yes
|
||||
state: present
|
||||
|
||||
- name: Remove config foo
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
config_id:
|
||||
description:
|
||||
- The ID assigned by Docker to the config object.
|
||||
returned: success and I(state) is C(present)
|
||||
type: str
|
||||
sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
|
||||
config_name:
|
||||
description:
|
||||
- The name of the created config object.
|
||||
returned: success and I(state) is C(present)
|
||||
type: str
|
||||
sample: 'awesome_config'
|
||||
version_added: 2.2.0
|
||||
'''
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
DockerBaseClass,
|
||||
compare_generic,
|
||||
RequestException,
|
||||
)
|
||||
from ansible.module_utils.common.text.converters import to_native, to_bytes
|
||||
|
||||
|
||||
class ConfigManager(DockerBaseClass):
|
||||
|
||||
def __init__(self, client, results):
|
||||
|
||||
super(ConfigManager, self).__init__()
|
||||
|
||||
self.client = client
|
||||
self.results = results
|
||||
self.check_mode = self.client.check_mode
|
||||
|
||||
parameters = self.client.module.params
|
||||
self.name = parameters.get('name')
|
||||
self.state = parameters.get('state')
|
||||
self.data = parameters.get('data')
|
||||
if self.data is not None:
|
||||
if parameters.get('data_is_b64'):
|
||||
self.data = base64.b64decode(self.data)
|
||||
else:
|
||||
self.data = to_bytes(self.data)
|
||||
data_src = parameters.get('data_src')
|
||||
if data_src is not None:
|
||||
try:
|
||||
with open(data_src, 'rb') as f:
|
||||
self.data = f.read()
|
||||
except Exception as exc:
|
||||
self.client.fail('Error while reading {src}: {error}'.format(src=data_src, error=to_native(exc)))
|
||||
self.labels = parameters.get('labels')
|
||||
self.force = parameters.get('force')
|
||||
self.rolling_versions = parameters.get('rolling_versions')
|
||||
self.versions_to_keep = parameters.get('versions_to_keep')
|
||||
self.template_driver = parameters.get('template_driver')
|
||||
|
||||
if self.rolling_versions:
|
||||
self.version = 0
|
||||
self.data_key = None
|
||||
self.configs = []
|
||||
|
||||
def __call__(self):
|
||||
self.get_config()
|
||||
if self.state == 'present':
|
||||
self.data_key = hashlib.sha224(self.data).hexdigest()
|
||||
self.present()
|
||||
self.remove_old_versions()
|
||||
elif self.state == 'absent':
|
||||
self.absent()
|
||||
|
||||
def get_version(self, config):
|
||||
try:
|
||||
return int(config.get('Spec', {}).get('Labels', {}).get('ansible_version', 0))
|
||||
except ValueError:
|
||||
return 0
|
||||
|
||||
def remove_old_versions(self):
|
||||
if not self.rolling_versions or self.versions_to_keep < 0:
|
||||
return
|
||||
if not self.check_mode:
|
||||
while len(self.configs) > max(self.versions_to_keep, 1):
|
||||
self.remove_config(self.configs.pop(0))
|
||||
|
||||
def get_config(self):
|
||||
''' Find an existing config. '''
|
||||
try:
|
||||
configs = self.client.configs(filters={'name': self.name})
|
||||
except APIError as exc:
|
||||
self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc)))
|
||||
|
||||
if self.rolling_versions:
|
||||
self.configs = [
|
||||
config
|
||||
for config in configs
|
||||
if config['Spec']['Name'].startswith('{name}_v'.format(name=self.name))
|
||||
]
|
||||
self.configs.sort(key=self.get_version)
|
||||
else:
|
||||
self.configs = [
|
||||
config for config in configs if config['Spec']['Name'] == self.name
|
||||
]
|
||||
|
||||
def create_config(self):
|
||||
''' Create a new config '''
|
||||
config_id = None
|
||||
# We can't see the data after creation, so adding a label we can use for idempotency check
|
||||
labels = {
|
||||
'ansible_key': self.data_key
|
||||
}
|
||||
if self.rolling_versions:
|
||||
self.version += 1
|
||||
labels['ansible_version'] = str(self.version)
|
||||
self.name = '{name}_v{version}'.format(name=self.name, version=self.version)
|
||||
if self.labels:
|
||||
labels.update(self.labels)
|
||||
|
||||
try:
|
||||
if not self.check_mode:
|
||||
# only use templating argument when self.template_driver is defined
|
||||
kwargs = {}
|
||||
if self.template_driver:
|
||||
kwargs['templating'] = {
|
||||
'name': self.template_driver
|
||||
}
|
||||
config_id = self.client.create_config(self.name, self.data, labels=labels, **kwargs)
|
||||
self.configs += self.client.configs(filters={'id': config_id})
|
||||
except APIError as exc:
|
||||
self.client.fail("Error creating config: %s" % to_native(exc))
|
||||
|
||||
if isinstance(config_id, dict):
|
||||
config_id = config_id['ID']
|
||||
|
||||
return config_id
|
||||
|
||||
def remove_config(self, config):
|
||||
try:
|
||||
if not self.check_mode:
|
||||
self.client.remove_config(config['ID'])
|
||||
except APIError as exc:
|
||||
self.client.fail("Error removing config %s: %s" % (config['Spec']['Name'], to_native(exc)))
|
||||
|
||||
def present(self):
|
||||
''' Handles state == 'present', creating or updating the config '''
|
||||
if self.configs:
|
||||
config = self.configs[-1]
|
||||
self.results['config_id'] = config['ID']
|
||||
self.results['config_name'] = config['Spec']['Name']
|
||||
data_changed = False
|
||||
template_driver_changed = False
|
||||
attrs = config.get('Spec', {})
|
||||
if attrs.get('Labels', {}).get('ansible_key'):
|
||||
if attrs['Labels']['ansible_key'] != self.data_key:
|
||||
data_changed = True
|
||||
else:
|
||||
if not self.force:
|
||||
self.client.module.warn("'ansible_key' label not found. Config will not be changed unless the force parameter is set to 'yes'")
|
||||
# template_driver has changed if it was set in the previous config
|
||||
# and now it differs, or if it wasn't set but now it is.
|
||||
if attrs.get('Templating', {}).get('Name'):
|
||||
if attrs['Templating']['Name'] != self.template_driver:
|
||||
template_driver_changed = True
|
||||
elif self.template_driver:
|
||||
template_driver_changed = True
|
||||
labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
|
||||
if self.rolling_versions:
|
||||
self.version = self.get_version(config)
|
||||
if data_changed or template_driver_changed or labels_changed or self.force:
|
||||
# if something changed or force, delete and re-create the config
|
||||
if not self.rolling_versions:
|
||||
self.absent()
|
||||
config_id = self.create_config()
|
||||
self.results['changed'] = True
|
||||
self.results['config_id'] = config_id
|
||||
self.results['config_name'] = self.name
|
||||
else:
|
||||
self.results['changed'] = True
|
||||
self.results['config_id'] = self.create_config()
|
||||
self.results['config_name'] = self.name
|
||||
|
||||
def absent(self):
|
||||
''' Handles state == 'absent', removing the config '''
|
||||
if self.configs:
|
||||
for config in self.configs:
|
||||
self.remove_config(config)
|
||||
self.results['changed'] = True
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
data=dict(type='str'),
|
||||
data_is_b64=dict(type='bool', default=False),
|
||||
data_src=dict(type='path'),
|
||||
labels=dict(type='dict'),
|
||||
force=dict(type='bool', default=False),
|
||||
rolling_versions=dict(type='bool', default=False),
|
||||
versions_to_keep=dict(type='int', default=5),
|
||||
template_driver=dict(type='str', choices=['golang']),
|
||||
)
|
||||
|
||||
required_if = [
|
||||
('state', 'present', ['data', 'data_src'], True),
|
||||
]
|
||||
|
||||
mutually_exclusive = [
|
||||
('data', 'data_src'),
|
||||
]
|
||||
|
||||
option_minimal_versions = dict(
|
||||
template_driver=dict(docker_py_version='5.0.3', docker_api_version='1.37'),
|
||||
)
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
required_if=required_if,
|
||||
mutually_exclusive=mutually_exclusive,
|
||||
min_docker_version='2.6.0',
|
||||
min_docker_api_version='1.30',
|
||||
option_minimal_versions=option_minimal_versions,
|
||||
)
|
||||
|
||||
try:
|
||||
results = dict(
|
||||
changed=False,
|
||||
)
|
||||
|
||||
ConfigManager(client, results)()
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,314 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_container_exec
|
||||
|
||||
short_description: Execute command in a docker container
|
||||
|
||||
version_added: 1.5.0
|
||||
|
||||
description:
|
||||
- Executes a command in a Docker container.
|
||||
|
||||
options:
|
||||
container:
|
||||
type: str
|
||||
required: true
|
||||
description:
|
||||
- The name of the container to execute the command in.
|
||||
argv:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- The command to execute.
|
||||
- Since this is a list of arguments, no quoting is needed.
|
||||
- Exactly one of I(argv) and I(command) must be specified.
|
||||
command:
|
||||
type: str
|
||||
description:
|
||||
- The command to execute.
|
||||
- Exactly one of I(argv) and I(command) must be specified.
|
||||
chdir:
|
||||
type: str
|
||||
description:
|
||||
- The directory to run the command in.
|
||||
detach:
|
||||
description:
|
||||
- Whether to run the command synchronously (I(detach=false), default) or asynchronously (I(detach=true)).
|
||||
- If set to C(true), I(stdin) cannot be provided, and the return values C(stdout), C(stderr) and
|
||||
C(rc) are not returned.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 2.1.0
|
||||
user:
|
||||
type: str
|
||||
description:
|
||||
- If specified, the user to execute this command with.
|
||||
stdin:
|
||||
type: str
|
||||
description:
|
||||
- Set the stdin of the command directly to the specified value.
|
||||
- Can only be used if I(detach=false).
|
||||
stdin_add_newline:
|
||||
type: bool
|
||||
default: true
|
||||
description:
|
||||
- If set to C(true), appends a newline to I(stdin).
|
||||
strip_empty_ends:
|
||||
type: bool
|
||||
default: true
|
||||
description:
|
||||
- Strip empty lines from the end of stdout/stderr in result.
|
||||
tty:
|
||||
type: bool
|
||||
default: false
|
||||
description:
|
||||
- Whether to allocate a TTY.
|
||||
env:
|
||||
description:
|
||||
- Dictionary of environment variables with their respective values to be passed to the command ran inside the container.
|
||||
- Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example C("true")) in order to avoid data loss.
|
||||
- Please note that if you are passing values in with Jinja2 templates, like C("{{ value }}"), you need to add C(| string) to prevent Ansible to
|
||||
convert strings such as C("true") back to booleans. The correct way is to use C("{{ value | string }}").
|
||||
type: dict
|
||||
version_added: 2.1.0
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
notes:
|
||||
- Does not support C(check_mode).
|
||||
author:
|
||||
- "Felix Fontein (@felixfontein)"
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Docker API >= 1.20"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Run a simple command (command)
|
||||
community.docker.docker_container_exec:
|
||||
container: foo
|
||||
command: /bin/bash -c "ls -lah"
|
||||
chdir: /root
|
||||
register: result
|
||||
|
||||
- name: Print stdout
|
||||
debug:
|
||||
var: result.stdout
|
||||
|
||||
- name: Run a simple command (argv)
|
||||
community.docker.docker_container_exec:
|
||||
container: foo
|
||||
argv:
|
||||
- /bin/bash
|
||||
- "-c"
|
||||
- "ls -lah > /dev/stderr"
|
||||
chdir: /root
|
||||
register: result
|
||||
|
||||
- name: Print stderr lines
|
||||
debug:
|
||||
var: result.stderr_lines
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
stdout:
|
||||
type: str
|
||||
returned: success and I(detach=false)
|
||||
description:
|
||||
- The standard output of the container command.
|
||||
stderr:
|
||||
type: str
|
||||
returned: success and I(detach=false)
|
||||
description:
|
||||
- The standard error output of the container command.
|
||||
rc:
|
||||
type: int
|
||||
returned: success and I(detach=false)
|
||||
sample: 0
|
||||
description:
|
||||
- The exit code of the command.
|
||||
exec_id:
|
||||
type: str
|
||||
returned: success and I(detach=true)
|
||||
sample: 249d9e3075655baf705ed8f40488c5e9434049cf3431976f1bfdb73741c574c5
|
||||
description:
|
||||
- The execution ID of the command.
|
||||
version_added: 2.1.0
|
||||
'''
|
||||
|
||||
import shlex
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native
|
||||
from ansible.module_utils.six import string_types
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.socket_helper import (
|
||||
shutdown_writing,
|
||||
write_to_socket,
|
||||
)
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.socket_handler import (
|
||||
find_selectors,
|
||||
DockerSocketHandlerModule,
|
||||
)
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError, NotFound
|
||||
except Exception:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
container=dict(type='str', required=True),
|
||||
argv=dict(type='list', elements='str'),
|
||||
command=dict(type='str'),
|
||||
chdir=dict(type='str'),
|
||||
detach=dict(type='bool', default=False),
|
||||
user=dict(type='str'),
|
||||
stdin=dict(type='str'),
|
||||
stdin_add_newline=dict(type='bool', default=True),
|
||||
strip_empty_ends=dict(type='bool', default=True),
|
||||
tty=dict(type='bool', default=False),
|
||||
env=dict(type='dict'),
|
||||
)
|
||||
|
||||
option_minimal_versions = dict(
|
||||
chdir=dict(docker_py_version='3.0.0', docker_api_version='1.35'),
|
||||
env=dict(docker_py_version='2.3.0', docker_api_version='1.25'),
|
||||
)
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
option_minimal_versions=option_minimal_versions,
|
||||
min_docker_api_version='1.20',
|
||||
mutually_exclusive=[('argv', 'command')],
|
||||
required_one_of=[('argv', 'command')],
|
||||
)
|
||||
|
||||
container = client.module.params['container']
|
||||
argv = client.module.params['argv']
|
||||
command = client.module.params['command']
|
||||
chdir = client.module.params['chdir']
|
||||
detach = client.module.params['detach']
|
||||
user = client.module.params['user']
|
||||
stdin = client.module.params['stdin']
|
||||
strip_empty_ends = client.module.params['strip_empty_ends']
|
||||
tty = client.module.params['tty']
|
||||
env = client.module.params['env']
|
||||
|
||||
if env is not None:
|
||||
for name, value in list(env.items()):
|
||||
if not isinstance(value, string_types):
|
||||
client.module.fail_json(
|
||||
msg="Non-string value found for env option. Ambiguous env options must be "
|
||||
"wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ))
|
||||
env[name] = to_text(value, errors='surrogate_or_strict')
|
||||
|
||||
if command is not None:
|
||||
argv = shlex.split(command)
|
||||
|
||||
if detach and stdin is not None:
|
||||
client.module.fail_json(msg='If detach=true, stdin cannot be provided.')
|
||||
|
||||
if stdin is not None and client.module.params['stdin_add_newline']:
|
||||
stdin += '\n'
|
||||
|
||||
selectors = None
|
||||
if stdin and not detach:
|
||||
selectors = find_selectors(client.module)
|
||||
|
||||
try:
|
||||
kwargs = {}
|
||||
if chdir is not None:
|
||||
kwargs['workdir'] = chdir
|
||||
if env is not None:
|
||||
kwargs['environment'] = env
|
||||
exec_data = client.exec_create(
|
||||
container,
|
||||
argv,
|
||||
stdout=True,
|
||||
stderr=True,
|
||||
stdin=bool(stdin),
|
||||
user=user or '',
|
||||
**kwargs
|
||||
)
|
||||
exec_id = exec_data['Id']
|
||||
|
||||
if detach:
|
||||
client.exec_start(exec_id, tty=tty, detach=True)
|
||||
client.module.exit_json(changed=True, exec_id=exec_id)
|
||||
|
||||
else:
|
||||
if selectors:
|
||||
exec_socket = client.exec_start(
|
||||
exec_id,
|
||||
tty=tty,
|
||||
detach=False,
|
||||
socket=True,
|
||||
)
|
||||
try:
|
||||
with DockerSocketHandlerModule(exec_socket, client.module, selectors) as exec_socket_handler:
|
||||
if stdin:
|
||||
exec_socket_handler.write(to_bytes(stdin))
|
||||
|
||||
stdout, stderr = exec_socket_handler.consume()
|
||||
finally:
|
||||
exec_socket.close()
|
||||
else:
|
||||
stdout, stderr = client.exec_start(
|
||||
exec_id,
|
||||
tty=tty,
|
||||
detach=False,
|
||||
stream=False,
|
||||
socket=False,
|
||||
demux=True,
|
||||
)
|
||||
|
||||
result = client.exec_inspect(exec_id)
|
||||
|
||||
stdout = to_text(stdout or b'')
|
||||
stderr = to_text(stderr or b'')
|
||||
if strip_empty_ends:
|
||||
stdout = stdout.rstrip('\r\n')
|
||||
stderr = stderr.rstrip('\r\n')
|
||||
|
||||
client.module.exit_json(
|
||||
changed=True,
|
||||
stdout=stdout,
|
||||
stderr=stderr,
|
||||
rc=result.get('ExitCode') or 0,
|
||||
)
|
||||
except NotFound:
|
||||
client.fail('Could not find container "{0}"'.format(container))
|
||||
except APIError as e:
|
||||
if e.response and e.response.status_code == 409:
|
||||
client.fail('The container "{0}" has been paused ({1})'.format(container, to_native(e)))
|
||||
client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
|
||||
except DockerException as e:
|
||||
client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,149 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_container_info
|
||||
|
||||
short_description: Retrieves facts about docker container
|
||||
|
||||
description:
|
||||
- Retrieves facts about a docker container.
|
||||
- Essentially returns the output of C(docker inspect <name>), similar to what M(community.docker.docker_container)
|
||||
returns for a non-absent container.
|
||||
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the container to inspect.
|
||||
- When identifying an existing container name may be a name or a long or short container ID.
|
||||
type: str
|
||||
required: yes
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
author:
|
||||
- "Felix Fontein (@felixfontein)"
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Docker API >= 1.20"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get infos on container
|
||||
community.docker.docker_container_info:
|
||||
name: mydata
|
||||
register: result
|
||||
|
||||
- name: Does container exist?
|
||||
ansible.builtin.debug:
|
||||
msg: "The container {{ 'exists' if result.exists else 'does not exist' }}"
|
||||
|
||||
- name: Print information about container
|
||||
ansible.builtin.debug:
|
||||
var: result.container
|
||||
when: result.exists
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
exists:
|
||||
description:
|
||||
- Returns whether the container exists.
|
||||
type: bool
|
||||
returned: always
|
||||
sample: true
|
||||
container:
|
||||
description:
|
||||
- Facts representing the current state of the container. Matches the docker inspection output.
|
||||
- Will be C(none) if container does not exist.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: '{
|
||||
"AppArmorProfile": "",
|
||||
"Args": [],
|
||||
"Config": {
|
||||
"AttachStderr": false,
|
||||
"AttachStdin": false,
|
||||
"AttachStdout": false,
|
||||
"Cmd": [
|
||||
"/usr/bin/supervisord"
|
||||
],
|
||||
"Domainname": "",
|
||||
"Entrypoint": null,
|
||||
"Env": [
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
],
|
||||
"ExposedPorts": {
|
||||
"443/tcp": {},
|
||||
"80/tcp": {}
|
||||
},
|
||||
"Hostname": "8e47bf643eb9",
|
||||
"Image": "lnmp_nginx:v1",
|
||||
"Labels": {},
|
||||
"OnBuild": null,
|
||||
"OpenStdin": false,
|
||||
"StdinOnce": false,
|
||||
"Tty": false,
|
||||
"User": "",
|
||||
"Volumes": {
|
||||
"/tmp/lnmp/nginx-sites/logs/": {}
|
||||
},
|
||||
...
|
||||
}'
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
name=dict(type='str', required=True),
|
||||
)
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
min_docker_api_version='1.20',
|
||||
)
|
||||
|
||||
try:
|
||||
container = client.get_container(client.module.params['name'])
|
||||
|
||||
client.module.exit_json(
|
||||
changed=False,
|
||||
exists=(True if container else False),
|
||||
container=container,
|
||||
)
|
||||
except DockerException as e:
|
||||
client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,362 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_host_info
|
||||
|
||||
short_description: Retrieves facts about docker host and lists of objects of the services.
|
||||
|
||||
description:
|
||||
- Retrieves facts about a docker host.
|
||||
- Essentially returns the output of C(docker system info).
|
||||
- The module also allows to list object names for containers, images, networks and volumes.
|
||||
It also allows to query information on disk usage.
|
||||
- The output differs depending on API version of the docker daemon.
|
||||
- If the docker daemon cannot be contacted or does not meet the API version requirements,
|
||||
the module will fail.
|
||||
|
||||
|
||||
options:
|
||||
containers:
|
||||
description:
|
||||
- Whether to list containers.
|
||||
type: bool
|
||||
default: no
|
||||
containers_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting containers to list.
|
||||
- "For example, C(until: 24h)."
|
||||
- C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string
|
||||
C(<key>=<value>) matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value).
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
images:
|
||||
description:
|
||||
- Whether to list images.
|
||||
type: bool
|
||||
default: no
|
||||
images_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting images to list.
|
||||
- "For example, C(dangling: true)."
|
||||
- C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string
|
||||
C(<key>=<value>) matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value).
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
networks:
|
||||
description:
|
||||
- Whether to list networks.
|
||||
type: bool
|
||||
default: no
|
||||
networks_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting networks to list.
|
||||
- C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string
|
||||
C(<key>=<value>) matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value).
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
volumes:
|
||||
description:
|
||||
- Whether to list volumes.
|
||||
type: bool
|
||||
default: no
|
||||
volumes_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting volumes to list.
|
||||
- C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string
|
||||
C(<key>=<value>) matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value).
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
disk_usage:
|
||||
description:
|
||||
- Summary information on used disk space by all Docker layers.
|
||||
- The output is a sum of images, volumes, containers and build cache.
|
||||
type: bool
|
||||
default: no
|
||||
verbose_output:
|
||||
description:
|
||||
- When set to C(yes) and I(networks), I(volumes), I(images), I(containers) or I(disk_usage) is set to C(yes)
|
||||
then output will contain verbose information about objects matching the full output of API method.
|
||||
For details see the documentation of your version of Docker API at U(https://docs.docker.com/engine/api/).
|
||||
- The verbose output in this module contains only subset of information returned by I(_info) module
|
||||
for each type of the objects.
|
||||
type: bool
|
||||
default: no
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
author:
|
||||
- Piotr Wojciechowski (@WojciechowskiPiotr)
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Docker API >= 1.21"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get info on docker host
|
||||
community.docker.docker_host_info:
|
||||
register: result
|
||||
|
||||
- name: Get info on docker host and list images
|
||||
community.docker.docker_host_info:
|
||||
images: yes
|
||||
register: result
|
||||
|
||||
- name: Get info on docker host and list images matching the filter
|
||||
community.docker.docker_host_info:
|
||||
images: yes
|
||||
images_filters:
|
||||
label: "mylabel"
|
||||
register: result
|
||||
|
||||
- name: Get info on docker host and verbose list images
|
||||
community.docker.docker_host_info:
|
||||
images: yes
|
||||
verbose_output: yes
|
||||
register: result
|
||||
|
||||
- name: Get info on docker host and used disk space
|
||||
community.docker.docker_host_info:
|
||||
disk_usage: yes
|
||||
register: result
|
||||
|
||||
- name: Get info on docker host and list containers matching the filter
|
||||
community.docker.docker_host_info:
|
||||
containers: yes
|
||||
containers_filters:
|
||||
label:
|
||||
- key1=value1
|
||||
- key2=value2
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
var: result.host_info
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
can_talk_to_docker:
|
||||
description:
|
||||
- Will be C(true) if the module can talk to the docker daemon.
|
||||
returned: both on success and on error
|
||||
type: bool
|
||||
|
||||
host_info:
|
||||
description:
|
||||
- Facts representing the basic state of the docker host. Matches the C(docker system info) output.
|
||||
returned: always
|
||||
type: dict
|
||||
volumes:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each volume.
|
||||
Keys matches the C(docker volume ls) output unless I(verbose_output=yes).
|
||||
See description for I(verbose_output).
|
||||
returned: When I(volumes) is C(yes)
|
||||
type: list
|
||||
elements: dict
|
||||
networks:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each network.
|
||||
Keys matches the C(docker network ls) output unless I(verbose_output=yes).
|
||||
See description for I(verbose_output).
|
||||
returned: When I(networks) is C(yes)
|
||||
type: list
|
||||
elements: dict
|
||||
containers:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each container.
|
||||
Keys matches the C(docker container ls) output unless I(verbose_output=yes).
|
||||
See description for I(verbose_output).
|
||||
returned: When I(containers) is C(yes)
|
||||
type: list
|
||||
elements: dict
|
||||
images:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each image.
|
||||
Keys matches the C(docker image ls) output unless I(verbose_output=yes).
|
||||
See description for I(verbose_output).
|
||||
returned: When I(images) is C(yes)
|
||||
type: list
|
||||
elements: dict
|
||||
disk_usage:
|
||||
description:
|
||||
- Information on summary disk usage by images, containers and volumes on docker host
|
||||
unless I(verbose_output=yes). See description for I(verbose_output).
|
||||
returned: When I(disk_usage) is C(yes)
|
||||
type: dict
|
||||
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
DockerBaseClass,
|
||||
RequestException,
|
||||
)
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except ImportError:
|
||||
# Missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import clean_dict_booleans_for_docker_api
|
||||
|
||||
|
||||
class DockerHostManager(DockerBaseClass):
    """Collect facts about the Docker host and, optionally, lists of its objects.

    The constructor does all the work: it queries the daemon according to the
    module parameters and writes everything into the ``results`` dict that the
    caller passed in (mutated in place, nothing is returned).
    """

    def __init__(self, client, results):
        # client: AnsibleDockerClient wrapping the Docker SDK connection.
        # results: module result dict; filled in-place with the gathered facts.

        super(DockerHostManager, self).__init__()

        self.client = client
        self.results = results
        self.verbose_output = self.client.module.params['verbose_output']

        # Object types for which the module offers an "<name>: yes" toggle
        # plus a matching "<name>_filters" option.
        listed_objects = ['volumes', 'networks', 'containers', 'images']

        # Host info is always gathered, regardless of the other toggles.
        self.results['host_info'] = self.get_docker_host_info()

        if self.client.module.params['disk_usage']:
            self.results['disk_usage'] = self.get_docker_disk_usage_facts()

        for docker_object in listed_objects:
            if self.client.module.params[docker_object]:
                returned_name = docker_object
                # e.g. 'volumes' -> 'volumes_filters'; booleans in the filter
                # dict are normalized to the strings the Docker API expects.
                filter_name = docker_object + "_filters"
                filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name), True)
                self.results[returned_name] = self.get_docker_items_list(docker_object, filters)

    def get_docker_host_info(self):
        """Return the daemon's ``info()`` dict; fail the module on API errors."""
        try:
            return self.client.info()
        except APIError as exc:
            # client.fail() exits the module, so no return value is needed here.
            self.client.fail("Error inspecting docker host: %s" % to_native(exc))

    def get_docker_disk_usage_facts(self):
        """Return disk usage facts from ``df()``.

        With verbose output the full ``df()`` result is returned; otherwise
        only the summary ``LayersSize`` value is kept.
        """
        try:
            if self.verbose_output:
                return self.client.df()
            else:
                return dict(LayersSize=self.client.df()['LayersSize'])
        except APIError as exc:
            self.client.fail("Error inspecting docker host: %s" % to_native(exc))

    def get_docker_items_list(self, docker_object=None, filters=None, verbose=False):
        """List Docker objects of one type, optionally filtered.

        :param docker_object: one of 'containers', 'networks', 'images', 'volumes'
        :param filters: filter dict passed to the corresponding list API call
        :param verbose: NOTE(review): currently unused; the instance-level
            self.verbose_output is consulted instead — confirm before removing.
        :return: full API result when verbose output is enabled, otherwise a
            list of dicts trimmed to the per-type header keys below.
        """
        items = None
        items_list = []

        # Keys kept in non-verbose mode, mirroring `docker <object> ls` columns.
        header_containers = ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports', 'Names']
        header_volumes = ['Driver', 'Name']
        header_images = ['Id', 'RepoTags', 'Created', 'Size']
        header_networks = ['Id', 'Driver', 'Name', 'Scope']

        filter_arg = dict()
        if filters:
            filter_arg['filters'] = filters
        try:
            if docker_object == 'containers':
                items = self.client.containers(**filter_arg)
            elif docker_object == 'networks':
                items = self.client.networks(**filter_arg)
            elif docker_object == 'images':
                items = self.client.images(**filter_arg)
            elif docker_object == 'volumes':
                items = self.client.volumes(**filter_arg)
        except APIError as exc:
            self.client.fail("Error inspecting docker host for object '%s': %s" %
                             (docker_object, to_native(exc)))

        if self.verbose_output:
            # volumes() wraps the actual list in a 'Volumes' key.
            if docker_object != 'volumes':
                return items
            else:
                return items['Volumes']

        if docker_object == 'volumes':
            items = items['Volumes']

        # Trim each item down to the header keys for this object type.
        for item in items:
            item_record = dict()

            if docker_object == 'containers':
                for key in header_containers:
                    item_record[key] = item.get(key)
            elif docker_object == 'networks':
                for key in header_networks:
                    item_record[key] = item.get(key)
            elif docker_object == 'images':
                for key in header_images:
                    item_record[key] = item.get(key)
            elif docker_object == 'volumes':
                for key in header_volumes:
                    item_record[key] = item.get(key)
            items_list.append(item_record)

        return items_list
|
||||
|
||||
|
||||
def main():
    """Entry point of the docker_host_info module.

    Gathers host facts (and optional object lists) via DockerHostManager and
    exits with the collected results.
    """
    # Each *_filters dict is passed through to the daemon as list filters for
    # the corresponding object type.
    argument_spec = dict(
        containers=dict(type='bool', default=False),
        containers_filters=dict(type='dict'),
        images=dict(type='bool', default=False),
        images_filters=dict(type='dict'),
        networks=dict(type='bool', default=False),
        networks_filters=dict(type='dict'),
        volumes=dict(type='bool', default=False),
        volumes_filters=dict(type='dict'),
        disk_usage=dict(type='bool', default=False),
        verbose_output=dict(type='bool', default=False),
    )

    # Minimal Docker SDK for Python versions required by individual options.
    # BUGFIX: the key must match the option name in argument_spec exactly; the
    # previous key 'network_filters' matched no option ('networks_filters' is
    # the real name), so the docker-py >= 2.0.2 check was never applied.
    option_minimal_versions = dict(
        networks_filters=dict(docker_py_version='2.0.2'),
        disk_usage=dict(docker_py_version='2.2.0'),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_version='1.10.0',
        min_docker_api_version='1.21',
        option_minimal_versions=option_minimal_versions,
        fail_results=dict(
            can_talk_to_docker=False,
        ),
    )
    # Client construction succeeded, so the daemon is reachable; record that
    # for any failure result emitted later.
    client.fail_results['can_talk_to_docker'] = True

    try:
        results = dict(
            changed=False,
        )

        # The constructor performs all gathering and mutates `results` in place.
        DockerHostManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1,923 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_image
|
||||
|
||||
short_description: Manage docker images
|
||||
|
||||
|
||||
description:
|
||||
- Build, load or pull an image, making the image available for creating containers. Also supports tagging
|
||||
an image, pushing an image, and archiving an image to a C(.tar) file.
|
||||
|
||||
options:
|
||||
source:
|
||||
description:
|
||||
- "Determines where the module will try to retrieve the image from."
|
||||
- "Use C(build) to build the image from a C(Dockerfile). I(build.path) must
|
||||
be specified when this value is used."
|
||||
- "Use C(load) to load the image from a C(.tar) file. I(load_path) must
|
||||
be specified when this value is used."
|
||||
- "Use C(pull) to pull the image from a registry."
|
||||
- "Use C(local) to make sure that the image is already available on the local
|
||||
docker daemon. This means that the module does not try to build, pull or load the image."
|
||||
type: str
|
||||
choices:
|
||||
- build
|
||||
- load
|
||||
- pull
|
||||
- local
|
||||
build:
|
||||
description:
|
||||
- "Specifies options used for building images."
|
||||
type: dict
|
||||
suboptions:
|
||||
cache_from:
|
||||
description:
|
||||
- List of image names to consider as cache source.
|
||||
type: list
|
||||
elements: str
|
||||
dockerfile:
|
||||
description:
|
||||
- Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
|
||||
- This can also include a relative path (relative to I(path)).
|
||||
type: str
|
||||
http_timeout:
|
||||
description:
|
||||
- Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
|
||||
seconds.
|
||||
type: int
|
||||
path:
|
||||
description:
|
||||
- Use with state 'present' to build an image. Will be the path to a directory containing the context and
|
||||
Dockerfile for building an image.
|
||||
type: path
|
||||
required: yes
|
||||
pull:
|
||||
description:
|
||||
- When building an image downloads any updates to the FROM image in Dockerfile.
|
||||
type: bool
|
||||
default: no
|
||||
rm:
|
||||
description:
|
||||
- Remove intermediate containers after build.
|
||||
type: bool
|
||||
default: yes
|
||||
network:
|
||||
description:
|
||||
- The network to use for C(RUN) build instructions.
|
||||
type: str
|
||||
nocache:
|
||||
description:
|
||||
- Do not use cache when building an image.
|
||||
type: bool
|
||||
default: no
|
||||
etc_hosts:
|
||||
description:
|
||||
- Extra hosts to add to C(/etc/hosts) in building containers, as a mapping of hostname to IP address.
|
||||
type: dict
|
||||
args:
|
||||
description:
|
||||
- Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
|
||||
- Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
|
||||
- Requires Docker API >= 1.21.
|
||||
type: dict
|
||||
container_limits:
|
||||
description:
|
||||
- A dictionary of limits applied to each container created by the build process.
|
||||
type: dict
|
||||
suboptions:
|
||||
memory:
|
||||
description:
|
||||
- Set memory limit for build.
|
||||
type: int
|
||||
memswap:
|
||||
description:
|
||||
- Total memory (memory + swap).
|
||||
- Use C(-1) to disable swap.
|
||||
type: int
|
||||
cpushares:
|
||||
description:
|
||||
- CPU shares (relative weight).
|
||||
type: int
|
||||
cpusetcpus:
|
||||
description:
|
||||
- CPUs in which to allow execution.
|
||||
- For example, C(0-3) or C(0,1).
|
||||
type: str
|
||||
use_config_proxy:
|
||||
description:
|
||||
- If set to C(yes) and a proxy configuration is specified in the docker client configuration
|
||||
(by default C($HOME/.docker/config.json)), the corresponding environment variables will
|
||||
be set in the container being built.
|
||||
- Needs Docker SDK for Python >= 3.7.0.
|
||||
type: bool
|
||||
target:
|
||||
description:
|
||||
- When building an image specifies an intermediate build stage by
|
||||
name as a final stage for the resulting image.
|
||||
type: str
|
||||
platform:
|
||||
description:
|
||||
- Platform in the format C(os[/arch[/variant]]).
|
||||
type: str
|
||||
version_added: 1.1.0
|
||||
archive_path:
|
||||
description:
|
||||
- Use with state C(present) to archive an image to a .tar file.
|
||||
type: path
|
||||
load_path:
|
||||
description:
|
||||
- Use with state C(present) to load an image from a .tar file.
|
||||
- Set I(source) to C(load) if you want to load the image.
|
||||
type: path
|
||||
force_source:
|
||||
description:
|
||||
- Use with state C(present) to build, load or pull an image (depending on the
|
||||
value of the I(source) option) when the image already exists.
|
||||
type: bool
|
||||
default: false
|
||||
force_absent:
|
||||
description:
|
||||
- Use with state I(absent) to un-tag and remove all images matching the specified name.
|
||||
type: bool
|
||||
default: false
|
||||
force_tag:
|
||||
description:
|
||||
- Use with state C(present) to force tagging an image.
|
||||
type: bool
|
||||
default: false
|
||||
name:
|
||||
description:
|
||||
- "Image name. Name format will be one of: C(name), C(repository/name), C(registry_server:port/name).
|
||||
When pushing or pulling an image the name can optionally include the tag by appending C(:tag_name)."
|
||||
- Note that image IDs (hashes) are only supported for I(state=absent), for I(state=present) with I(source=load),
|
||||
and for I(state=present) with I(source=local).
|
||||
type: str
|
||||
required: yes
|
||||
pull:
|
||||
description:
|
||||
- "Specifies options used for pulling images."
|
||||
type: dict
|
||||
version_added: 1.3.0
|
||||
suboptions:
|
||||
platform:
|
||||
description:
|
||||
- When pulling an image, ask for this specific platform.
|
||||
- Note that this value is not used to determine whether the image needs to be pulled. This might change
|
||||
in the future in a minor release, though.
|
||||
type: str
|
||||
push:
|
||||
description:
|
||||
- Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter.
|
||||
type: bool
|
||||
default: no
|
||||
repository:
|
||||
description:
|
||||
- Use with state C(present) to tag the image.
|
||||
- Expects format C(repository:tag). If no tag is provided, will use the value of the I(tag) parameter or C(latest).
|
||||
- If I(push=true), I(repository) must either include a registry, or will be assumed to belong to the default
|
||||
registry (Docker Hub).
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Make assertions about the state of an image.
|
||||
- When C(absent) an image will be removed. Use the force option to un-tag and remove all images
|
||||
matching the provided name.
|
||||
- When C(present) check if an image exists using the provided name and tag. If the image is not found or the
|
||||
force option is used, the image will either be pulled, built or loaded, depending on the I(source) option.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
tag:
|
||||
description:
|
||||
- Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to
|
||||
I(latest).
|
||||
- If I(name) parameter format is I(name:tag), then tag value from I(name) will take precedence.
|
||||
type: str
|
||||
default: latest
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Docker API >= 1.20"
|
||||
|
||||
author:
|
||||
- Pavel Antonov (@softzilla)
|
||||
- Chris Houseknecht (@chouseknecht)
|
||||
- Sorin Sbarnea (@ssbarnea)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
- name: Pull an image
|
||||
community.docker.docker_image:
|
||||
name: pacur/centos-7
|
||||
source: pull
|
||||
# Select platform for pulling. If not specified, will pull whatever docker prefers.
|
||||
pull:
|
||||
platform: amd64
|
||||
|
||||
- name: Tag and push to docker hub
|
||||
community.docker.docker_image:
|
||||
name: pacur/centos-7:56
|
||||
repository: dcoppenhagan/myimage:7.56
|
||||
push: yes
|
||||
source: local
|
||||
|
||||
- name: Tag and push to local registry
|
||||
community.docker.docker_image:
|
||||
# Image will be centos:7
|
||||
name: centos
|
||||
# Will be pushed to localhost:5000/centos:7
|
||||
repository: localhost:5000/centos
|
||||
tag: 7
|
||||
push: yes
|
||||
source: local
|
||||
|
||||
- name: Add tag latest to image
|
||||
community.docker.docker_image:
|
||||
name: myimage:7.1.2
|
||||
repository: myimage:latest
|
||||
# As 'latest' usually already is present, we need to enable overwriting of existing tags:
|
||||
force_tag: yes
|
||||
source: local
|
||||
|
||||
- name: Remove image
|
||||
community.docker.docker_image:
|
||||
state: absent
|
||||
name: registry.ansible.com/chouseknecht/sinatra
|
||||
tag: v1
|
||||
|
||||
- name: Build an image and push it to a private repo
|
||||
community.docker.docker_image:
|
||||
build:
|
||||
path: ./sinatra
|
||||
name: registry.ansible.com/chouseknecht/sinatra
|
||||
tag: v1
|
||||
push: yes
|
||||
source: build
|
||||
|
||||
- name: Archive image
|
||||
community.docker.docker_image:
|
||||
name: registry.ansible.com/chouseknecht/sinatra
|
||||
tag: v1
|
||||
archive_path: my_sinatra.tar
|
||||
source: local
|
||||
|
||||
- name: Load image from archive and push to a private registry
|
||||
community.docker.docker_image:
|
||||
name: localhost:5000/myimages/sinatra
|
||||
tag: v1
|
||||
push: yes
|
||||
load_path: my_sinatra.tar
|
||||
source: load
|
||||
|
||||
- name: Build image with build args
|
||||
community.docker.docker_image:
|
||||
name: myimage
|
||||
build:
|
||||
path: /path/to/build/dir
|
||||
args:
|
||||
log_volume: /var/log/myapp
|
||||
listen_port: 8080
|
||||
source: build
|
||||
|
||||
- name: Build image using cache source
|
||||
community.docker.docker_image:
|
||||
name: myimage:latest
|
||||
build:
|
||||
path: /path/to/build/dir
|
||||
# Use as cache source for building myimage
|
||||
cache_from:
|
||||
- nginx:latest
|
||||
- alpine:3.8
|
||||
source: build
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
image:
|
||||
description: Image inspection results for the affected image.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: {}
|
||||
stdout:
|
||||
description: Docker build output when building an image.
|
||||
returned: success
|
||||
type: str
|
||||
sample: ""
|
||||
version_added: 1.0.0
|
||||
'''
|
||||
|
||||
import errno
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
clean_dict_booleans_for_docker_api,
|
||||
docker_version,
|
||||
AnsibleDockerClient,
|
||||
DockerBaseClass,
|
||||
is_image_name_id,
|
||||
is_valid_tag,
|
||||
RequestException,
|
||||
)
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
|
||||
|
||||
if docker_version is not None:
|
||||
try:
|
||||
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
|
||||
from docker.auth import resolve_repository_name
|
||||
else:
|
||||
from docker.auth.auth import resolve_repository_name
|
||||
from docker.utils.utils import parse_repository_tag
|
||||
from docker.errors import DockerException, NotFound
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in module_utils.docker.common
|
||||
pass
|
||||
|
||||
|
||||
class ImageManager(DockerBaseClass):
|
||||
|
||||
    def __init__(self, client, results):
        """Unpack module parameters and immediately execute the requested state.

        :param client: AnsibleDockerClient wrapping the Docker SDK connection.
        :param results: module result dict, mutated in place by the state handlers.
        """

        super(ImageManager, self).__init__()

        self.client = client
        self.results = results
        parameters = self.client.module.params
        self.check_mode = self.client.check_mode

        self.source = parameters['source']
        # 'build' and 'pull' are optional dict suboptions; normalize None to {}
        # so the .get() calls below are safe.
        build = parameters['build'] or dict()
        pull = parameters['pull'] or dict()
        self.archive_path = parameters['archive_path']
        self.cache_from = build.get('cache_from')
        self.container_limits = build.get('container_limits')
        self.dockerfile = build.get('dockerfile')
        self.force_source = parameters['force_source']
        self.force_absent = parameters['force_absent']
        self.force_tag = parameters['force_tag']
        self.load_path = parameters['load_path']
        self.name = parameters['name']
        self.network = build.get('network')
        # etc_hosts values may contain booleans; convert them to API strings.
        self.extra_hosts = clean_dict_booleans_for_docker_api(build.get('etc_hosts'))
        self.nocache = build.get('nocache', False)
        self.build_path = build.get('path')
        self.pull = build.get('pull')
        self.target = build.get('target')
        self.repository = parameters['repository']
        self.rm = build.get('rm', True)
        self.state = parameters['state']
        self.tag = parameters['tag']
        self.http_timeout = build.get('http_timeout')
        self.pull_platform = pull.get('platform')
        self.push = parameters['push']
        self.buildargs = build.get('args')
        self.build_platform = build.get('platform')
        self.use_config_proxy = build.get('use_config_proxy')

        # If name contains a tag, it takes precedence over the tag parameter.
        # Image IDs are excluded since they contain ':' without being tagged.
        if not is_image_name_id(self.name):
            repo, repo_tag = parse_repository_tag(self.name)
            if repo_tag:
                self.name = repo
                self.tag = repo_tag

        # Sanity check: fail early when we know that something will fail later.
        if self.repository and is_image_name_id(self.repository):
            self.fail("`repository` must not be an image ID; got: %s" % self.repository)
        if not self.repository and self.push and is_image_name_id(self.name):
            self.fail("Cannot push an image by ID; specify `repository` to tag and push the image with ID %s instead" % self.name)

        # Dispatch to the state handler right away; results are reported via
        # self.results, not a return value.
        if self.state == 'present':
            self.present()
        elif self.state == 'absent':
            self.absent()
|
||||
|
||||
    def fail(self, msg):
        """Abort the module run with the given error message (never returns)."""
        self.client.fail(msg)
|
||||
|
||||
    def present(self):
        '''
        Handles state = 'present', which includes building, loading or pulling an image,
        depending on user provided parameters.

        Mutates self.results ('image', 'actions', 'changed') in place and then
        performs any requested archive/tag/push follow-up operations.

        :returns None
        '''
        # Image IDs and name:tag references need different lookups.
        if is_image_name_id(self.name):
            image = self.client.find_image_by_id(self.name, accept_missing_image=True)
        else:
            image = self.client.find_image(name=self.name, tag=self.tag)

        if not image or self.force_source:
            if self.source == 'build':
                if is_image_name_id(self.name):
                    self.fail("Image name must not be an image ID for source=build; got: %s" % self.name)

                # Build the image
                if not os.path.isdir(self.build_path):
                    self.fail("Requested build path %s could not be found or you do not have access." % self.build_path)
                image_name = self.name
                if self.tag:
                    image_name = "%s:%s" % (self.name, self.tag)
                self.log("Building image %s" % image_name)
                self.results['actions'].append("Built image %s from %s" % (image_name, self.build_path))
                self.results['changed'] = True
                if not self.check_mode:
                    # build_image() returns both 'image' and 'stdout' keys.
                    self.results.update(self.build_image())

            elif self.source == 'load':
                # Load the image from an archive
                if not os.path.isfile(self.load_path):
                    self.fail("Error loading image %s. Specified path %s does not exist." % (self.name,
                                                                                            self.load_path))
                image_name = self.name
                if self.tag and not is_image_name_id(image_name):
                    image_name = "%s:%s" % (self.name, self.tag)
                self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path))
                self.results['changed'] = True
                if not self.check_mode:
                    self.results['image'] = self.load_image()
            elif self.source == 'pull':
                if is_image_name_id(self.name):
                    self.fail("Image name must not be an image ID for source=pull; got: %s" % self.name)

                # pull the image
                self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
                self.results['changed'] = True
                if not self.check_mode:
                    self.results['image'], dummy = self.client.pull_image(self.name, tag=self.tag, platform=self.pull_platform)
            elif self.source == 'local':
                if image is None:
                    name = self.name
                    if self.tag and not is_image_name_id(name):
                        name = "%s:%s" % (self.name, self.tag)
                    self.client.fail('Cannot find the image %s locally.' % name)
            # If the operation produced the exact same image ID we started
            # with, the image did not actually change.
            if not self.check_mode and image and image['Id'] == self.results['image']['Id']:
                self.results['changed'] = False
        else:
            # Image exists and no forced re-source: just report it.
            self.results['image'] = image

        if self.archive_path:
            self.archive_image(self.name, self.tag)

        # Push either directly (no repository given) or via tag_image, which
        # retags first and then pushes the new reference.
        if self.push and not self.repository:
            self.push_image(self.name, self.tag)
        elif self.repository:
            self.tag_image(self.name, self.tag, self.repository, push=self.push)
|
||||
|
||||
    def absent(self):
        '''
        Handles state = 'absent', which removes an image.

        Uses self.force_absent to un-tag/remove even when multiple tags exist.

        :return None
        '''
        name = self.name
        if is_image_name_id(name):
            image = self.client.find_image_by_id(name, accept_missing_image=True)
        else:
            image = self.client.find_image(name, self.tag)
            if self.tag:
                # Report and remove the fully qualified name:tag reference.
                name = "%s:%s" % (self.name, self.tag)
        if image:
            if not self.check_mode:
                try:
                    self.client.remove_image(name, force=self.force_absent)
                except NotFound:
                    # If the image vanished while we were trying to remove it, don't fail
                    pass
                except Exception as exc:
                    self.fail("Error removing image %s - %s" % (name, to_native(exc)))

            # Reported as changed even in check mode: removal would occur.
            self.results['changed'] = True
            self.results['actions'].append("Removed image %s" % (name))
            self.results['image']['state'] = 'Deleted'
|
||||
|
||||
    def archive_image(self, name, tag):
        '''
        Archive an image to a .tar file. Called when archive_path is passed.

        :param name: name (or image ID) of the image. Type: str
        :param tag: tag to archive; defaults to "latest" when falsy. Type: str
        :return None
        '''

        if not tag:
            tag = "latest"

        if is_image_name_id(name):
            image = self.client.find_image_by_id(name, accept_missing_image=True)
            image_name = name
        else:
            image = self.client.find_image(name=name, tag=tag)
            image_name = "%s:%s" % (name, tag)

        if not image:
            # Nothing to archive; silently skip (logged only).
            self.log("archive image: image %s not found" % image_name)
            return

        self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path))
        self.results['changed'] = True
        if not self.check_mode:
            self.log("Getting archive of image %s" % image_name)
            try:
                saved_image = self.client.get_image(image_name)
            except Exception as exc:
                self.fail("Error getting image %s - %s" % (image_name, to_native(exc)))

            try:
                with open(self.archive_path, 'wb') as fd:
                    # docker-py >= 3.0.0 returns an iterable of chunks;
                    # older versions return a response object with .stream().
                    if self.client.docker_py_version >= LooseVersion('3.0.0'):
                        for chunk in saved_image:
                            fd.write(chunk)
                    else:
                        for chunk in saved_image.stream(2048, decode_content=False):
                            fd.write(chunk)
            except Exception as exc:
                self.fail("Error writing image archive %s - %s" % (self.archive_path, to_native(exc)))

        if image:
            self.results['image'] = image
|
||||
|
||||
    def push_image(self, name, tag=None):
        '''
        If the name of the image contains a repository path, then push the image.

        :param name: Name of the image to push.
        :param tag: Use a specific tag; when omitted, any tag embedded in
            ``name`` is used instead.
        :return: None
        '''

        if is_image_name_id(name):
            self.fail("Cannot push an image ID: %s" % name)

        repository = name
        if not tag:
            repository, tag = parse_repository_tag(name)
        # Split "registry.example.com/repo" into registry host and repo path.
        registry, repo_name = resolve_repository_name(repository)

        self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag))

        if registry:
            self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
            self.results['changed'] = True
            if not self.check_mode:
                status = None
                try:
                    # Only report 'changed' if the daemon actually transferred
                    # layers ('Pushing' status seen in the stream).
                    changed = False
                    for line in self.client.push(repository, tag=tag, stream=True, decode=True):
                        self.log(line, pretty_print=True)
                        if line.get('errorDetail'):
                            raise Exception(line['errorDetail']['message'])
                        status = line.get('status')
                        if status == 'Pushing':
                            changed = True
                    self.results['changed'] = changed
                except Exception as exc:
                    # Give targeted hints for the two common auth failures.
                    if 'unauthorized' in str(exc):
                        if 'authentication required' in str(exc):
                            self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
                                      (registry, repo_name, tag, to_native(exc), registry))
                        else:
                            self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
                                      (registry, repo_name, tag, str(exc)))
                    self.fail("Error pushing image %s: %s" % (repository, to_native(exc)))
                self.results['image'] = self.client.find_image(name=repository, tag=tag)
                if not self.results['image']:
                    self.results['image'] = dict()
                self.results['image']['push_status'] = status
|
||||
|
||||
    def tag_image(self, name, tag, repository, push=False):
        '''
        Tag an image into a repository.

        :param name: name of the image. required.
        :param tag: image tag.
        :param repository: path to the repository. required.
        :param push: bool. push the image once it's tagged.
        :return: None
        '''
        repo, repo_tag = parse_repository_tag(repository)
        if not repo_tag:
            # Tag precedence: explicit tag parameter > "latest" fallback.
            repo_tag = "latest"
            if tag:
                repo_tag = tag
        image = self.client.find_image(name=repo, tag=repo_tag)
        found = 'found' if image else 'not found'
        self.log("image %s was %s" % (repo, found))

        if not image or self.force_tag:
            image_name = name
            if not is_image_name_id(name) and tag and not name.endswith(':' + tag):
                image_name = "%s:%s" % (name, tag)
            self.log("tagging %s to %s:%s" % (image_name, repo, repo_tag))
            self.results['changed'] = True
            self.results['actions'].append("Tagged image %s to %s:%s" % (image_name, repo, repo_tag))
            if not self.check_mode:
                try:
                    # Finding the image does not always work, especially running a localhost registry. In those
                    # cases, if we don't set force=True, it errors.
                    tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True)
                    if not tag_status:
                        raise Exception("Tag operation failed.")
                except Exception as exc:
                    self.fail("Error: failed to tag image - %s" % to_native(exc))
                self.results['image'] = self.client.find_image(name=repo, tag=repo_tag)
                # Re-tagging to an identical image ID is a no-op.
                if image and image['Id'] == self.results['image']['Id']:
                    self.results['changed'] = False

        if push:
            self.push_image(repo, repo_tag)
|
||||
|
||||
@staticmethod
|
||||
def _extract_output_line(line, output):
|
||||
'''
|
||||
Extract text line from stream output and, if found, adds it to output.
|
||||
'''
|
||||
if 'stream' in line or 'status' in line:
|
||||
# Make sure we have a string (assuming that line['stream'] and
|
||||
# line['status'] are either not defined, falsish, or a string)
|
||||
text_line = line.get('stream') or line.get('status') or ''
|
||||
output.extend(text_line.splitlines())
|
||||
|
||||
    def build_image(self):
        '''
        Build an image from self.build_path, streaming and collecting the
        daemon's build output, and failing the module on any build error.

        :return: dict with 'stdout' (joined build log) and 'image' (the
            freshly built image's inspection dict, or None if not found)
        '''
        # Base keyword arguments for APIClient.build(); optional parameters
        # are only added below when actually set, so the SDK defaults apply.
        params = dict(
            path=self.build_path,
            tag=self.name,
            rm=self.rm,
            nocache=self.nocache,
            timeout=self.http_timeout,
            pull=self.pull,
            forcerm=self.rm,
            dockerfile=self.dockerfile,
            decode=True,
        )
        # docker-py < 3.0.0 needs an explicit stream=True to iterate output.
        if self.client.docker_py_version < LooseVersion('3.0.0'):
            params['stream'] = True

        if self.tag:
            params['tag'] = "%s:%s" % (self.name, self.tag)
        if self.container_limits:
            params['container_limits'] = self.container_limits
        if self.buildargs:
            # Docker expects string values; coerce everything to str.
            for key, value in self.buildargs.items():
                self.buildargs[key] = to_native(value)
            params['buildargs'] = self.buildargs
        if self.cache_from:
            params['cache_from'] = self.cache_from
        if self.network:
            params['network_mode'] = self.network
        if self.extra_hosts:
            params['extra_hosts'] = self.extra_hosts
        if self.use_config_proxy:
            params['use_config_proxy'] = self.use_config_proxy
            # Due to a bug in docker-py, it will crash if
            # use_config_proxy is True and buildargs is None
            if 'buildargs' not in params:
                params['buildargs'] = {}
        if self.target:
            params['target'] = self.target
        if self.build_platform is not None:
            params['platform'] = self.build_platform

        build_output = []
        for line in self.client.build(**params):
            # line = json.loads(line)
            self.log(line, pretty_print=True)
            self._extract_output_line(line, build_output)

            if line.get('error'):
                # Include the accumulated build log in the failure message so
                # the user can see where the build went wrong.
                if line.get('errorDetail'):
                    errorDetail = line.get('errorDetail')
                    self.fail(
                        "Error building %s - code: %s, message: %s, logs: %s" % (
                            self.name,
                            errorDetail.get('code'),
                            errorDetail.get('message'),
                            build_output))
                else:
                    self.fail("Error building %s - message: %s, logs: %s" % (
                        self.name, line.get('error'), build_output))

        return {"stdout": "\n".join(build_output),
                "image": self.client.find_image(name=self.name, tag=self.tag)}
|
||||
|
||||
def load_image(self):
|
||||
'''
|
||||
Load an image from a .tar archive
|
||||
|
||||
:return: image dict
|
||||
'''
|
||||
# Load image(s) from file
|
||||
load_output = []
|
||||
has_output = False
|
||||
try:
|
||||
self.log("Opening image %s" % self.load_path)
|
||||
with open(self.load_path, 'rb') as image_tar:
|
||||
self.log("Loading image from %s" % self.load_path)
|
||||
output = self.client.load_image(image_tar)
|
||||
if output is not None:
|
||||
# Old versions of Docker SDK of Python (before version 2.5.0) do not return anything.
|
||||
# (See https://github.com/docker/docker-py/commit/7139e2d8f1ea82340417add02090bfaf7794f159)
|
||||
# Note that before that commit, something else than None was returned, but that was also
|
||||
# only introduced in a commit that first appeared in 2.5.0 (see
|
||||
# https://github.com/docker/docker-py/commit/9e793806ff79559c3bc591d8c52a3bbe3cdb7350).
|
||||
# So the above check works for every released version of Docker SDK for Python.
|
||||
has_output = True
|
||||
for line in output:
|
||||
self.log(line, pretty_print=True)
|
||||
self._extract_output_line(line, load_output)
|
||||
else:
|
||||
if LooseVersion(docker_version) < LooseVersion('2.5.0'):
|
||||
self.client.module.warn(
|
||||
'The installed version of the Docker SDK for Python does not return the loading results'
|
||||
' from the Docker daemon. Therefore, we cannot verify whether the expected image was'
|
||||
' loaded, whether multiple images where loaded, or whether the load actually succeeded.'
|
||||
' If you are not stuck with Python 2.6, *please* upgrade to a version newer than 2.5.0'
|
||||
' (2.5.0 was released in August 2017).'
|
||||
)
|
||||
else:
|
||||
self.client.module.warn(
|
||||
'The API version of your Docker daemon is < 1.23, which does not return the image'
|
||||
' loading result from the Docker daemon. Therefore, we cannot verify whether the'
|
||||
' expected image was loaded, whether multiple images where loaded, or whether the load'
|
||||
' actually succeeded. You should consider upgrading your Docker daemon.'
|
||||
)
|
||||
except EnvironmentError as exc:
|
||||
if exc.errno == errno.ENOENT:
|
||||
self.client.fail("Error opening image %s - %s" % (self.load_path, to_native(exc)))
|
||||
self.client.fail("Error loading image %s - %s" % (self.name, to_native(exc)), stdout='\n'.join(load_output))
|
||||
except Exception as exc:
|
||||
self.client.fail("Error loading image %s - %s" % (self.name, to_native(exc)), stdout='\n'.join(load_output))
|
||||
|
||||
# Collect loaded images
|
||||
if has_output:
|
||||
# We can only do this when we actually got some output from Docker daemon
|
||||
loaded_images = set()
|
||||
loaded_image_ids = set()
|
||||
for line in load_output:
|
||||
if line.startswith('Loaded image:'):
|
||||
loaded_images.add(line[len('Loaded image:'):].strip())
|
||||
if line.startswith('Loaded image ID:'):
|
||||
loaded_image_ids.add(line[len('Loaded image ID:'):].strip().lower())
|
||||
|
||||
if not loaded_images and not loaded_image_ids:
|
||||
self.client.fail("Detected no loaded images. Archive potentially corrupt?", stdout='\n'.join(load_output))
|
||||
|
||||
if is_image_name_id(self.name):
|
||||
expected_image = self.name.lower()
|
||||
found_image = expected_image not in loaded_image_ids
|
||||
else:
|
||||
expected_image = '%s:%s' % (self.name, self.tag)
|
||||
found_image = expected_image not in loaded_images
|
||||
if found_image:
|
||||
self.client.fail(
|
||||
"The archive did not contain image '%s'. Instead, found %s." % (
|
||||
expected_image,
|
||||
', '.join(sorted(["'%s'" % image for image in loaded_images] + list(loaded_image_ids)))),
|
||||
stdout='\n'.join(load_output))
|
||||
loaded_images.remove(expected_image)
|
||||
|
||||
if loaded_images:
|
||||
self.client.module.warn(
|
||||
"The archive contained more images than specified: %s" % (
|
||||
', '.join(sorted(["'%s'" % image for image in loaded_images] + list(loaded_image_ids))), ))
|
||||
|
||||
if is_image_name_id(self.name):
|
||||
return self.client.find_image_by_id(self.name, accept_missing_image=True)
|
||||
else:
|
||||
return self.client.find_image(self.name, self.tag)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
source=dict(type='str', choices=['build', 'load', 'pull', 'local']),
|
||||
build=dict(type='dict', options=dict(
|
||||
cache_from=dict(type='list', elements='str'),
|
||||
container_limits=dict(type='dict', options=dict(
|
||||
memory=dict(type='int'),
|
||||
memswap=dict(type='int'),
|
||||
cpushares=dict(type='int'),
|
||||
cpusetcpus=dict(type='str'),
|
||||
)),
|
||||
dockerfile=dict(type='str'),
|
||||
http_timeout=dict(type='int'),
|
||||
network=dict(type='str'),
|
||||
nocache=dict(type='bool', default=False),
|
||||
path=dict(type='path', required=True),
|
||||
pull=dict(type='bool', default=False),
|
||||
rm=dict(type='bool', default=True),
|
||||
args=dict(type='dict'),
|
||||
use_config_proxy=dict(type='bool'),
|
||||
target=dict(type='str'),
|
||||
etc_hosts=dict(type='dict'),
|
||||
platform=dict(type='str'),
|
||||
)),
|
||||
archive_path=dict(type='path'),
|
||||
force_source=dict(type='bool', default=False),
|
||||
force_absent=dict(type='bool', default=False),
|
||||
force_tag=dict(type='bool', default=False),
|
||||
load_path=dict(type='path'),
|
||||
name=dict(type='str', required=True),
|
||||
pull=dict(type='dict', options=dict(
|
||||
platform=dict(type='str'),
|
||||
)),
|
||||
push=dict(type='bool', default=False),
|
||||
repository=dict(type='str'),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
tag=dict(type='str', default='latest'),
|
||||
)
|
||||
|
||||
required_if = [
|
||||
('state', 'present', ['source']),
|
||||
('source', 'build', ['build']),
|
||||
('source', 'load', ['load_path']),
|
||||
]
|
||||
|
||||
def detect_build_cache_from(client):
|
||||
return client.module.params['build'] and client.module.params['build'].get('cache_from') is not None
|
||||
|
||||
def detect_build_network(client):
|
||||
return client.module.params['build'] and client.module.params['build'].get('network') is not None
|
||||
|
||||
def detect_build_target(client):
|
||||
return client.module.params['build'] and client.module.params['build'].get('target') is not None
|
||||
|
||||
def detect_use_config_proxy(client):
|
||||
return client.module.params['build'] and client.module.params['build'].get('use_config_proxy') is not None
|
||||
|
||||
def detect_etc_hosts(client):
|
||||
return client.module.params['build'] and bool(client.module.params['build'].get('etc_hosts'))
|
||||
|
||||
def detect_build_platform(client):
|
||||
return client.module.params['build'] and client.module.params['build'].get('platform') is not None
|
||||
|
||||
def detect_pull_platform(client):
|
||||
return client.module.params['pull'] and client.module.params['pull'].get('platform') is not None
|
||||
|
||||
option_minimal_versions = dict()
|
||||
option_minimal_versions["build.cache_from"] = dict(docker_py_version='2.1.0', docker_api_version='1.25', detect_usage=detect_build_cache_from)
|
||||
option_minimal_versions["build.network"] = dict(docker_py_version='2.4.0', docker_api_version='1.25', detect_usage=detect_build_network)
|
||||
option_minimal_versions["build.target"] = dict(docker_py_version='2.4.0', detect_usage=detect_build_target)
|
||||
option_minimal_versions["build.use_config_proxy"] = dict(docker_py_version='3.7.0', detect_usage=detect_use_config_proxy)
|
||||
option_minimal_versions["build.etc_hosts"] = dict(docker_py_version='2.6.0', docker_api_version='1.27', detect_usage=detect_etc_hosts)
|
||||
option_minimal_versions["build.platform"] = dict(docker_py_version='3.0.0', docker_api_version='1.32', detect_usage=detect_build_platform)
|
||||
option_minimal_versions["pull.platform"] = dict(docker_py_version='3.0.0', docker_api_version='1.32', detect_usage=detect_pull_platform)
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
required_if=required_if,
|
||||
supports_check_mode=True,
|
||||
min_docker_version='1.8.0',
|
||||
min_docker_api_version='1.20',
|
||||
option_minimal_versions=option_minimal_versions,
|
||||
)
|
||||
|
||||
if not is_valid_tag(client.module.params['tag'], allow_empty=True):
|
||||
client.fail('"{0}" is not a valid docker tag!'.format(client.module.params['tag']))
|
||||
|
||||
if client.module.params['source'] == 'build':
|
||||
if not client.module.params['build'] or not client.module.params['build'].get('path'):
|
||||
client.fail('If "source" is set to "build", the "build.path" option must be specified.')
|
||||
|
||||
try:
|
||||
results = dict(
|
||||
changed=False,
|
||||
actions=[],
|
||||
image={}
|
||||
)
|
||||
|
||||
ImageManager(client, results)
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,273 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_image_info
|
||||
|
||||
short_description: Inspect docker images
|
||||
|
||||
|
||||
description:
|
||||
- Provide one or more image names, and the module will inspect each, returning an array of inspection results.
|
||||
- If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists
|
||||
locally, you can call the module with the image name, then check whether the result list is empty (image does not
|
||||
exist) or has one element (the image exists locally).
|
||||
- The module will not attempt to pull images from registries. Use M(community.docker.docker_image) with I(source) set to C(pull)
|
||||
to ensure an image is pulled.
|
||||
|
||||
notes:
|
||||
- This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change.
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]),
|
||||
where C(tag) is optional. If a tag is not provided, C(latest) will be used. Instead of image names, also
|
||||
image IDs can be used.
|
||||
- If no name is provided, a list of all images will be returned.
|
||||
type: list
|
||||
elements: str
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Docker API >= 1.20"
|
||||
|
||||
author:
|
||||
- Chris Houseknecht (@chouseknecht)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Inspect a single image
|
||||
community.docker.docker_image_info:
|
||||
name: pacur/centos-7
|
||||
|
||||
- name: Inspect multiple images
|
||||
community.docker.docker_image_info:
|
||||
name:
|
||||
- pacur/centos-7
|
||||
- sinatra
|
||||
register: result
|
||||
|
||||
- name: Make sure that both images pacur/centos-7 and sinatra exist locally
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- result.images | length == 2
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
images:
|
||||
description:
|
||||
- Inspection results for the selected images.
|
||||
- The list only contains inspection results of images existing locally.
|
||||
returned: always
|
||||
type: list
|
||||
elements: dict
|
||||
sample: [
|
||||
{
|
||||
"Architecture": "amd64",
|
||||
"Author": "",
|
||||
"Comment": "",
|
||||
"Config": {
|
||||
"AttachStderr": false,
|
||||
"AttachStdin": false,
|
||||
"AttachStdout": false,
|
||||
"Cmd": [
|
||||
"/etc/docker/registry/config.yml"
|
||||
],
|
||||
"Domainname": "",
|
||||
"Entrypoint": [
|
||||
"/bin/registry"
|
||||
],
|
||||
"Env": [
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
],
|
||||
"ExposedPorts": {
|
||||
"5000/tcp": {}
|
||||
},
|
||||
"Hostname": "e5c68db50333",
|
||||
"Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
|
||||
"Labels": {},
|
||||
"OnBuild": [],
|
||||
"OpenStdin": false,
|
||||
"StdinOnce": false,
|
||||
"Tty": false,
|
||||
"User": "",
|
||||
"Volumes": {
|
||||
"/var/lib/registry": {}
|
||||
},
|
||||
"WorkingDir": ""
|
||||
},
|
||||
"Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
|
||||
"ContainerConfig": {
|
||||
"AttachStderr": false,
|
||||
"AttachStdin": false,
|
||||
"AttachStdout": false,
|
||||
"Cmd": [
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
'#(nop) CMD ["/etc/docker/registry/config.yml"]'
|
||||
],
|
||||
"Domainname": "",
|
||||
"Entrypoint": [
|
||||
"/bin/registry"
|
||||
],
|
||||
"Env": [
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
],
|
||||
"ExposedPorts": {
|
||||
"5000/tcp": {}
|
||||
},
|
||||
"Hostname": "e5c68db50333",
|
||||
"Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
|
||||
"Labels": {},
|
||||
"OnBuild": [],
|
||||
"OpenStdin": false,
|
||||
"StdinOnce": false,
|
||||
"Tty": false,
|
||||
"User": "",
|
||||
"Volumes": {
|
||||
"/var/lib/registry": {}
|
||||
},
|
||||
"WorkingDir": ""
|
||||
},
|
||||
"Created": "2016-03-08T21:08:15.399680378Z",
|
||||
"DockerVersion": "1.9.1",
|
||||
"GraphDriver": {
|
||||
"Data": null,
|
||||
"Name": "aufs"
|
||||
},
|
||||
"Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
|
||||
"Name": "registry:2",
|
||||
"Os": "linux",
|
||||
"Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
|
||||
"RepoDigests": [],
|
||||
"RepoTags": [
|
||||
"registry:2"
|
||||
],
|
||||
"Size": 0,
|
||||
"VirtualSize": 165808884
|
||||
}
|
||||
]
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
try:
|
||||
from docker import utils
|
||||
from docker.errors import DockerException, NotFound
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
DockerBaseClass,
|
||||
is_image_name_id,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
|
||||
class ImageManager(DockerBaseClass):
|
||||
|
||||
def __init__(self, client, results):
|
||||
|
||||
super(ImageManager, self).__init__()
|
||||
|
||||
self.client = client
|
||||
self.results = results
|
||||
self.name = self.client.module.params.get('name')
|
||||
self.log("Gathering facts for images: %s" % (str(self.name)))
|
||||
|
||||
if self.name:
|
||||
self.results['images'] = self.get_facts()
|
||||
else:
|
||||
self.results['images'] = self.get_all_images()
|
||||
|
||||
def fail(self, msg):
|
||||
self.client.fail(msg)
|
||||
|
||||
def get_facts(self):
|
||||
'''
|
||||
Lookup and inspect each image name found in the names parameter.
|
||||
|
||||
:returns array of image dictionaries
|
||||
'''
|
||||
|
||||
results = []
|
||||
|
||||
names = self.name
|
||||
if not isinstance(names, list):
|
||||
names = [names]
|
||||
|
||||
for name in names:
|
||||
if is_image_name_id(name):
|
||||
self.log('Fetching image %s (ID)' % (name))
|
||||
image = self.client.find_image_by_id(name, accept_missing_image=True)
|
||||
else:
|
||||
repository, tag = utils.parse_repository_tag(name)
|
||||
if not tag:
|
||||
tag = 'latest'
|
||||
self.log('Fetching image %s:%s' % (repository, tag))
|
||||
image = self.client.find_image(name=repository, tag=tag)
|
||||
if image:
|
||||
results.append(image)
|
||||
return results
|
||||
|
||||
def get_all_images(self):
|
||||
results = []
|
||||
images = self.client.images()
|
||||
for image in images:
|
||||
try:
|
||||
inspection = self.client.inspect_image(image['Id'])
|
||||
except NotFound:
|
||||
pass
|
||||
except Exception as exc:
|
||||
self.fail("Error inspecting image %s - %s" % (image['Id'], to_native(exc)))
|
||||
results.append(inspection)
|
||||
return results
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
name=dict(type='list', elements='str'),
|
||||
)
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
min_docker_api_version='1.20',
|
||||
)
|
||||
|
||||
try:
|
||||
results = dict(
|
||||
changed=False,
|
||||
images=[]
|
||||
)
|
||||
|
||||
ImageManager(client, results)
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,190 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_image_load
|
||||
|
||||
short_description: Load docker image(s) from archives
|
||||
|
||||
version_added: 1.3.0
|
||||
|
||||
description:
|
||||
- Load one or multiple Docker images from a C(.tar) archive, and return information on
|
||||
the loaded image(s).
|
||||
|
||||
options:
|
||||
path:
|
||||
description:
|
||||
- The path to the C(.tar) archive to load Docker image(s) from.
|
||||
type: path
|
||||
required: true
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_2_documentation
|
||||
|
||||
notes:
|
||||
- Does not support C(check_mode).
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.5.0"
|
||||
- "Docker API >= 1.23"
|
||||
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Load all image(s) from the given tar file
|
||||
community.docker.docker_image_load:
|
||||
path: /path/to/images.tar
|
||||
register: result
|
||||
|
||||
- name: Print the loaded image names
|
||||
ansible.builtin.debug:
|
||||
msg: "Loaded the following images: {{ result.image_names | join(', ') }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
image_names:
|
||||
description: List of image names and IDs loaded from the archive.
|
||||
returned: success
|
||||
type: list
|
||||
elements: str
|
||||
sample:
|
||||
- 'hello-world:latest'
|
||||
- 'sha256:e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9'
|
||||
images:
|
||||
description: Image inspection results for the loaded images.
|
||||
returned: success
|
||||
type: list
|
||||
elements: dict
|
||||
sample: []
|
||||
'''
|
||||
|
||||
import errno
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
DockerBaseClass,
|
||||
is_image_name_id,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in module_utils.docker.common
|
||||
pass
|
||||
|
||||
|
||||
class ImageManager(DockerBaseClass):
|
||||
def __init__(self, client, results):
|
||||
super(ImageManager, self).__init__()
|
||||
|
||||
self.client = client
|
||||
self.results = results
|
||||
parameters = self.client.module.params
|
||||
self.check_mode = self.client.check_mode
|
||||
|
||||
self.path = parameters['path']
|
||||
|
||||
self.load_images()
|
||||
|
||||
@staticmethod
|
||||
def _extract_output_line(line, output):
|
||||
'''
|
||||
Extract text line from stream output and, if found, adds it to output.
|
||||
'''
|
||||
if 'stream' in line or 'status' in line:
|
||||
# Make sure we have a string (assuming that line['stream'] and
|
||||
# line['status'] are either not defined, falsish, or a string)
|
||||
text_line = line.get('stream') or line.get('status') or ''
|
||||
output.extend(text_line.splitlines())
|
||||
|
||||
def load_images(self):
|
||||
'''
|
||||
Load images from a .tar archive
|
||||
'''
|
||||
# Load image(s) from file
|
||||
load_output = []
|
||||
try:
|
||||
self.log("Opening image {0}".format(self.path))
|
||||
with open(self.path, 'rb') as image_tar:
|
||||
self.log("Loading images from {0}".format(self.path))
|
||||
for line in self.client.load_image(image_tar):
|
||||
self.log(line, pretty_print=True)
|
||||
self._extract_output_line(line, load_output)
|
||||
except EnvironmentError as exc:
|
||||
if exc.errno == errno.ENOENT:
|
||||
self.client.fail("Error opening archive {0} - {1}".format(self.path, to_native(exc)))
|
||||
self.client.fail("Error loading archive {0} - {1}".format(self.path, to_native(exc)), stdout='\n'.join(load_output))
|
||||
except Exception as exc:
|
||||
self.client.fail("Error loading archive {0} - {1}".format(self.path, to_native(exc)), stdout='\n'.join(load_output))
|
||||
|
||||
# Collect loaded images
|
||||
loaded_images = []
|
||||
for line in load_output:
|
||||
if line.startswith('Loaded image:'):
|
||||
loaded_images.append(line[len('Loaded image:'):].strip())
|
||||
if line.startswith('Loaded image ID:'):
|
||||
loaded_images.append(line[len('Loaded image ID:'):].strip())
|
||||
|
||||
if not loaded_images:
|
||||
self.client.fail("Detected no loaded images. Archive potentially corrupt?", stdout='\n'.join(load_output))
|
||||
|
||||
images = []
|
||||
for image_name in loaded_images:
|
||||
if is_image_name_id(image_name):
|
||||
images.append(self.client.find_image_by_id(image_name))
|
||||
elif ':' in image_name:
|
||||
image_name, tag = image_name.rsplit(':', 1)
|
||||
images.append(self.client.find_image(image_name, tag))
|
||||
else:
|
||||
self.client.module.warn('Image name "{0}" is neither ID nor has a tag'.format(image_name))
|
||||
|
||||
self.results['image_names'] = loaded_images
|
||||
self.results['images'] = images
|
||||
self.results['changed'] = True
|
||||
self.results['stdout'] = '\n'.join(load_output)
|
||||
|
||||
|
||||
def main():
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=dict(
|
||||
path=dict(type='path', required=True),
|
||||
),
|
||||
supports_check_mode=False,
|
||||
min_docker_version='2.5.0',
|
||||
min_docker_api_version='1.23',
|
||||
)
|
||||
|
||||
try:
|
||||
results = dict(
|
||||
image_names=[],
|
||||
images=[],
|
||||
)
|
||||
|
||||
ImageManager(client, results)
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,474 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2016 Olaf Kilian <olaf.kilian@symanex.com>
|
||||
# Chris Houseknecht, <house@redhat.com>
|
||||
# James Tanner, <jtanner@redhat.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_login
|
||||
short_description: Log into a Docker registry.
|
||||
description:
|
||||
- Provides functionality similar to the C(docker login) command.
|
||||
- Authenticate with a docker registry and add the credentials to your local Docker config file respectively the
|
||||
credentials store associated to the registry. Adding the credentials to the config files resp. the credential
|
||||
store allows future connections to the registry using tools such as Ansible's Docker modules, the Docker CLI
|
||||
and Docker SDK for Python without needing to provide credentials.
|
||||
- Running in check mode will perform the authentication without updating the config file.
|
||||
options:
|
||||
registry_url:
|
||||
description:
|
||||
- The registry URL.
|
||||
type: str
|
||||
default: "https://index.docker.io/v1/"
|
||||
aliases:
|
||||
- registry
|
||||
- url
|
||||
username:
|
||||
description:
|
||||
- The username for the registry account.
|
||||
- Required when I(state) is C(present).
|
||||
type: str
|
||||
password:
|
||||
description:
|
||||
- The plaintext password for the registry account.
|
||||
- Required when I(state) is C(present).
|
||||
type: str
|
||||
reauthorize:
|
||||
description:
|
||||
- Refresh existing authentication found in the configuration file.
|
||||
type: bool
|
||||
default: no
|
||||
aliases:
|
||||
- reauth
|
||||
config_path:
|
||||
description:
|
||||
- Custom path to the Docker CLI configuration file.
|
||||
type: path
|
||||
default: ~/.docker/config.json
|
||||
aliases:
|
||||
- dockercfg_path
|
||||
state:
|
||||
description:
|
||||
- This controls the current state of the user. C(present) will login in a user, C(absent) will log them out.
|
||||
- To logout you only need the registry server, which defaults to DockerHub.
|
||||
- Before 2.1 you could ONLY log in.
|
||||
- Docker does not support 'logout' with a custom config file.
|
||||
type: str
|
||||
default: 'present'
|
||||
choices: ['present', 'absent']
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Python bindings for docker credentials store API >= 0.2.1
|
||||
(use L(docker-pycreds,https://pypi.org/project/docker-pycreds/) when using Docker SDK for Python < 4.0.0)"
|
||||
- "Docker API >= 1.20"
|
||||
author:
|
||||
- Olaf Kilian (@olsaki) <olaf.kilian@symanex.com>
|
||||
- Chris Houseknecht (@chouseknecht)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
- name: Log into DockerHub
|
||||
community.docker.docker_login:
|
||||
username: docker
|
||||
password: rekcod
|
||||
|
||||
- name: Log into private registry and force re-authorization
|
||||
community.docker.docker_login:
|
||||
registry_url: your.private.registry.io
|
||||
username: yourself
|
||||
password: secrets3
|
||||
reauthorize: yes
|
||||
|
||||
- name: Log into DockerHub using a custom config file
|
||||
community.docker.docker_login:
|
||||
username: docker
|
||||
password: rekcod
|
||||
config_path: /tmp/.mydockercfg
|
||||
|
||||
- name: Log out of DockerHub
|
||||
community.docker.docker_login:
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
login_results:
|
||||
description: Results from the login.
|
||||
returned: when state='present'
|
||||
type: dict
|
||||
sample: {
|
||||
"serveraddress": "localhost:5000",
|
||||
"username": "testuser"
|
||||
}
|
||||
'''
|
||||
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException
|
||||
from docker import auth
|
||||
|
||||
# Earlier versions of docker/docker-py put decode_auth
|
||||
# in docker.auth.auth instead of docker.auth
|
||||
if hasattr(auth, 'decode_auth'):
|
||||
from docker.auth import decode_auth
|
||||
else:
|
||||
from docker.auth.auth import decode_auth
|
||||
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
HAS_DOCKER_PY,
|
||||
DEFAULT_DOCKER_REGISTRY,
|
||||
DockerBaseClass,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
NEEDS_DOCKER_PYCREDS = False
|
||||
|
||||
# Early versions of docker/docker-py rely on docker-pycreds for
|
||||
# the credential store api.
|
||||
if HAS_DOCKER_PY:
|
||||
try:
|
||||
from docker.credentials.errors import StoreError, CredentialsNotFound
|
||||
from docker.credentials import Store
|
||||
except ImportError:
|
||||
try:
|
||||
from dockerpycreds.errors import StoreError, CredentialsNotFound
|
||||
from dockerpycreds.store import Store
|
||||
except ImportError as exc:
|
||||
HAS_DOCKER_ERROR = str(exc)
|
||||
NEEDS_DOCKER_PYCREDS = True
|
||||
|
||||
|
||||
if NEEDS_DOCKER_PYCREDS:
|
||||
# docker-pycreds missing, so we need to create some place holder classes
|
||||
# to allow instantiation.
|
||||
|
||||
class StoreError(Exception):
|
||||
pass
|
||||
|
||||
class CredentialsNotFound(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class DockerFileStore(object):
|
||||
'''
|
||||
A custom credential store class that implements only the functionality we need to
|
||||
update the docker config file when no credential helpers is provided.
|
||||
'''
|
||||
|
||||
program = "<legacy config>"
|
||||
|
||||
def __init__(self, config_path):
|
||||
self._config_path = config_path
|
||||
|
||||
# Make sure we have a minimal config if none is available.
|
||||
self._config = dict(
|
||||
auths=dict()
|
||||
)
|
||||
|
||||
try:
|
||||
# Attempt to read the existing config.
|
||||
with open(self._config_path, "r") as f:
|
||||
config = json.load(f)
|
||||
except (ValueError, IOError):
|
||||
# No config found or an invalid config found so we'll ignore it.
|
||||
config = dict()
|
||||
|
||||
# Update our internal config with what ever was loaded.
|
||||
self._config.update(config)
|
||||
|
||||
@property
|
||||
def config_path(self):
|
||||
'''
|
||||
Return the config path configured in this DockerFileStore instance.
|
||||
'''
|
||||
|
||||
return self._config_path
|
||||
|
||||
def get(self, server):
|
||||
'''
|
||||
Retrieve credentials for `server` if there are any in the config file.
|
||||
Otherwise raise a `StoreError`
|
||||
'''
|
||||
|
||||
server_creds = self._config['auths'].get(server)
|
||||
if not server_creds:
|
||||
raise CredentialsNotFound('No matching credentials')
|
||||
|
||||
(username, password) = decode_auth(server_creds['auth'])
|
||||
|
||||
return dict(
|
||||
Username=username,
|
||||
Secret=password
|
||||
)
|
||||
|
||||
def _write(self):
|
||||
'''
|
||||
Write config back out to disk.
|
||||
'''
|
||||
# Make sure directory exists
|
||||
dir = os.path.dirname(self._config_path)
|
||||
if not os.path.exists(dir):
|
||||
os.makedirs(dir)
|
||||
# Write config; make sure it has permissions 0x600
|
||||
content = json.dumps(self._config, indent=4, sort_keys=True).encode('utf-8')
|
||||
f = os.open(self._config_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
|
||||
try:
|
||||
os.write(f, content)
|
||||
finally:
|
||||
os.close(f)
|
||||
|
||||
def store(self, server, username, password):
|
||||
'''
|
||||
Add a credentials for `server` to the current configuration.
|
||||
'''
|
||||
|
||||
b64auth = base64.b64encode(
|
||||
to_bytes(username) + b':' + to_bytes(password)
|
||||
)
|
||||
auth = to_text(b64auth)
|
||||
|
||||
# build up the auth structure
|
||||
if 'auths' not in self._config:
|
||||
self._config['auths'] = dict()
|
||||
|
||||
self._config['auths'][server] = dict(
|
||||
auth=auth
|
||||
)
|
||||
|
||||
self._write()
|
||||
|
||||
def erase(self, server):
|
||||
'''
|
||||
Remove credentials for the given server from the configuration.
|
||||
'''
|
||||
|
||||
if 'auths' in self._config and server in self._config['auths']:
|
||||
self._config['auths'].pop(server)
|
||||
self._write()
|
||||
|
||||
|
||||
class LoginManager(DockerBaseClass):
|
||||
|
||||
def __init__(self, client, results):
|
||||
|
||||
super(LoginManager, self).__init__()
|
||||
|
||||
self.client = client
|
||||
self.results = results
|
||||
parameters = self.client.module.params
|
||||
self.check_mode = self.client.check_mode
|
||||
|
||||
self.registry_url = parameters.get('registry_url')
|
||||
self.username = parameters.get('username')
|
||||
self.password = parameters.get('password')
|
||||
self.reauthorize = parameters.get('reauthorize')
|
||||
self.config_path = parameters.get('config_path')
|
||||
self.state = parameters.get('state')
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Do the actuall work of this task here. This allows instantiation for partial
|
||||
testing.
|
||||
'''
|
||||
|
||||
if self.state == 'present':
|
||||
self.login()
|
||||
else:
|
||||
self.logout()
|
||||
|
||||
def fail(self, msg):
|
||||
self.client.fail(msg)
|
||||
|
||||
def login(self):
|
||||
'''
|
||||
Log into the registry with provided username/password. On success update the config
|
||||
file with the new authorization.
|
||||
|
||||
:return: None
|
||||
'''
|
||||
|
||||
self.results['actions'].append("Logged into %s" % (self.registry_url))
|
||||
self.log("Log into %s with username %s" % (self.registry_url, self.username))
|
||||
try:
|
||||
response = self.client.login(
|
||||
self.username,
|
||||
password=self.password,
|
||||
registry=self.registry_url,
|
||||
reauth=self.reauthorize,
|
||||
dockercfg_path=self.config_path
|
||||
)
|
||||
except Exception as exc:
|
||||
self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, to_native(exc)))
|
||||
|
||||
# If user is already logged in, then response contains password for user
|
||||
if 'password' in response:
|
||||
# This returns correct password if user is logged in and wrong password is given.
|
||||
# So if it returns another password as we passed, and the user didn't request to
|
||||
# reauthorize, still do it.
|
||||
if not self.reauthorize and response['password'] != self.password:
|
||||
try:
|
||||
response = self.client.login(
|
||||
self.username,
|
||||
password=self.password,
|
||||
registry=self.registry_url,
|
||||
reauth=True,
|
||||
dockercfg_path=self.config_path
|
||||
)
|
||||
except Exception as exc:
|
||||
self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, to_native(exc)))
|
||||
response.pop('password', None)
|
||||
self.results['login_result'] = response
|
||||
|
||||
self.update_credentials()
|
||||
|
||||
def logout(self):
|
||||
'''
|
||||
Log out of the registry. On success update the config file.
|
||||
|
||||
:return: None
|
||||
'''
|
||||
|
||||
# Get the configuration store.
|
||||
store = self.get_credential_store_instance(self.registry_url, self.config_path)
|
||||
|
||||
try:
|
||||
current = store.get(self.registry_url)
|
||||
except CredentialsNotFound:
|
||||
# get raises an exception on not found.
|
||||
self.log("Credentials for %s not present, doing nothing." % (self.registry_url))
|
||||
self.results['changed'] = False
|
||||
return
|
||||
|
||||
if not self.check_mode:
|
||||
store.erase(self.registry_url)
|
||||
self.results['changed'] = True
|
||||
|
||||
def update_credentials(self):
|
||||
'''
|
||||
If the authorization is not stored attempt to store authorization values via
|
||||
the appropriate credential helper or to the config file.
|
||||
|
||||
:return: None
|
||||
'''
|
||||
|
||||
# Check to see if credentials already exist.
|
||||
store = self.get_credential_store_instance(self.registry_url, self.config_path)
|
||||
|
||||
try:
|
||||
current = store.get(self.registry_url)
|
||||
except CredentialsNotFound:
|
||||
# get raises an exception on not found.
|
||||
current = dict(
|
||||
Username='',
|
||||
Secret=''
|
||||
)
|
||||
|
||||
if current['Username'] != self.username or current['Secret'] != self.password or self.reauthorize:
|
||||
if not self.check_mode:
|
||||
store.store(self.registry_url, self.username, self.password)
|
||||
self.log("Writing credentials to configured helper %s for %s" % (store.program, self.registry_url))
|
||||
self.results['actions'].append("Wrote credentials to configured helper %s for %s" % (
|
||||
store.program, self.registry_url))
|
||||
self.results['changed'] = True
|
||||
|
||||
def get_credential_store_instance(self, registry, dockercfg_path):
|
||||
'''
|
||||
Return an instance of docker.credentials.Store used by the given registry.
|
||||
|
||||
:return: A Store or None
|
||||
:rtype: Union[docker.credentials.Store, NoneType]
|
||||
'''
|
||||
|
||||
# Older versions of docker-py don't have this feature.
|
||||
try:
|
||||
credstore_env = self.client.credstore_env
|
||||
except AttributeError:
|
||||
credstore_env = None
|
||||
|
||||
config = auth.load_config(config_path=dockercfg_path)
|
||||
|
||||
if hasattr(auth, 'get_credential_store'):
|
||||
store_name = auth.get_credential_store(config, registry)
|
||||
elif 'credsStore' in config:
|
||||
store_name = config['credsStore']
|
||||
else:
|
||||
store_name = None
|
||||
|
||||
# Make sure that there is a credential helper before trying to instantiate a
|
||||
# Store object.
|
||||
if store_name:
|
||||
self.log("Found credential store %s" % store_name)
|
||||
return Store(store_name, environment=credstore_env)
|
||||
|
||||
return DockerFileStore(dockercfg_path)
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
argument_spec = dict(
|
||||
registry_url=dict(type='str', default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),
|
||||
username=dict(type='str'),
|
||||
password=dict(type='str', no_log=True),
|
||||
reauthorize=dict(type='bool', default=False, aliases=['reauth']),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
config_path=dict(type='path', default='~/.docker/config.json', aliases=['dockercfg_path']),
|
||||
)
|
||||
|
||||
required_if = [
|
||||
('state', 'present', ['username', 'password']),
|
||||
]
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
required_if=required_if,
|
||||
min_docker_api_version='1.20',
|
||||
)
|
||||
|
||||
try:
|
||||
results = dict(
|
||||
changed=False,
|
||||
actions=[],
|
||||
login_result={}
|
||||
)
|
||||
|
||||
manager = LoginManager(client, results)
|
||||
manager.run()
|
||||
|
||||
if 'actions' in results:
|
||||
del results['actions']
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,676 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: docker_network
|
||||
short_description: Manage Docker networks
|
||||
description:
|
||||
- Create/remove Docker networks and connect containers to them.
|
||||
- Performs largely the same function as the C(docker network) CLI subcommand.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the network to operate on.
|
||||
type: str
|
||||
required: yes
|
||||
aliases:
|
||||
- network_name
|
||||
|
||||
connected:
|
||||
description:
|
||||
- List of container names or container IDs to connect to a network.
|
||||
- Please note that the module only makes sure that these containers are connected to the network,
|
||||
but does not care about connection options. If you rely on specific IP addresses etc., use the
|
||||
M(community.docker.docker_container) module to ensure your containers are correctly connected to this network.
|
||||
type: list
|
||||
elements: str
|
||||
aliases:
|
||||
- containers
|
||||
|
||||
driver:
|
||||
description:
|
||||
- Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
|
||||
type: str
|
||||
default: bridge
|
||||
|
||||
driver_options:
|
||||
description:
|
||||
- Dictionary of network settings. Consult docker docs for valid options and values.
|
||||
type: dict
|
||||
|
||||
force:
|
||||
description:
|
||||
- With state C(absent) forces disconnecting all containers from the
|
||||
network prior to deleting the network. With state C(present) will
|
||||
disconnect all containers, delete the network and re-create the
|
||||
network.
|
||||
- This option is required if you have changed the IPAM or driver options
|
||||
and want an existing network to be updated to use the new options.
|
||||
type: bool
|
||||
default: no
|
||||
|
||||
appends:
|
||||
description:
|
||||
- By default the connected list is canonical, meaning containers not on the list are removed from the network.
|
||||
- Use I(appends) to leave existing containers connected.
|
||||
type: bool
|
||||
default: no
|
||||
aliases:
|
||||
- incremental
|
||||
|
||||
enable_ipv6:
|
||||
description:
|
||||
- Enable IPv6 networking.
|
||||
type: bool
|
||||
|
||||
ipam_driver:
|
||||
description:
|
||||
- Specify an IPAM driver.
|
||||
type: str
|
||||
|
||||
ipam_driver_options:
|
||||
description:
|
||||
- Dictionary of IPAM driver options.
|
||||
type: dict
|
||||
|
||||
ipam_config:
|
||||
description:
|
||||
- List of IPAM config blocks. Consult
|
||||
L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values.
|
||||
Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python).
|
||||
type: list
|
||||
elements: dict
|
||||
suboptions:
|
||||
subnet:
|
||||
description:
|
||||
- IP subset in CIDR notation.
|
||||
type: str
|
||||
iprange:
|
||||
description:
|
||||
- IP address range in CIDR notation.
|
||||
type: str
|
||||
gateway:
|
||||
description:
|
||||
- IP gateway address.
|
||||
type: str
|
||||
aux_addresses:
|
||||
description:
|
||||
- Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
|
||||
type: dict
|
||||
|
||||
state:
|
||||
description:
|
||||
- C(absent) deletes the network. If a network has connected containers, it
|
||||
cannot be deleted. Use the I(force) option to disconnect all containers
|
||||
and delete the network.
|
||||
- C(present) creates the network, if it does not already exist with the
|
||||
specified parameters, and connects the list of containers provided via
|
||||
the connected parameter. Containers not on the list will be disconnected.
|
||||
An empty list will leave no containers connected to the network. Use the
|
||||
I(appends) option to leave existing containers connected. Use the I(force)
|
||||
options to force re-creation of the network.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
internal:
|
||||
description:
|
||||
- Restrict external access to the network.
|
||||
type: bool
|
||||
|
||||
labels:
|
||||
description:
|
||||
- Dictionary of labels.
|
||||
type: dict
|
||||
|
||||
scope:
|
||||
description:
|
||||
- Specify the network's scope.
|
||||
type: str
|
||||
choices:
|
||||
- local
|
||||
- global
|
||||
- swarm
|
||||
|
||||
attachable:
|
||||
description:
|
||||
- If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect to the network.
|
||||
type: bool
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
notes:
|
||||
- When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates the network.
|
||||
It does not try to reconnect containers, except the ones listed in (I(connected), and even for these, it does not consider specific
|
||||
connection options like fixed IP addresses or MAC addresses. If you need more control over how the containers are connected to the
|
||||
network, loop the M(community.docker.docker_container) module to loop over your containers to make sure they are connected properly.
|
||||
- The module does not support Docker Swarm. This means that it will not try to disconnect or reconnect services. If services are connected to the
|
||||
network, deleting the network will fail. When network options are changed, the network has to be deleted and recreated, so this will
|
||||
fail as well.
|
||||
|
||||
author:
|
||||
- "Ben Keith (@keitwb)"
|
||||
- "Chris Houseknecht (@chouseknecht)"
|
||||
- "Dave Bendit (@DBendit)"
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "The docker server >= 1.10.0"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a network
|
||||
community.docker.docker_network:
|
||||
name: network_one
|
||||
|
||||
- name: Remove all but selected list of containers
|
||||
community.docker.docker_network:
|
||||
name: network_one
|
||||
connected:
|
||||
- container_a
|
||||
- container_b
|
||||
- container_c
|
||||
|
||||
- name: Remove a single container
|
||||
community.docker.docker_network:
|
||||
name: network_one
|
||||
connected: "{{ fulllist|difference(['container_a']) }}"
|
||||
|
||||
- name: Add a container to a network, leaving existing containers connected
|
||||
community.docker.docker_network:
|
||||
name: network_one
|
||||
connected:
|
||||
- container_a
|
||||
appends: yes
|
||||
|
||||
- name: Create a network with driver options
|
||||
community.docker.docker_network:
|
||||
name: network_two
|
||||
driver_options:
|
||||
com.docker.network.bridge.name: net2
|
||||
|
||||
- name: Create a network with custom IPAM config
|
||||
community.docker.docker_network:
|
||||
name: network_three
|
||||
ipam_config:
|
||||
- subnet: 172.23.27.0/24
|
||||
gateway: 172.23.27.2
|
||||
iprange: 172.23.27.0/26
|
||||
aux_addresses:
|
||||
host1: 172.23.27.3
|
||||
host2: 172.23.27.4
|
||||
|
||||
- name: Create a network with labels
|
||||
community.docker.docker_network:
|
||||
name: network_four
|
||||
labels:
|
||||
key1: value1
|
||||
key2: value2
|
||||
|
||||
- name: Create a network with IPv6 IPAM config
|
||||
community.docker.docker_network:
|
||||
name: network_ipv6_one
|
||||
enable_ipv6: yes
|
||||
ipam_config:
|
||||
- subnet: fdd1:ac8c:0557:7ce1::/64
|
||||
|
||||
- name: Create a network with IPv6 and custom IPv4 IPAM config
|
||||
community.docker.docker_network:
|
||||
name: network_ipv6_two
|
||||
enable_ipv6: yes
|
||||
ipam_config:
|
||||
- subnet: 172.24.27.0/24
|
||||
- subnet: fdd1:ac8c:0557:7ce2::/64
|
||||
|
||||
- name: Delete a network, disconnecting all containers
|
||||
community.docker.docker_network:
|
||||
name: network_one
|
||||
state: absent
|
||||
force: yes
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
network:
|
||||
description:
|
||||
- Network inspection results for the affected network.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: {}
|
||||
'''
|
||||
|
||||
import re
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
DockerBaseClass,
|
||||
docker_version,
|
||||
DifferenceTracker,
|
||||
clean_dict_booleans_for_docker_api,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
try:
|
||||
from docker import utils
|
||||
from docker.errors import DockerException
|
||||
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
|
||||
from docker.types import IPAMPool, IPAMConfig
|
||||
except Exception:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
|
||||
class TaskParameters(DockerBaseClass):
|
||||
def __init__(self, client):
|
||||
super(TaskParameters, self).__init__()
|
||||
self.client = client
|
||||
|
||||
self.name = None
|
||||
self.connected = None
|
||||
self.driver = None
|
||||
self.driver_options = None
|
||||
self.ipam_driver = None
|
||||
self.ipam_driver_options = None
|
||||
self.ipam_config = None
|
||||
self.appends = None
|
||||
self.force = None
|
||||
self.internal = None
|
||||
self.labels = None
|
||||
self.debug = None
|
||||
self.enable_ipv6 = None
|
||||
self.scope = None
|
||||
self.attachable = None
|
||||
|
||||
for key, value in client.module.params.items():
|
||||
setattr(self, key, value)
|
||||
|
||||
|
||||
def container_names_in_network(network):
|
||||
return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
|
||||
|
||||
|
||||
CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$')
|
||||
CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/([0-9]|[1-9][0-9]|1[0-2][0-9])$')
|
||||
|
||||
|
||||
def validate_cidr(cidr):
|
||||
"""Validate CIDR. Return IP version of a CIDR string on success.
|
||||
|
||||
:param cidr: Valid CIDR
|
||||
:type cidr: str
|
||||
:return: ``ipv4`` or ``ipv6``
|
||||
:rtype: str
|
||||
:raises ValueError: If ``cidr`` is not a valid CIDR
|
||||
"""
|
||||
if CIDR_IPV4.match(cidr):
|
||||
return 'ipv4'
|
||||
elif CIDR_IPV6.match(cidr):
|
||||
return 'ipv6'
|
||||
raise ValueError('"{0}" is not a valid CIDR'.format(cidr))
|
||||
|
||||
|
||||
def normalize_ipam_config_key(key):
|
||||
"""Normalizes IPAM config keys returned by Docker API to match Ansible keys.
|
||||
|
||||
:param key: Docker API key
|
||||
:type key: str
|
||||
:return Ansible module key
|
||||
:rtype str
|
||||
"""
|
||||
special_cases = {
|
||||
'AuxiliaryAddresses': 'aux_addresses'
|
||||
}
|
||||
return special_cases.get(key, key.lower())
|
||||
|
||||
|
||||
def dicts_are_essentially_equal(a, b):
|
||||
"""Make sure that a is a subset of b, where None entries of a are ignored."""
|
||||
for k, v in a.items():
|
||||
if v is None:
|
||||
continue
|
||||
if b.get(k) != v:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class DockerNetworkManager(object):
|
||||
|
||||
def __init__(self, client):
|
||||
self.client = client
|
||||
self.parameters = TaskParameters(client)
|
||||
self.check_mode = self.client.check_mode
|
||||
self.results = {
|
||||
u'changed': False,
|
||||
u'actions': []
|
||||
}
|
||||
self.diff = self.client.module._diff
|
||||
self.diff_tracker = DifferenceTracker()
|
||||
self.diff_result = dict()
|
||||
|
||||
self.existing_network = self.get_existing_network()
|
||||
|
||||
if not self.parameters.connected and self.existing_network:
|
||||
self.parameters.connected = container_names_in_network(self.existing_network)
|
||||
|
||||
if self.parameters.ipam_config:
|
||||
try:
|
||||
for ipam_config in self.parameters.ipam_config:
|
||||
validate_cidr(ipam_config['subnet'])
|
||||
except ValueError as e:
|
||||
self.client.fail(to_native(e))
|
||||
|
||||
if self.parameters.driver_options:
|
||||
self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)
|
||||
|
||||
state = self.parameters.state
|
||||
if state == 'present':
|
||||
self.present()
|
||||
elif state == 'absent':
|
||||
self.absent()
|
||||
|
||||
if self.diff or self.check_mode or self.parameters.debug:
|
||||
if self.diff:
|
||||
self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
|
||||
self.results['diff'] = self.diff_result
|
||||
|
||||
def get_existing_network(self):
|
||||
return self.client.get_network(name=self.parameters.name)
|
||||
|
||||
def has_different_config(self, net):
|
||||
'''
|
||||
Evaluates an existing network and returns a tuple containing a boolean
|
||||
indicating if the configuration is different and a list of differences.
|
||||
|
||||
:param net: the inspection output for an existing network
|
||||
:return: (bool, list)
|
||||
'''
|
||||
differences = DifferenceTracker()
|
||||
if self.parameters.driver and self.parameters.driver != net['Driver']:
|
||||
differences.add('driver',
|
||||
parameter=self.parameters.driver,
|
||||
active=net['Driver'])
|
||||
if self.parameters.driver_options:
|
||||
if not net.get('Options'):
|
||||
differences.add('driver_options',
|
||||
parameter=self.parameters.driver_options,
|
||||
active=net.get('Options'))
|
||||
else:
|
||||
for key, value in self.parameters.driver_options.items():
|
||||
if not (key in net['Options']) or value != net['Options'][key]:
|
||||
differences.add('driver_options.%s' % key,
|
||||
parameter=value,
|
||||
active=net['Options'].get(key))
|
||||
|
||||
if self.parameters.ipam_driver:
|
||||
if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
|
||||
differences.add('ipam_driver',
|
||||
parameter=self.parameters.ipam_driver,
|
||||
active=net.get('IPAM'))
|
||||
|
||||
if self.parameters.ipam_driver_options is not None:
|
||||
ipam_driver_options = net['IPAM'].get('Options') or {}
|
||||
if ipam_driver_options != self.parameters.ipam_driver_options:
|
||||
differences.add('ipam_driver_options',
|
||||
parameter=self.parameters.ipam_driver_options,
|
||||
active=ipam_driver_options)
|
||||
|
||||
if self.parameters.ipam_config is not None and self.parameters.ipam_config:
|
||||
if not net.get('IPAM') or not net['IPAM']['Config']:
|
||||
differences.add('ipam_config',
|
||||
parameter=self.parameters.ipam_config,
|
||||
active=net.get('IPAM', {}).get('Config'))
|
||||
else:
|
||||
# Put network's IPAM config into the same format as module's IPAM config
|
||||
net_ipam_configs = []
|
||||
for net_ipam_config in net['IPAM']['Config']:
|
||||
config = dict()
|
||||
for k, v in net_ipam_config.items():
|
||||
config[normalize_ipam_config_key(k)] = v
|
||||
net_ipam_configs.append(config)
|
||||
# Compare lists of dicts as sets of dicts
|
||||
for idx, ipam_config in enumerate(self.parameters.ipam_config):
|
||||
net_config = dict()
|
||||
for net_ipam_config in net_ipam_configs:
|
||||
if dicts_are_essentially_equal(ipam_config, net_ipam_config):
|
||||
net_config = net_ipam_config
|
||||
break
|
||||
for key, value in ipam_config.items():
|
||||
if value is None:
|
||||
# due to recursive argument_spec, all keys are always present
|
||||
# (but have default value None if not specified)
|
||||
continue
|
||||
if value != net_config.get(key):
|
||||
differences.add('ipam_config[%s].%s' % (idx, key),
|
||||
parameter=value,
|
||||
active=net_config.get(key))
|
||||
|
||||
if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False):
|
||||
differences.add('enable_ipv6',
|
||||
parameter=self.parameters.enable_ipv6,
|
||||
active=net.get('EnableIPv6', False))
|
||||
|
||||
if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False):
|
||||
differences.add('internal',
|
||||
parameter=self.parameters.internal,
|
||||
active=net.get('Internal'))
|
||||
|
||||
if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'):
|
||||
differences.add('scope',
|
||||
parameter=self.parameters.scope,
|
||||
active=net.get('Scope'))
|
||||
|
||||
if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False):
|
||||
differences.add('attachable',
|
||||
parameter=self.parameters.attachable,
|
||||
active=net.get('Attachable'))
|
||||
if self.parameters.labels:
|
||||
if not net.get('Labels'):
|
||||
differences.add('labels',
|
||||
parameter=self.parameters.labels,
|
||||
active=net.get('Labels'))
|
||||
else:
|
||||
for key, value in self.parameters.labels.items():
|
||||
if not (key in net['Labels']) or value != net['Labels'][key]:
|
||||
differences.add('labels.%s' % key,
|
||||
parameter=value,
|
||||
active=net['Labels'].get(key))
|
||||
|
||||
return not differences.empty, differences
|
||||
|
||||
def create_network(self):
|
||||
if not self.existing_network:
|
||||
params = dict(
|
||||
driver=self.parameters.driver,
|
||||
options=self.parameters.driver_options,
|
||||
)
|
||||
|
||||
ipam_pools = []
|
||||
if self.parameters.ipam_config:
|
||||
for ipam_pool in self.parameters.ipam_config:
|
||||
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
|
||||
ipam_pools.append(IPAMPool(**ipam_pool))
|
||||
else:
|
||||
ipam_pools.append(utils.create_ipam_pool(**ipam_pool))
|
||||
|
||||
if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools:
|
||||
# Only add ipam parameter if a driver was specified or if IPAM parameters
|
||||
# were specified. Leaving this parameter away can significantly speed up
|
||||
# creation; on my machine creation with this option needs ~15 seconds,
|
||||
# and without just a few seconds.
|
||||
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
|
||||
params['ipam'] = IPAMConfig(driver=self.parameters.ipam_driver,
|
||||
pool_configs=ipam_pools,
|
||||
options=self.parameters.ipam_driver_options)
|
||||
else:
|
||||
params['ipam'] = utils.create_ipam_config(driver=self.parameters.ipam_driver,
|
||||
pool_configs=ipam_pools)
|
||||
|
||||
if self.parameters.enable_ipv6 is not None:
|
||||
params['enable_ipv6'] = self.parameters.enable_ipv6
|
||||
if self.parameters.internal is not None:
|
||||
params['internal'] = self.parameters.internal
|
||||
if self.parameters.scope is not None:
|
||||
params['scope'] = self.parameters.scope
|
||||
if self.parameters.attachable is not None:
|
||||
params['attachable'] = self.parameters.attachable
|
||||
if self.parameters.labels:
|
||||
params['labels'] = self.parameters.labels
|
||||
|
||||
if not self.check_mode:
|
||||
resp = self.client.create_network(self.parameters.name, **params)
|
||||
self.client.report_warnings(resp, ['Warning'])
|
||||
self.existing_network = self.client.get_network(network_id=resp['Id'])
|
||||
self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
|
||||
self.results['changed'] = True
|
||||
|
||||
def remove_network(self):
|
||||
if self.existing_network:
|
||||
self.disconnect_all_containers()
|
||||
if not self.check_mode:
|
||||
self.client.remove_network(self.parameters.name)
|
||||
self.results['actions'].append("Removed network %s" % (self.parameters.name,))
|
||||
self.results['changed'] = True
|
||||
|
||||
def is_container_connected(self, container_name):
|
||||
if not self.existing_network:
|
||||
return False
|
||||
return container_name in container_names_in_network(self.existing_network)
|
||||
|
||||
def connect_containers(self):
|
||||
for name in self.parameters.connected:
|
||||
if not self.is_container_connected(name):
|
||||
if not self.check_mode:
|
||||
self.client.connect_container_to_network(name, self.parameters.name)
|
||||
self.results['actions'].append("Connected container %s" % (name,))
|
||||
self.results['changed'] = True
|
||||
self.diff_tracker.add('connected.{0}'.format(name),
|
||||
parameter=True,
|
||||
active=False)
|
||||
|
||||
def disconnect_missing(self):
|
||||
if not self.existing_network:
|
||||
return
|
||||
containers = self.existing_network['Containers']
|
||||
if not containers:
|
||||
return
|
||||
for c in containers.values():
|
||||
name = c['Name']
|
||||
if name not in self.parameters.connected:
|
||||
self.disconnect_container(name)
|
||||
|
||||
def disconnect_all_containers(self):
|
||||
containers = self.client.get_network(name=self.parameters.name)['Containers']
|
||||
if not containers:
|
||||
return
|
||||
for cont in containers.values():
|
||||
self.disconnect_container(cont['Name'])
|
||||
|
||||
def disconnect_container(self, container_name):
|
||||
if not self.check_mode:
|
||||
self.client.disconnect_container_from_network(container_name, self.parameters.name)
|
||||
self.results['actions'].append("Disconnected container %s" % (container_name,))
|
||||
self.results['changed'] = True
|
||||
self.diff_tracker.add('connected.{0}'.format(container_name),
|
||||
parameter=False,
|
||||
active=True)
|
||||
|
||||
def present(self):
|
||||
different = False
|
||||
differences = DifferenceTracker()
|
||||
if self.existing_network:
|
||||
different, differences = self.has_different_config(self.existing_network)
|
||||
|
||||
self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None)
|
||||
if self.parameters.force or different:
|
||||
self.remove_network()
|
||||
self.existing_network = None
|
||||
|
||||
self.create_network()
|
||||
self.connect_containers()
|
||||
if not self.parameters.appends:
|
||||
self.disconnect_missing()
|
||||
|
||||
if self.diff or self.check_mode or self.parameters.debug:
|
||||
self.diff_result['differences'] = differences.get_legacy_docker_diffs()
|
||||
self.diff_tracker.merge(differences)
|
||||
|
||||
if not self.check_mode and not self.parameters.debug:
|
||||
self.results.pop('actions')
|
||||
|
||||
network_facts = self.get_existing_network()
|
||||
self.results['network'] = network_facts
|
||||
|
||||
def absent(self):
|
||||
self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None)
|
||||
self.remove_network()
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
name=dict(type='str', required=True, aliases=['network_name']),
|
||||
connected=dict(type='list', default=[], elements='str', aliases=['containers']),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
driver=dict(type='str', default='bridge'),
|
||||
driver_options=dict(type='dict', default={}),
|
||||
force=dict(type='bool', default=False),
|
||||
appends=dict(type='bool', default=False, aliases=['incremental']),
|
||||
ipam_driver=dict(type='str'),
|
||||
ipam_driver_options=dict(type='dict'),
|
||||
ipam_config=dict(type='list', elements='dict', options=dict(
|
||||
subnet=dict(type='str'),
|
||||
iprange=dict(type='str'),
|
||||
gateway=dict(type='str'),
|
||||
aux_addresses=dict(type='dict'),
|
||||
)),
|
||||
enable_ipv6=dict(type='bool'),
|
||||
internal=dict(type='bool'),
|
||||
labels=dict(type='dict', default={}),
|
||||
debug=dict(type='bool', default=False),
|
||||
scope=dict(type='str', choices=['local', 'global', 'swarm']),
|
||||
attachable=dict(type='bool'),
|
||||
)
|
||||
|
||||
option_minimal_versions = dict(
|
||||
scope=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
|
||||
attachable=dict(docker_py_version='2.0.0', docker_api_version='1.26'),
|
||||
labels=dict(docker_api_version='1.23'),
|
||||
ipam_driver_options=dict(docker_py_version='2.0.0'),
|
||||
)
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
min_docker_version='1.10.0',
|
||||
min_docker_api_version='1.22',
|
||||
# "The docker server >= 1.10.0"
|
||||
option_minimal_versions=option_minimal_versions,
|
||||
)
|
||||
|
||||
try:
|
||||
cm = DockerNetworkManager(client)
|
||||
client.module.exit_json(**cm.results)
|
||||
except DockerException as e:
|
||||
client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,145 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_network_info
|
||||
|
||||
short_description: Retrieves facts about docker network
|
||||
|
||||
description:
|
||||
- Retrieves facts about a docker network.
|
||||
- Essentially returns the output of C(docker network inspect <name>), similar to what M(community.docker.docker_network)
|
||||
returns for a non-absent network.
|
||||
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the network to inspect.
|
||||
- When identifying an existing network name may be a name or a long or short network ID.
|
||||
type: str
|
||||
required: yes
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
author:
|
||||
- "Dave Bendit (@DBendit)"
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Docker API >= 1.21"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get infos on network
|
||||
community.docker.docker_network_info:
|
||||
name: mydata
|
||||
register: result
|
||||
|
||||
- name: Does network exist?
|
||||
ansible.builtin.debug:
|
||||
msg: "The network {{ 'exists' if result.exists else 'does not exist' }}"
|
||||
|
||||
- name: Print information about network
|
||||
ansible.builtin.debug:
|
||||
var: result.network
|
||||
when: result.exists
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
exists:
|
||||
description:
|
||||
- Returns whether the network exists.
|
||||
type: bool
|
||||
returned: always
|
||||
sample: true
|
||||
network:
|
||||
description:
|
||||
- Facts representing the current state of the network. Matches the docker inspection output.
|
||||
- Will be C(none) if network does not exist.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: '{
|
||||
"Attachable": false,
|
||||
"ConfigFrom": {
|
||||
"Network": ""
|
||||
},
|
||||
"ConfigOnly": false,
|
||||
"Containers": {},
|
||||
"Created": "2018-12-07T01:47:51.250835114-06:00",
|
||||
"Driver": "bridge",
|
||||
"EnableIPv6": false,
|
||||
"IPAM": {
|
||||
"Config": [
|
||||
{
|
||||
"Gateway": "192.168.96.1",
|
||||
"Subnet": "192.168.96.0/20"
|
||||
}
|
||||
],
|
||||
"Driver": "default",
|
||||
"Options": null
|
||||
},
|
||||
"Id": "0856968545f22026c41c2c7c3d448319d3b4a6a03a40b148b3ac4031696d1c0a",
|
||||
"Ingress": false,
|
||||
"Internal": false,
|
||||
"Labels": {},
|
||||
"Name": "ansible-test-f2700bba",
|
||||
"Options": {},
|
||||
"Scope": "local"
|
||||
}'
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
|
||||
def main():
    """Entry point for the docker_network_info module.

    Inspects a single Docker network by name or ID and exits with
    ``exists`` (bool) and ``network`` (inspection dict, or None when the
    network does not exist). Never changes state (``changed`` is always False).
    """
    argument_spec = dict(
        name=dict(type='str', required=True),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_api_version='1.21',
    )

    try:
        # get_network resolves names as well as long/short IDs; returns None
        # when nothing matches.
        network = client.get_network(client.module.params['name'])

        client.module.exit_json(
            changed=False,
            exists=bool(network),  # idiomatic form of `True if network else False`
            network=network,
        )
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,296 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_node
|
||||
short_description: Manage Docker Swarm node
|
||||
description:
|
||||
- Manages the Docker nodes via Swarm Manager.
|
||||
- This module allows to change the node's role, its availability, and to modify, add or remove node labels.
|
||||
options:
|
||||
hostname:
|
||||
description:
|
||||
- The hostname or ID of node as registered in Swarm.
|
||||
- If more than one node is registered using the same hostname the ID must be used,
|
||||
otherwise module will fail.
|
||||
type: str
|
||||
required: yes
|
||||
labels:
|
||||
description:
|
||||
- User-defined key/value metadata that will be assigned as node attribute.
|
||||
- Label operations in this module apply to the docker swarm node specified by I(hostname).
|
||||
Use M(community.docker.docker_swarm) module to add/modify/remove swarm cluster labels.
|
||||
- The actual state of labels assigned to the node when module completes its work depends on
|
||||
I(labels_state) and I(labels_to_remove) parameters values. See description below.
|
||||
type: dict
|
||||
labels_state:
|
||||
description:
|
||||
- It defines the operation on the labels assigned to node and labels specified in I(labels) option.
|
||||
- Set to C(merge) to combine labels provided in I(labels) with those already assigned to the node.
|
||||
If no labels are assigned then it will add listed labels. For labels that are already assigned
|
||||
to the node, it will update their values. The labels not specified in I(labels) will remain unchanged.
|
||||
If I(labels) is empty then no changes will be made.
|
||||
- Set to C(replace) to replace all assigned labels with provided ones. If I(labels) is empty then
|
||||
all labels assigned to the node will be removed.
|
||||
type: str
|
||||
default: 'merge'
|
||||
choices:
|
||||
- merge
|
||||
- replace
|
||||
labels_to_remove:
|
||||
description:
|
||||
- List of labels that will be removed from the node configuration. The list has to contain only label
|
||||
names, not their values.
|
||||
- If the label provided on the list is not assigned to the node, the entry is ignored.
|
||||
- If the label is both on the I(labels_to_remove) and I(labels), then value provided in I(labels) remains
|
||||
assigned to the node.
|
||||
- If I(labels_state) is C(replace) and I(labels) is not provided or empty then all labels assigned to
|
||||
node are removed and I(labels_to_remove) is ignored.
|
||||
type: list
|
||||
elements: str
|
||||
availability:
|
||||
description: Node availability to assign. If not provided then node availability remains unchanged.
|
||||
choices:
|
||||
- active
|
||||
- pause
|
||||
- drain
|
||||
type: str
|
||||
role:
|
||||
description: Node role to assign. If not provided then node role remains unchanged.
|
||||
choices:
|
||||
- manager
|
||||
- worker
|
||||
type: str
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
|
||||
- Docker API >= 1.25
|
||||
author:
|
||||
- Piotr Wojciechowski (@WojciechowskiPiotr)
|
||||
- Thierry Bouvet (@tbouvet)
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Set node role
|
||||
community.docker.docker_node:
|
||||
hostname: mynode
|
||||
role: manager
|
||||
|
||||
- name: Set node availability
|
||||
community.docker.docker_node:
|
||||
hostname: mynode
|
||||
availability: drain
|
||||
|
||||
- name: Replace node labels with new labels
|
||||
community.docker.docker_node:
|
||||
hostname: mynode
|
||||
labels:
|
||||
key: value
|
||||
labels_state: replace
|
||||
|
||||
- name: Merge node labels and new labels
|
||||
community.docker.docker_node:
|
||||
hostname: mynode
|
||||
labels:
|
||||
key: value
|
||||
|
||||
- name: Remove all labels assigned to node
|
||||
community.docker.docker_node:
|
||||
hostname: mynode
|
||||
labels_state: replace
|
||||
|
||||
- name: Remove selected labels from the node
|
||||
community.docker.docker_node:
|
||||
hostname: mynode
|
||||
labels_to_remove:
|
||||
- key1
|
||||
- key2
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
node:
|
||||
description: Information about node after 'update' operation
|
||||
returned: success
|
||||
type: dict
|
||||
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
DockerBaseClass,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
|
||||
|
||||
|
||||
class TaskParameters(DockerBaseClass):
    """Typed holder for the docker_node module options.

    Declares every expected attribute up front (as None), then copies each
    entry of ``client.module.params`` onto the instance, so module options
    are available as plain attributes (e.g. ``self.labels_state``).
    """

    def __init__(self, client):
        super(TaskParameters, self).__init__()

        # Spec-related options
        self.name = None
        self.labels = None
        self.labels_state = None
        self.labels_to_remove = None

        # Node-related options
        self.availability = None
        self.role = None

        # Overwrite the declared defaults with the user-supplied values.
        for param_name, param_value in client.module.params.items():
            setattr(self, param_name, param_value)
|
||||
|
||||
|
||||
class SwarmNodeManager(DockerBaseClass):
    """Applies role/availability/label changes to a single swarm node.

    The update runs entirely from ``__init__``; the outcome is written into
    the ``results`` dict supplied by the caller (keys ``node`` and ``changed``).
    """

    def __init__(self, client, results):
        super(SwarmNodeManager, self).__init__()

        self.client = client
        self.results = results
        self.check_mode = self.client.check_mode

        # Node management is only possible when talking to a swarm manager.
        self.client.fail_task_if_not_swarm_manager()

        self.parameters = TaskParameters(client)

        self.node_update()

    def node_update(self):
        """Build the desired node spec and push it via the API when it differs."""
        if not self.client.check_if_swarm_node(node_id=self.parameters.hostname):
            self.client.fail("This node is not part of a swarm.")
            return

        if self.client.check_if_swarm_node_is_down():
            self.client.fail("Can not update the node. The node is down.")

        try:
            node_info = self.client.inspect_node(node_id=self.parameters.hostname)
        except APIError as exc:
            self.client.fail("Failed to get node information for %s" % to_native(exc))

        changed = False
        # Start from the requested values; None entries are filled in below
        # from the node's current spec so they remain unchanged.
        node_spec = dict(
            Availability=self.parameters.availability,
            Role=self.parameters.role,
            Labels=self.parameters.labels,
        )

        if self.parameters.role is None:
            node_spec['Role'] = node_info['Spec']['Role']
        elif node_info['Spec']['Role'] != self.parameters.role:
            node_spec['Role'] = self.parameters.role
            changed = True

        if self.parameters.availability is None:
            node_spec['Availability'] = node_info['Spec']['Availability']
        elif node_info['Spec']['Availability'] != self.parameters.availability:
            # NOTE(review): this writes into node_info rather than node_spec
            # (probably unintended); node_spec['Availability'] already holds
            # the requested value from the dict above, so the update still
            # pushes the right spec. Kept as-is to preserve behavior.
            node_info['Spec']['Availability'] = self.parameters.availability
            changed = True

        if self.parameters.labels_state == 'replace':
            if self.parameters.labels is None:
                # replace with nothing: wipe all labels
                node_spec['Labels'] = {}
                if node_info['Spec']['Labels']:
                    changed = True
            elif (node_info['Spec']['Labels'] or {}) != self.parameters.labels:
                node_spec['Labels'] = self.parameters.labels
                changed = True
        elif self.parameters.labels_state == 'merge':
            node_spec['Labels'] = dict(node_info['Spec']['Labels'] or {})
            if self.parameters.labels is not None:
                for label_key, label_value in self.parameters.labels.items():
                    if node_spec['Labels'].get(label_key) != label_value:
                        node_spec['Labels'][label_key] = label_value
                        changed = True

            if self.parameters.labels_to_remove is not None:
                for label_key in self.parameters.labels_to_remove:
                    if self.parameters.labels is not None:
                        if not self.parameters.labels.get(label_key):
                            if node_spec['Labels'].get(label_key):
                                node_spec['Labels'].pop(label_key)
                                changed = True
                        else:
                            # A key listed in both options: 'labels' wins.
                            self.client.module.warn(
                                "Label '%s' listed both in 'labels' and 'labels_to_remove'. "
                                "Keeping the assigned label value."
                                % to_native(label_key))
                    else:
                        if node_spec['Labels'].get(label_key):
                            node_spec['Labels'].pop(label_key)
                            changed = True

        if changed is True:
            if not self.check_mode:
                try:
                    self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'],
                                            node_spec=node_spec)
                except APIError as exc:
                    self.client.fail("Failed to update node : %s" % to_native(exc))
            self.results['node'] = self.client.get_node_inspect(node_id=node_info['ID'])
            self.results['changed'] = changed
        else:
            self.results['node'] = node_info
            self.results['changed'] = changed
|
||||
|
||||
|
||||
def main():
    """Entry point for the docker_node module.

    Builds the argument spec, connects as a swarm client, and delegates the
    actual node update to SwarmNodeManager, which fills in ``results``.
    """
    argument_spec = {
        'hostname': {'type': 'str', 'required': True},
        'labels': {'type': 'dict'},
        'labels_state': {'type': 'str', 'default': 'merge', 'choices': ['merge', 'replace']},
        'labels_to_remove': {'type': 'list', 'elements': 'str'},
        'availability': {'type': 'str', 'choices': ['active', 'pause', 'drain']},
        'role': {'type': 'str', 'choices': ['worker', 'manager']},
    }

    client = AnsibleDockerSwarmClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_version='2.4.0',
        min_docker_api_version='1.25',
    )

    try:
        results = {'changed': False}

        # SwarmNodeManager mutates `results` in place.
        SwarmNodeManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,160 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_node_info
|
||||
|
||||
short_description: Retrieves facts about docker swarm node from Swarm Manager
|
||||
|
||||
description:
|
||||
- Retrieves facts about a docker node.
|
||||
- Essentially returns the output of C(docker node inspect <name>).
|
||||
- Must be executed on a host running as Swarm Manager, otherwise the module will fail.
|
||||
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the node to inspect.
|
||||
- The list of nodes names to inspect.
|
||||
- If empty then return information of all nodes in Swarm cluster.
|
||||
- When identifying the node use either the hostname of the node (as registered in Swarm) or node ID.
|
||||
- If I(self) is C(true) then this parameter is ignored.
|
||||
type: list
|
||||
elements: str
|
||||
self:
|
||||
description:
|
||||
- If C(true), queries the node (that is, the docker daemon) the module communicates with.
|
||||
- If C(true) then I(name) is ignored.
|
||||
- If C(false) then query depends on I(name) presence and value.
|
||||
type: bool
|
||||
default: no
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
author:
|
||||
- Piotr Wojciechowski (@WojciechowskiPiotr)
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
|
||||
- "Docker API >= 1.24"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get info on all nodes
|
||||
community.docker.docker_node_info:
|
||||
register: result
|
||||
|
||||
- name: Get info on node
|
||||
community.docker.docker_node_info:
|
||||
name: mynode
|
||||
register: result
|
||||
|
||||
- name: Get info on list of nodes
|
||||
community.docker.docker_node_info:
|
||||
name:
|
||||
- mynode1
|
||||
- mynode2
|
||||
register: result
|
||||
|
||||
- name: Get info on host if it is Swarm Manager
|
||||
community.docker.docker_node_info:
|
||||
self: true
|
||||
register: result
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
nodes:
|
||||
description:
|
||||
- Facts representing the current state of the nodes. Matches the C(docker node inspect) output.
|
||||
- Can contain multiple entries if more than one node provided in I(name), or I(name) is not provided.
|
||||
- If I(name) contains a list of nodes, the output will provide information on all nodes registered
|
||||
at the swarm, including nodes that left the swarm but have not been removed from the cluster on swarm
|
||||
managers and nodes that are unreachable.
|
||||
returned: always
|
||||
type: list
|
||||
elements: dict
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
RequestException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
|
||||
def get_node_facts(client):
    """Collect node inspection data according to the module parameters.

    - ``self`` is True: inspect only the node the daemon runs on.
    - ``name`` is None: inspect every node in the cluster.
    - otherwise: inspect each named node, silently skipping unknown names.

    :param client: swarm client exposing module params and node inspect calls
    :return: list of node inspection dicts (or whatever
             get_all_nodes_inspect() returns for the all-nodes case)
    """
    params = client.module.params

    if params['self'] is True:
        own_id = client.get_swarm_node_id()
        return [client.get_node_inspect(node_id=own_id)]

    if params['name'] is None:
        return client.get_all_nodes_inspect()

    requested = params['name']
    # Defensive: the option is declared as a list, but accept a bare scalar too.
    if not isinstance(requested, list):
        requested = [requested]

    collected = []
    for node_name in requested:
        info = client.get_node_inspect(node_id=node_name, skip_missing=True)
        if info:
            collected.append(info)
    return collected
|
||||
|
||||
|
||||
def main():
    """Entry point for the docker_node_info module.

    Fails early unless the target host is a swarm manager, then returns the
    requested node inspection data under ``nodes``.
    """
    argument_spec = {
        'name': {'type': 'list', 'elements': 'str'},
        'self': {'type': 'bool', 'default': False},
    }

    client = AnsibleDockerSwarmClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_version='2.4.0',
        min_docker_api_version='1.24',
    )

    # Node inspection only works from a swarm manager.
    client.fail_task_if_not_swarm_manager()

    try:
        node_facts = get_node_facts(client)

        client.module.exit_json(
            changed=False,
            nodes=node_facts,
        )
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,371 @@
|
||||
#!/usr/bin/python
|
||||
# coding: utf-8
|
||||
#
|
||||
# Copyright: (c) 2021 Red Hat | Ansible Sakar Mehra<@sakarmehra100@gmail.com | @sakar97>
|
||||
# Copyright: (c) 2019, Vladimir Porshkevich (@porshkevich) <neosonic@mail.ru>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: docker_plugin
|
||||
short_description: Manage Docker plugins
|
||||
version_added: 1.3.0
|
||||
description:
|
||||
- This module allows to install, delete, enable and disable Docker plugins.
|
||||
- Performs largely the same function as the C(docker plugin) CLI subcommand.
|
||||
options:
|
||||
plugin_name:
|
||||
description:
|
||||
- Name of the plugin to operate on.
|
||||
required: true
|
||||
type: str
|
||||
|
||||
state:
|
||||
description:
|
||||
- C(absent) remove the plugin.
|
||||
- C(present) install the plugin, if it does not already exist.
|
||||
- C(enable) enable the plugin.
|
||||
- C(disable) disable the plugin.
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
- enable
|
||||
- disable
|
||||
type: str
|
||||
|
||||
alias:
|
||||
description:
|
||||
- Local name for plugin.
|
||||
type: str
|
||||
version_added: 1.8.0
|
||||
|
||||
plugin_options:
|
||||
description:
|
||||
- Dictionary of plugin settings.
|
||||
type: dict
|
||||
|
||||
force_remove:
|
||||
description:
|
||||
- Remove even if the plugin is enabled.
|
||||
default: False
|
||||
type: bool
|
||||
|
||||
enable_timeout:
|
||||
description:
|
||||
- Timeout in seconds.
|
||||
type: int
|
||||
default: 0
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_2_documentation
|
||||
|
||||
author:
|
||||
- Sakar Mehra (@sakar97)
|
||||
- Vladimir Porshkevich (@porshkevich)
|
||||
|
||||
requirements:
|
||||
- "python >= 2.7"
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0"
|
||||
- "Docker API >= 1.25"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Install a plugin
|
||||
community.docker.docker_plugin:
|
||||
plugin_name: plugin_one
|
||||
state: present
|
||||
|
||||
- name: Remove a plugin
|
||||
community.docker.docker_plugin:
|
||||
plugin_name: plugin_one
|
||||
state: absent
|
||||
|
||||
- name: Enable the plugin
|
||||
community.docker.docker_plugin:
|
||||
plugin_name: plugin_one
|
||||
state: enable
|
||||
|
||||
- name: Disable the plugin
|
||||
community.docker.docker_plugin:
|
||||
plugin_name: plugin_one
|
||||
state: disable
|
||||
|
||||
- name: Install a plugin with options
|
||||
community.docker.docker_plugin:
|
||||
plugin_name: weaveworks/net-plugin:latest_release
|
||||
plugin_options:
|
||||
IPALLOC_RANGE: "10.32.0.0/12"
|
||||
WEAVE_PASSWORD: "PASSWORD"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
plugin:
|
||||
description:
|
||||
- Plugin inspection results for the affected plugin.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: {}
|
||||
actions:
|
||||
description:
|
||||
- List of actions performed during task execution.
|
||||
returned: when I(state!=absent)
|
||||
type: list
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
try:
|
||||
from docker.errors import APIError, NotFound, DockerException
|
||||
from docker import DockerClient
|
||||
except ImportError:
|
||||
# missing docker-py handled in ansible.module_utils.docker_common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
DockerBaseClass,
|
||||
AnsibleDockerClient,
|
||||
DifferenceTracker,
|
||||
RequestException
|
||||
)
|
||||
|
||||
|
||||
class TaskParameters(DockerBaseClass):
    """Typed holder for the docker_plugin module options.

    Declares the expected attributes (as None), then copies every entry of
    ``client.module.params`` onto the instance so options are reachable as
    plain attributes (e.g. ``self.plugin_name``).
    """

    def __init__(self, client):
        super(TaskParameters, self).__init__()
        self.client = client

        self.plugin_name = None
        self.alias = None
        self.plugin_options = None
        self.debug = None
        self.force_remove = None
        self.enable_timeout = None

        # Overwrite the declared defaults with the user-supplied values.
        for param_name, param_value in client.module.params.items():
            setattr(self, param_name, param_value)
|
||||
|
||||
|
||||
def prepare_options(options):
    """Render a settings dict as docker-style ``KEY=value`` strings.

    None values become ``KEY=`` (empty value); a falsy/empty dict yields [].

    :param options: dict of option name -> value, or None
    :return: list of ``KEY=value`` strings (insertion order preserved)
    """
    if not options:
        return []
    rendered = []
    for key, value in options.items():
        rendered.append('%s=%s' % (key, value if value is not None else ""))
    return rendered
|
||||
|
||||
|
||||
def parse_options(options_list):
    """Parse docker-style ``KEY=value`` strings back into a dict.

    Splits on the first ``=`` only, so values may themselves contain ``=``.
    A falsy/empty input yields {}. An entry without ``=`` raises ValueError
    (same as the split-based original).

    :param options_list: iterable of ``KEY=value`` strings, or None
    :return: dict mapping option name -> value (last duplicate wins)
    """
    parsed = {}
    if options_list:
        for entry in options_list:
            key, value = entry.split('=', 1)
            parsed[key] = value
    return parsed
|
||||
|
||||
|
||||
class DockerPluginManager(object):
    """Implements the docker_plugin states (present/absent/enable/disable).

    The requested state transition runs entirely from ``__init__``; the
    outcome is exposed via the ``result`` property (actions, changed, diff,
    plugin inspection).
    """

    def __init__(self, client):
        self.client = client

        # High-level DockerClient reusing the module's low-level API client,
        # so connection parameters and auth are shared.
        self.dclient = DockerClient(**self.client._connect_params)
        self.dclient.api = client

        self.parameters = TaskParameters(client)
        # The local alias (if given) takes precedence when addressing the plugin.
        self.preferred_name = self.parameters.alias or self.parameters.plugin_name
        self.check_mode = self.client.check_mode
        self.diff = self.client.module._diff
        self.diff_tracker = DifferenceTracker()
        self.diff_result = dict()

        self.actions = []
        self.changed = False

        self.existing_plugin = self.get_existing_plugin()

        state = self.parameters.state
        if state == 'present':
            self.present()
        elif state == 'absent':
            self.absent()
        elif state == 'enable':
            self.enable()
        elif state == 'disable':
            self.disable()

        if self.diff or self.check_mode or self.parameters.debug:
            if self.diff:
                self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
            self.diff = self.diff_result

    def get_existing_plugin(self):
        """Return the installed plugin object, or None when not installed."""
        try:
            return self.dclient.plugins.get(self.preferred_name)
        except NotFound:
            return None
        except APIError as e:
            self.client.fail(to_native(e))

    def has_different_config(self):
        """
        Compare the requested I(plugin_options) with the installed plugin's
        current settings.

        :return: DifferenceTracker with one entry per differing option
        """
        differences = DifferenceTracker()
        if self.parameters.plugin_options:
            settings = self.existing_plugin.settings
            if not settings:
                # Plugin has no settings at all: everything requested differs.
                # (Fix: the original indexed settings['Env'] here, which
                # crashes when settings is empty/None, and passed the kwarg
                # as 'parameters' instead of 'parameter'.)
                differences.add('plugin_options',
                                parameter=self.parameters.plugin_options,
                                active=settings)
            else:
                existing_options = parse_options(settings['Env'])

                for key, value in self.parameters.plugin_options.items():
                    if ((not existing_options.get(key) and value) or
                            not value or
                            value != existing_options[key]):
                        # Fix: report the actual current value for this key;
                        # the original used a never-incremented index and
                        # always reported settings['Env'][0].
                        differences.add('plugin_options.%s' % key,
                                        parameter=value,
                                        active=existing_options.get(key))

        return differences

    def install_plugin(self):
        """Install (and configure) the plugin when it is not installed yet."""
        if not self.existing_plugin:
            if not self.check_mode:
                try:
                    self.existing_plugin = self.dclient.plugins.install(
                        self.parameters.plugin_name, self.parameters.alias
                    )
                    if self.parameters.plugin_options:
                        self.existing_plugin.configure(prepare_options(self.parameters.plugin_options))
                except APIError as e:
                    self.client.fail(to_native(e))

            self.actions.append("Installed plugin %s" % self.preferred_name)
            self.changed = True

    def remove_plugin(self):
        """Remove the plugin when installed; force removal if requested."""
        force = self.parameters.force_remove
        if self.existing_plugin:
            if not self.check_mode:
                try:
                    self.existing_plugin.remove(force)
                except APIError as e:
                    self.client.fail(to_native(e))

            self.actions.append("Removed plugin %s" % self.preferred_name)
            self.changed = True

    def update_plugin(self):
        """Reconfigure the installed plugin when its settings differ."""
        if self.existing_plugin:
            differences = self.has_different_config()
            if not differences.empty:
                if not self.check_mode:
                    try:
                        self.existing_plugin.configure(prepare_options(self.parameters.plugin_options))
                    except APIError as e:
                        self.client.fail(to_native(e))
                self.actions.append("Updated plugin %s settings" % self.preferred_name)
                self.changed = True
        else:
            self.client.fail("Cannot update the plugin: Plugin does not exist")

    def present(self):
        """Ensure the plugin is installed with the requested options."""
        differences = DifferenceTracker()
        if self.existing_plugin:
            differences = self.has_different_config()

        self.diff_tracker.add('exists', parameter=True, active=self.existing_plugin is not None)

        if self.existing_plugin:
            self.update_plugin()
        else:
            self.install_plugin()

        if self.diff or self.check_mode or self.parameters.debug:
            self.diff_tracker.merge(differences)

        # Outside of check/debug mode, actions are not part of the result.
        if not self.check_mode and not self.parameters.debug:
            self.actions = None

    def absent(self):
        """Ensure the plugin is removed."""
        self.remove_plugin()

    def enable(self):
        """Ensure the plugin is installed and enabled."""
        timeout = self.parameters.enable_timeout
        if self.existing_plugin:
            if not self.existing_plugin.enabled:
                if not self.check_mode:
                    try:
                        self.existing_plugin.enable(timeout)
                    except APIError as e:
                        self.client.fail(to_native(e))
                self.actions.append("Enabled plugin %s" % self.preferred_name)
                self.changed = True
        else:
            # Not installed yet: install first, then enable.
            self.install_plugin()
            if not self.check_mode:
                try:
                    self.existing_plugin.enable(timeout)
                except APIError as e:
                    self.client.fail(to_native(e))
            self.actions.append("Enabled plugin %s" % self.preferred_name)
            self.changed = True

    def disable(self):
        """Ensure the installed plugin is disabled; fail when not installed."""
        if self.existing_plugin:
            if self.existing_plugin.enabled:
                if not self.check_mode:
                    try:
                        self.existing_plugin.disable()
                    except APIError as e:
                        self.client.fail(to_native(e))
                self.actions.append("Disable plugin %s" % self.preferred_name)
                self.changed = True
        else:
            self.client.fail("Plugin not found: Plugin does not exist.")

    @property
    def result(self):
        """Module result dict; None-valued entries are filtered out."""
        result = {
            'actions': self.actions,
            'changed': self.changed,
            'diff': self.diff,
            'plugin': self.client.inspect_plugin(self.preferred_name) if self.parameters.state != 'absent' else {}
        }
        return dict((k, v) for k, v in result.items() if v is not None)
|
||||
|
||||
|
||||
def main():
    """Entry point for the docker_plugin module.

    Builds the argument spec, connects to the daemon, and delegates the state
    handling to DockerPluginManager.
    """
    argument_spec = {
        'alias': {'type': 'str'},
        'plugin_name': {'type': 'str', 'required': True},
        'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent', 'enable', 'disable']},
        'plugin_options': {'type': 'dict', 'default': {}},
        'debug': {'type': 'bool', 'default': False},
        'force_remove': {'type': 'bool', 'default': False},
        'enable_timeout': {'type': 'int', 'default': 0},
    }

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_version='2.6.0',
        min_docker_api_version='1.25',
    )

    try:
        manager = DockerPluginManager(client)
        client.module.exit_json(**manager.result)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,269 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_prune
|
||||
|
||||
short_description: Allows to prune various docker objects
|
||||
|
||||
description:
|
||||
- Allows to run C(docker container prune), C(docker image prune), C(docker network prune)
|
||||
and C(docker volume prune) via the Docker API.
|
||||
|
||||
|
||||
options:
|
||||
containers:
|
||||
description:
|
||||
- Whether to prune containers.
|
||||
type: bool
|
||||
default: no
|
||||
containers_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting containers to delete.
|
||||
- "For example, C(until: 24h)."
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
images:
|
||||
description:
|
||||
- Whether to prune images.
|
||||
type: bool
|
||||
default: no
|
||||
images_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting images to delete.
|
||||
- "For example, C(dangling: true)."
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
networks:
|
||||
description:
|
||||
- Whether to prune networks.
|
||||
type: bool
|
||||
default: no
|
||||
networks_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting networks to delete.
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
volumes:
|
||||
description:
|
||||
- Whether to prune volumes.
|
||||
type: bool
|
||||
default: no
|
||||
volumes_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting volumes to delete.
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
builder_cache:
|
||||
description:
|
||||
- Whether to prune the builder cache.
|
||||
- Requires version 3.3.0 of the Docker SDK for Python or newer.
|
||||
type: bool
|
||||
default: no
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_2_documentation
|
||||
|
||||
|
||||
author:
|
||||
- "Felix Fontein (@felixfontein)"
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
|
||||
- "Docker API >= 1.25"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Prune containers older than 24h
|
||||
community.docker.docker_prune:
|
||||
containers: yes
|
||||
containers_filters:
|
||||
# only consider containers created more than 24 hours ago
|
||||
until: 24h
|
||||
|
||||
- name: Prune everything
|
||||
community.docker.docker_prune:
|
||||
containers: yes
|
||||
images: yes
|
||||
networks: yes
|
||||
volumes: yes
|
||||
builder_cache: yes
|
||||
|
||||
- name: Prune everything (including non-dangling images)
|
||||
community.docker.docker_prune:
|
||||
containers: yes
|
||||
images: yes
|
||||
images_filters:
|
||||
dangling: false
|
||||
networks: yes
|
||||
volumes: yes
|
||||
builder_cache: yes
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
# containers
|
||||
containers:
|
||||
description:
|
||||
- List of IDs of deleted containers.
|
||||
returned: I(containers) is C(true)
|
||||
type: list
|
||||
elements: str
|
||||
sample: '[]'
|
||||
containers_space_reclaimed:
|
||||
description:
|
||||
- Amount of reclaimed disk space from container pruning in bytes.
|
||||
returned: I(containers) is C(true)
|
||||
type: int
|
||||
sample: '0'
|
||||
|
||||
# images
|
||||
images:
|
||||
description:
|
||||
- List of IDs of deleted images.
|
||||
returned: I(images) is C(true)
|
||||
type: list
|
||||
elements: str
|
||||
sample: '[]'
|
||||
images_space_reclaimed:
|
||||
description:
|
||||
- Amount of reclaimed disk space from image pruning in bytes.
|
||||
returned: I(images) is C(true)
|
||||
type: int
|
||||
sample: '0'
|
||||
|
||||
# networks
|
||||
networks:
|
||||
description:
|
||||
- List of IDs of deleted networks.
|
||||
returned: I(networks) is C(true)
|
||||
type: list
|
||||
elements: str
|
||||
sample: '[]'
|
||||
|
||||
# volumes
|
||||
volumes:
|
||||
description:
|
||||
- List of IDs of deleted volumes.
|
||||
returned: I(volumes) is C(true)
|
||||
type: list
|
||||
elements: str
|
||||
sample: '[]'
|
||||
volumes_space_reclaimed:
|
||||
description:
|
||||
- Amount of reclaimed disk space from volumes pruning in bytes.
|
||||
returned: I(volumes) is C(true)
|
||||
type: int
|
||||
sample: '0'
|
||||
|
||||
# builder_cache
|
||||
builder_cache_space_reclaimed:
|
||||
description:
|
||||
- Amount of reclaimed disk space from builder cache pruning in bytes.
|
||||
returned: I(builder_cache) is C(true)
|
||||
type: int
|
||||
sample: '0'
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
try:
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import docker_version, clean_dict_booleans_for_docker_api
|
||||
except Exception as dummy:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
containers=dict(type='bool', default=False),
|
||||
containers_filters=dict(type='dict'),
|
||||
images=dict(type='bool', default=False),
|
||||
images_filters=dict(type='dict'),
|
||||
networks=dict(type='bool', default=False),
|
||||
networks_filters=dict(type='dict'),
|
||||
volumes=dict(type='bool', default=False),
|
||||
volumes_filters=dict(type='dict'),
|
||||
builder_cache=dict(type='bool', default=False),
|
||||
)
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
# supports_check_mode=True,
|
||||
min_docker_api_version='1.25',
|
||||
min_docker_version='2.1.0',
|
||||
)
|
||||
|
||||
# Version checks
|
||||
cache_min_version = '3.3.0'
|
||||
if client.module.params['builder_cache'] and client.docker_py_version < LooseVersion(cache_min_version):
|
||||
msg = "Error: Docker SDK for Python's version is %s. Minimum version required for builds option is %s. Use `pip install --upgrade docker` to upgrade."
|
||||
client.fail(msg % (docker_version, cache_min_version))
|
||||
|
||||
try:
|
||||
result = dict()
|
||||
|
||||
if client.module.params['containers']:
|
||||
filters = clean_dict_booleans_for_docker_api(client.module.params.get('containers_filters'))
|
||||
res = client.prune_containers(filters=filters)
|
||||
result['containers'] = res.get('ContainersDeleted') or []
|
||||
result['containers_space_reclaimed'] = res['SpaceReclaimed']
|
||||
|
||||
if client.module.params['images']:
|
||||
filters = clean_dict_booleans_for_docker_api(client.module.params.get('images_filters'))
|
||||
res = client.prune_images(filters=filters)
|
||||
result['images'] = res.get('ImagesDeleted') or []
|
||||
result['images_space_reclaimed'] = res['SpaceReclaimed']
|
||||
|
||||
if client.module.params['networks']:
|
||||
filters = clean_dict_booleans_for_docker_api(client.module.params.get('networks_filters'))
|
||||
res = client.prune_networks(filters=filters)
|
||||
result['networks'] = res.get('NetworksDeleted') or []
|
||||
|
||||
if client.module.params['volumes']:
|
||||
filters = clean_dict_booleans_for_docker_api(client.module.params.get('volumes_filters'))
|
||||
res = client.prune_volumes(filters=filters)
|
||||
result['volumes'] = res.get('VolumesDeleted') or []
|
||||
result['volumes_space_reclaimed'] = res['SpaceReclaimed']
|
||||
|
||||
if client.module.params['builder_cache']:
|
||||
res = client.prune_builds()
|
||||
result['builder_cache_space_reclaimed'] = res['SpaceReclaimed']
|
||||
|
||||
client.module.exit_json(**result)
|
||||
except DockerException as e:
|
||||
client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,397 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_secret
|
||||
|
||||
short_description: Manage docker secrets.
|
||||
|
||||
|
||||
description:
|
||||
- Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm).
|
||||
- Adds to the metadata of new secrets C(ansible_key), an encrypted hash representation of the data, which is then used
|
||||
in future runs to test if a secret has changed. If C(ansible_key) is not present, then a secret will not be updated
|
||||
unless the I(force) option is set.
|
||||
- Updates to secrets are performed by removing the secret and creating it again.
|
||||
options:
|
||||
data:
|
||||
description:
|
||||
- The value of the secret.
|
||||
- Mutually exclusive with I(data_src). One of I(data) and I(data_src) is required if I(state=present).
|
||||
type: str
|
||||
data_is_b64:
|
||||
description:
|
||||
- If set to C(true), the data is assumed to be Base64 encoded and will be
|
||||
decoded before being used.
|
||||
- To use binary I(data), it is better to keep it Base64 encoded and let it
|
||||
be decoded by this option.
|
||||
type: bool
|
||||
default: no
|
||||
data_src:
|
||||
description:
|
||||
- The file on the target from which to read the secret.
|
||||
- Mutually exclusive with I(data). One of I(data) and I(data_src) is required if I(state=present).
|
||||
type: path
|
||||
version_added: 1.10.0
|
||||
labels:
|
||||
description:
|
||||
- "A map of key:value meta data, where both key and value are expected to be strings."
|
||||
- If new meta data is provided, or existing meta data is modified, the secret will be updated by removing it and creating it again.
|
||||
type: dict
|
||||
force:
|
||||
description:
|
||||
- Use with state C(present) to always remove and recreate an existing secret.
|
||||
- If C(true), an existing secret will be replaced, even if it has not changed.
|
||||
type: bool
|
||||
default: no
|
||||
rolling_versions:
|
||||
description:
|
||||
- If set to C(true), secrets are created with an increasing version number appended to their name.
|
||||
- Adds a label containing the version number to the managed secrets with the name C(ansible_version).
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 2.2.0
|
||||
versions_to_keep:
|
||||
description:
|
||||
- When using I(rolling_versions), the number of old versions of the secret to keep.
|
||||
- Extraneous old secrets are deleted after the new one is created.
|
||||
- Set to C(-1) to keep everything or to C(0) or C(1) to keep only the current one.
|
||||
type: int
|
||||
default: 5
|
||||
version_added: 2.2.0
|
||||
name:
|
||||
description:
|
||||
- The name of the secret.
|
||||
type: str
|
||||
required: yes
|
||||
state:
|
||||
description:
|
||||
- Set to C(present), if the secret should exist, and C(absent), if it should not.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_2_documentation
|
||||
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
|
||||
- "Docker API >= 1.25"
|
||||
|
||||
author:
|
||||
- Chris Houseknecht (@chouseknecht)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
- name: Create secret foo (from a file on the control machine)
|
||||
community.docker.docker_secret:
|
||||
name: foo
|
||||
# If the file is JSON or binary, Ansible might modify it (because
|
||||
# it is first decoded and later re-encoded). Base64-encoding the
|
||||
# file directly after reading it prevents this to happen.
|
||||
data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}"
|
||||
data_is_b64: true
|
||||
state: present
|
||||
|
||||
- name: Create secret foo (from a file on the target machine)
|
||||
community.docker.docker_secret:
|
||||
name: foo
|
||||
data_src: /path/to/secret/file
|
||||
state: present
|
||||
|
||||
- name: Change the secret data
|
||||
community.docker.docker_secret:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: baz
|
||||
one: '1'
|
||||
state: present
|
||||
|
||||
- name: Add a new label
|
||||
community.docker.docker_secret:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: baz
|
||||
one: '1'
|
||||
# Adding a new label will cause a remove/create of the secret
|
||||
two: '2'
|
||||
state: present
|
||||
|
||||
- name: No change
|
||||
community.docker.docker_secret:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: baz
|
||||
one: '1'
|
||||
# Even though 'two' is missing, there is no change to the existing secret
|
||||
state: present
|
||||
|
||||
- name: Update an existing label
|
||||
community.docker.docker_secret:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: monkey # Changing a label will cause a remove/create of the secret
|
||||
one: '1'
|
||||
state: present
|
||||
|
||||
- name: Force the removal/creation of the secret
|
||||
community.docker.docker_secret:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
force: yes
|
||||
state: present
|
||||
|
||||
- name: Remove secret foo
|
||||
community.docker.docker_secret:
|
||||
name: foo
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
secret_id:
|
||||
description:
|
||||
- The ID assigned by Docker to the secret object.
|
||||
returned: success and I(state) is C(present)
|
||||
type: str
|
||||
sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
|
||||
secret_name:
|
||||
description:
|
||||
- The name of the created secret object.
|
||||
returned: success and I(state) is C(present)
|
||||
type: str
|
||||
sample: 'awesome_secret'
|
||||
version_added: 2.2.0
|
||||
'''
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
DockerBaseClass,
|
||||
compare_generic,
|
||||
RequestException,
|
||||
)
|
||||
from ansible.module_utils.common.text.converters import to_native, to_bytes
|
||||
|
||||
|
||||
class SecretManager(DockerBaseClass):
|
||||
|
||||
def __init__(self, client, results):
|
||||
|
||||
super(SecretManager, self).__init__()
|
||||
|
||||
self.client = client
|
||||
self.results = results
|
||||
self.check_mode = self.client.check_mode
|
||||
|
||||
parameters = self.client.module.params
|
||||
self.name = parameters.get('name')
|
||||
self.state = parameters.get('state')
|
||||
self.data = parameters.get('data')
|
||||
if self.data is not None:
|
||||
if parameters.get('data_is_b64'):
|
||||
self.data = base64.b64decode(self.data)
|
||||
else:
|
||||
self.data = to_bytes(self.data)
|
||||
data_src = parameters.get('data_src')
|
||||
if data_src is not None:
|
||||
try:
|
||||
with open(data_src, 'rb') as f:
|
||||
self.data = f.read()
|
||||
except Exception as exc:
|
||||
self.client.fail('Error while reading {src}: {error}'.format(src=data_src, error=to_native(exc)))
|
||||
self.labels = parameters.get('labels')
|
||||
self.force = parameters.get('force')
|
||||
self.rolling_versions = parameters.get('rolling_versions')
|
||||
self.versions_to_keep = parameters.get('versions_to_keep')
|
||||
|
||||
if self.rolling_versions:
|
||||
self.version = 0
|
||||
self.data_key = None
|
||||
self.secrets = []
|
||||
|
||||
def __call__(self):
|
||||
self.get_secret()
|
||||
if self.state == 'present':
|
||||
self.data_key = hashlib.sha224(self.data).hexdigest()
|
||||
self.present()
|
||||
self.remove_old_versions()
|
||||
elif self.state == 'absent':
|
||||
self.absent()
|
||||
|
||||
def get_version(self, secret):
|
||||
try:
|
||||
return int(secret.get('Spec', {}).get('Labels', {}).get('ansible_version', 0))
|
||||
except ValueError:
|
||||
return 0
|
||||
|
||||
def remove_old_versions(self):
|
||||
if not self.rolling_versions or self.versions_to_keep < 0:
|
||||
return
|
||||
if not self.check_mode:
|
||||
while len(self.secrets) > max(self.versions_to_keep, 1):
|
||||
self.remove_secret(self.secrets.pop(0))
|
||||
|
||||
def get_secret(self):
|
||||
''' Find an existing secret. '''
|
||||
try:
|
||||
secrets = self.client.secrets(filters={'name': self.name})
|
||||
except APIError as exc:
|
||||
self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))
|
||||
|
||||
if self.rolling_versions:
|
||||
self.secrets = [
|
||||
secret
|
||||
for secret in secrets
|
||||
if secret['Spec']['Name'].startswith('{name}_v'.format(name=self.name))
|
||||
]
|
||||
self.secrets.sort(key=self.get_version)
|
||||
else:
|
||||
self.secrets = [
|
||||
secret for secret in secrets if secret['Spec']['Name'] == self.name
|
||||
]
|
||||
|
||||
def create_secret(self):
|
||||
''' Create a new secret '''
|
||||
secret_id = None
|
||||
# We can't see the data after creation, so adding a label we can use for idempotency check
|
||||
labels = {
|
||||
'ansible_key': self.data_key
|
||||
}
|
||||
if self.rolling_versions:
|
||||
self.version += 1
|
||||
labels['ansible_version'] = str(self.version)
|
||||
self.name = '{name}_v{version}'.format(name=self.name, version=self.version)
|
||||
if self.labels:
|
||||
labels.update(self.labels)
|
||||
|
||||
try:
|
||||
if not self.check_mode:
|
||||
secret_id = self.client.create_secret(self.name, self.data, labels=labels)
|
||||
self.secrets += self.client.secrets(filters={'id': secret_id})
|
||||
except APIError as exc:
|
||||
self.client.fail("Error creating secret: %s" % to_native(exc))
|
||||
|
||||
if isinstance(secret_id, dict):
|
||||
secret_id = secret_id['ID']
|
||||
|
||||
return secret_id
|
||||
|
||||
def remove_secret(self, secret):
|
||||
try:
|
||||
if not self.check_mode:
|
||||
self.client.remove_secret(secret['ID'])
|
||||
except APIError as exc:
|
||||
self.client.fail("Error removing secret %s: %s" % (secret['Spec']['Name'], to_native(exc)))
|
||||
|
||||
def present(self):
|
||||
''' Handles state == 'present', creating or updating the secret '''
|
||||
if self.secrets:
|
||||
secret = self.secrets[-1]
|
||||
self.results['secret_id'] = secret['ID']
|
||||
self.results['secret_name'] = secret['Spec']['Name']
|
||||
data_changed = False
|
||||
attrs = secret.get('Spec', {})
|
||||
if attrs.get('Labels', {}).get('ansible_key'):
|
||||
if attrs['Labels']['ansible_key'] != self.data_key:
|
||||
data_changed = True
|
||||
else:
|
||||
if not self.force:
|
||||
self.client.module.warn("'ansible_key' label not found. Secret will not be changed unless the force parameter is set to 'yes'")
|
||||
labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
|
||||
if self.rolling_versions:
|
||||
self.version = self.get_version(secret)
|
||||
if data_changed or labels_changed or self.force:
|
||||
# if something changed or force, delete and re-create the secret
|
||||
if not self.rolling_versions:
|
||||
self.absent()
|
||||
secret_id = self.create_secret()
|
||||
self.results['changed'] = True
|
||||
self.results['secret_id'] = secret_id
|
||||
self.results['secret_name'] = self.name
|
||||
else:
|
||||
self.results['changed'] = True
|
||||
self.results['secret_id'] = self.create_secret()
|
||||
self.results['secret_name'] = self.name
|
||||
|
||||
def absent(self):
|
||||
''' Handles state == 'absent', removing the secret '''
|
||||
if self.secrets:
|
||||
for secret in self.secrets:
|
||||
self.remove_secret(secret)
|
||||
self.results['changed'] = True
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
data=dict(type='str', no_log=True),
|
||||
data_is_b64=dict(type='bool', default=False),
|
||||
data_src=dict(type='path'),
|
||||
labels=dict(type='dict'),
|
||||
force=dict(type='bool', default=False),
|
||||
rolling_versions=dict(type='bool', default=False),
|
||||
versions_to_keep=dict(type='int', default=5),
|
||||
)
|
||||
|
||||
required_if = [
|
||||
('state', 'present', ['data', 'data_src'], True),
|
||||
]
|
||||
|
||||
mutually_exclusive = [
|
||||
('data', 'data_src'),
|
||||
]
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
required_if=required_if,
|
||||
mutually_exclusive=mutually_exclusive,
|
||||
min_docker_version='2.1.0',
|
||||
min_docker_api_version='1.25',
|
||||
)
|
||||
|
||||
try:
|
||||
results = dict(
|
||||
changed=False,
|
||||
secret_id='',
|
||||
secret_name=''
|
||||
)
|
||||
|
||||
SecretManager(client, results)()
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,308 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2018 Dario Zanzico (git@dariozanzico.com)
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_stack
|
||||
author: "Dario Zanzico (@dariko)"
|
||||
short_description: docker stack module
|
||||
description:
|
||||
- Manage docker stacks using the C(docker stack) command
|
||||
on the target node (see examples).
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Stack name
|
||||
type: str
|
||||
required: yes
|
||||
state:
|
||||
description:
|
||||
- Service state.
|
||||
type: str
|
||||
default: "present"
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
compose:
|
||||
description:
|
||||
- List of compose definitions. Any element may be a string
|
||||
referring to the path of the compose file on the target host
|
||||
or the YAML contents of a compose file nested as dictionary.
|
||||
type: list
|
||||
elements: raw
|
||||
default: []
|
||||
prune:
|
||||
description:
|
||||
- If true will add the C(--prune) option to the C(docker stack deploy) command.
|
||||
This will have docker remove the services not present in the
|
||||
current stack definition.
|
||||
type: bool
|
||||
default: no
|
||||
with_registry_auth:
|
||||
description:
|
||||
- If true will add the C(--with-registry-auth) option to the C(docker stack deploy) command.
|
||||
This will have docker send registry authentication details to Swarm agents.
|
||||
type: bool
|
||||
default: no
|
||||
resolve_image:
|
||||
description:
|
||||
- If set will add the C(--resolve-image) option to the C(docker stack deploy) command.
|
||||
This will have docker query the registry to resolve image digest and
|
||||
supported platforms. If not set, docker use "always" by default.
|
||||
type: str
|
||||
choices: ["always", "changed", "never"]
|
||||
absent_retries:
|
||||
description:
|
||||
- If C(>0) and I(state) is C(absent) the module will retry up to
|
||||
I(absent_retries) times to delete the stack until all the
|
||||
resources have been effectively deleted.
|
||||
If the last try still reports the stack as not completely
|
||||
removed the module will fail.
|
||||
type: int
|
||||
default: 0
|
||||
absent_retries_interval:
|
||||
description:
|
||||
- Interval in seconds between consecutive I(absent_retries).
|
||||
type: int
|
||||
default: 1
|
||||
|
||||
requirements:
|
||||
- jsondiff
|
||||
- pyyaml
|
||||
|
||||
notes:
|
||||
- Return values I(out) and I(err) have been deprecated and will be removed in community.docker 3.0.0. Use I(stdout) and I(stderr) instead.
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
stack_spec_diff:
|
||||
description: |
|
||||
dictionary containing the differences between the 'Spec' field
|
||||
of the stack services before and after applying the new stack
|
||||
definition.
|
||||
sample: >
|
||||
"stack_spec_diff":
|
||||
{'test_stack_test_service': {u'TaskTemplate': {u'ContainerSpec': {delete: [u'Env']}}}}
|
||||
returned: on change
|
||||
type: dict
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Deploy stack from a compose file
|
||||
community.docker.docker_stack:
|
||||
state: present
|
||||
name: mystack
|
||||
compose:
|
||||
- /opt/docker-compose.yml
|
||||
|
||||
- name: Deploy stack from base compose file and override the web service
|
||||
community.docker.docker_stack:
|
||||
state: present
|
||||
name: mystack
|
||||
compose:
|
||||
- /opt/docker-compose.yml
|
||||
- version: '3'
|
||||
services:
|
||||
web:
|
||||
image: nginx:latest
|
||||
environment:
|
||||
ENVVAR: envvar
|
||||
|
||||
- name: Remove stack
|
||||
community.docker.docker_stack:
|
||||
name: mystack
|
||||
state: absent
|
||||
'''
|
||||
|
||||
|
||||
import json
|
||||
import tempfile
|
||||
from ansible.module_utils.six import string_types
|
||||
from time import sleep
|
||||
|
||||
try:
|
||||
from jsondiff import diff as json_diff
|
||||
HAS_JSONDIFF = True
|
||||
except ImportError:
|
||||
HAS_JSONDIFF = False
|
||||
|
||||
try:
|
||||
from yaml import dump as yaml_dump
|
||||
HAS_YAML = True
|
||||
except ImportError:
|
||||
HAS_YAML = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, os
|
||||
|
||||
|
||||
def docker_stack_services(module, stack_name):
|
||||
docker_bin = module.get_bin_path('docker', required=True)
|
||||
rc, out, err = module.run_command([docker_bin,
|
||||
"stack",
|
||||
"services",
|
||||
stack_name,
|
||||
"--format",
|
||||
"{{.Name}}"])
|
||||
if err == "Nothing found in stack: %s\n" % stack_name:
|
||||
return []
|
||||
return out.strip().split('\n')
|
||||
|
||||
|
||||
def docker_service_inspect(module, service_name):
|
||||
docker_bin = module.get_bin_path('docker', required=True)
|
||||
rc, out, err = module.run_command([docker_bin,
|
||||
"service",
|
||||
"inspect",
|
||||
service_name])
|
||||
if rc != 0:
|
||||
return None
|
||||
else:
|
||||
ret = json.loads(out)[0]['Spec']
|
||||
return ret
|
||||
|
||||
|
||||
def docker_stack_deploy(module, stack_name, compose_files):
|
||||
docker_bin = module.get_bin_path('docker', required=True)
|
||||
command = [docker_bin, "stack", "deploy"]
|
||||
if module.params["prune"]:
|
||||
command += ["--prune"]
|
||||
if module.params["with_registry_auth"]:
|
||||
command += ["--with-registry-auth"]
|
||||
if module.params["resolve_image"]:
|
||||
command += ["--resolve-image",
|
||||
module.params["resolve_image"]]
|
||||
for compose_file in compose_files:
|
||||
command += ["--compose-file",
|
||||
compose_file]
|
||||
command += [stack_name]
|
||||
return module.run_command(command)
|
||||
|
||||
|
||||
def docker_stack_inspect(module, stack_name):
|
||||
ret = {}
|
||||
for service_name in docker_stack_services(module, stack_name):
|
||||
ret[service_name] = docker_service_inspect(module, service_name)
|
||||
return ret
|
||||
|
||||
|
||||
def docker_stack_rm(module, stack_name, retries, interval):
|
||||
docker_bin = module.get_bin_path('docker', required=True)
|
||||
command = [docker_bin, "stack", "rm", stack_name]
|
||||
|
||||
rc, out, err = module.run_command(command)
|
||||
|
||||
while err != "Nothing found in stack: %s\n" % stack_name and retries > 0:
|
||||
sleep(interval)
|
||||
retries = retries - 1
|
||||
rc, out, err = module.run_command(command)
|
||||
return rc, out, err
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec={
|
||||
'name': dict(type='str', required=True),
|
||||
'compose': dict(type='list', elements='raw', default=[]),
|
||||
'prune': dict(type='bool', default=False),
|
||||
'with_registry_auth': dict(type='bool', default=False),
|
||||
'resolve_image': dict(type='str', choices=['always', 'changed', 'never']),
|
||||
'state': dict(type='str', default='present', choices=['present', 'absent']),
|
||||
'absent_retries': dict(type='int', default=0),
|
||||
'absent_retries_interval': dict(type='int', default=1)
|
||||
},
|
||||
supports_check_mode=False
|
||||
)
|
||||
|
||||
if not HAS_JSONDIFF:
|
||||
return module.fail_json(msg="jsondiff is not installed, try 'pip install jsondiff'")
|
||||
|
||||
if not HAS_YAML:
|
||||
return module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
|
||||
|
||||
state = module.params['state']
|
||||
compose = module.params['compose']
|
||||
name = module.params['name']
|
||||
absent_retries = module.params['absent_retries']
|
||||
absent_retries_interval = module.params['absent_retries_interval']
|
||||
|
||||
if state == 'present':
|
||||
if not compose:
|
||||
module.fail_json(msg=("compose parameter must be a list "
|
||||
"containing at least one element"))
|
||||
|
||||
compose_files = []
|
||||
for i, compose_def in enumerate(compose):
|
||||
if isinstance(compose_def, dict):
|
||||
compose_file_fd, compose_file = tempfile.mkstemp()
|
||||
module.add_cleanup_file(compose_file)
|
||||
with os.fdopen(compose_file_fd, 'w') as stack_file:
|
||||
compose_files.append(compose_file)
|
||||
stack_file.write(yaml_dump(compose_def))
|
||||
elif isinstance(compose_def, string_types):
|
||||
compose_files.append(compose_def)
|
||||
else:
|
||||
module.fail_json(msg="compose element '%s' must be a " +
|
||||
"string or a dictionary" % compose_def)
|
||||
|
||||
before_stack_services = docker_stack_inspect(module, name)
|
||||
|
||||
rc, out, err = docker_stack_deploy(module, name, compose_files)
|
||||
|
||||
after_stack_services = docker_stack_inspect(module, name)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg="docker stack up deploy command failed",
|
||||
rc=rc,
|
||||
out=out, err=err, # Deprecated
|
||||
stdout=out, stderr=err)
|
||||
|
||||
before_after_differences = json_diff(before_stack_services,
|
||||
after_stack_services)
|
||||
for k in before_after_differences.keys():
|
||||
if isinstance(before_after_differences[k], dict):
|
||||
before_after_differences[k].pop('UpdatedAt', None)
|
||||
before_after_differences[k].pop('Version', None)
|
||||
if not list(before_after_differences[k].keys()):
|
||||
before_after_differences.pop(k)
|
||||
|
||||
if not before_after_differences:
|
||||
module.exit_json(
|
||||
changed=False,
|
||||
rc=rc,
|
||||
stdout=out,
|
||||
stderr=err)
|
||||
else:
|
||||
module.exit_json(
|
||||
changed=True,
|
||||
rc=rc,
|
||||
stdout=out,
|
||||
stderr=err,
|
||||
stack_spec_diff=json_diff(before_stack_services,
|
||||
after_stack_services,
|
||||
dump=True))
|
||||
|
||||
else:
|
||||
if docker_stack_services(module, name):
|
||||
rc, out, err = docker_stack_rm(module, name, absent_retries, absent_retries_interval)
|
||||
if rc != 0:
|
||||
module.fail_json(msg="'docker stack down' command failed",
|
||||
rc=rc,
|
||||
out=out, err=err, # Deprecated
|
||||
stdout=out, stderr=err)
|
||||
else:
|
||||
module.exit_json(changed=True,
|
||||
msg=out, rc=rc,
|
||||
err=err, # Deprecated
|
||||
stdout=out, stderr=err)
|
||||
module.exit_json(changed=False)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,83 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_stack_info
|
||||
author: "Jose Angel Munoz (@imjoseangel)"
|
||||
short_description: Return information on a docker stack
|
||||
description:
|
||||
- Retrieve information on docker stacks using the C(docker stack) command
|
||||
on the target node (see examples).
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
results:
|
||||
description: |
|
||||
List of dictionaries containing the list of stacks or tasks associated
|
||||
to a stack name.
|
||||
sample: >
|
||||
"results": [{"name":"grafana","namespace":"default","orchestrator":"Kubernetes","services":"2"}]
|
||||
returned: always
|
||||
type: list
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Shows stack info
|
||||
community.docker.docker_stack_info:
|
||||
register: result
|
||||
|
||||
- name: Show results
|
||||
ansible.builtin.debug:
|
||||
var: result.results
|
||||
'''
|
||||
|
||||
import json
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def docker_stack_list(module):
    """Run ``docker stack ls`` on the target host.

    :param module: AnsibleModule instance used to locate and run the docker CLI.
    :returns: tuple ``(rc, stdout, stderr)`` with stdout/stderr whitespace-stripped;
        stdout holds one JSON document per stack (``--format={{json .}}``).
    """
    docker_path = module.get_bin_path('docker', required=True)
    command = [docker_path, "stack", "ls", "--format={{json .}}"]
    rc, stdout, stderr = module.run_command(command)
    return rc, stdout.strip(), stderr.strip()
|
||||
|
||||
|
||||
def main():
    """Module entry point: list docker stacks and return them as ``results``.

    Exits via ``module.fail_json`` when the docker CLI returns non-zero,
    otherwise via ``module.exit_json`` with ``changed=False``.
    """
    module = AnsibleModule(
        argument_spec={},
        supports_check_mode=True
    )

    rc, out, err = docker_stack_list(module)

    if rc != 0:
        module.fail_json(msg="Error running docker stack. {0}".format(err),
                         rc=rc, stdout=out, stderr=err)
    else:
        # The CLI emits one JSON document per line; empty stdout means
        # there are no stacks at all.
        if out:
            stacks = [json.loads(line) for line in out.splitlines()]
        else:
            stacks = []

        module.exit_json(changed=False,
                         rc=rc,
                         stdout=out,
                         stderr=err,
                         results=stacks)


if __name__ == "__main__":
    main()
|
||||
@@ -0,0 +1,94 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_stack_task_info
|
||||
author: "Jose Angel Munoz (@imjoseangel)"
|
||||
short_description: Return information of the tasks on a docker stack
|
||||
description:
|
||||
- Retrieve information on docker stacks tasks using the C(docker stack) command
|
||||
on the target node (see examples).
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Stack name.
|
||||
type: str
|
||||
required: true
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
results:
|
||||
description: |
|
||||
List of dictionaries containing the list of tasks associated
|
||||
to a stack name.
|
||||
sample: >
|
||||
[{"CurrentState":"Running","DesiredState":"Running","Error":"","ID":"7wqv6m02ugkw","Image":"busybox","Name":"test_stack.1","Node":"swarm","Ports":""}]
|
||||
returned: always
|
||||
type: list
|
||||
elements: dict
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Shows stack info
|
||||
community.docker.docker_stack_task_info:
|
||||
name: test_stack
|
||||
register: result
|
||||
|
||||
- name: Show results
|
||||
ansible.builtin.debug:
|
||||
var: result.results
|
||||
'''
|
||||
|
||||
import json
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def docker_stack_task(module, stack_name):
    """Run ``docker stack ps <stack_name>`` on the target host.

    :param module: AnsibleModule instance used to locate and run the docker CLI.
    :param stack_name: name of the stack whose tasks are listed.
    :returns: tuple ``(rc, stdout, stderr)`` with stdout/stderr whitespace-stripped;
        stdout holds one JSON document per task (``--format={{json .}}``).
    """
    docker_path = module.get_bin_path('docker', required=True)
    command = [docker_path, "stack", "ps", stack_name, "--format={{json .}}"]
    rc, stdout, stderr = module.run_command(command)
    return rc, stdout.strip(), stderr.strip()
|
||||
|
||||
|
||||
def main():
    """Module entry point: list the tasks of one stack and return them as ``results``.

    Exits via ``module.fail_json`` when the docker CLI returns non-zero,
    otherwise via ``module.exit_json`` with ``changed=False``.
    """
    module = AnsibleModule(
        argument_spec={
            'name': dict(type='str', required=True)
        },
        supports_check_mode=True
    )

    stack_name = module.params['name']

    rc, out, err = docker_stack_task(module, stack_name)

    if rc != 0:
        module.fail_json(msg="Error running docker stack. {0}".format(err),
                         rc=rc, stdout=out, stderr=err)
    else:
        # The CLI emits one JSON document per line; empty stdout means
        # the stack has no tasks.
        if out:
            tasks = [json.loads(line) for line in out.splitlines()]
        else:
            tasks = []

        module.exit_json(changed=False,
                         rc=rc,
                         stdout=out,
                         stderr=err,
                         results=tasks)


if __name__ == "__main__":
    main()
|
||||
@@ -0,0 +1,694 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_swarm
|
||||
short_description: Manage Swarm cluster
|
||||
description:
|
||||
- Create a new Swarm cluster.
|
||||
- Add/Remove nodes or managers to an existing cluster.
|
||||
options:
|
||||
advertise_addr:
|
||||
description:
|
||||
- Externally reachable address advertised to other nodes.
|
||||
- This can either be an address/port combination
|
||||
in the form C(192.168.1.1:4567), or an interface followed by a
|
||||
port number, like C(eth0:4567).
|
||||
- If the port number is omitted,
|
||||
the port number from the listen address is used.
|
||||
- If I(advertise_addr) is not specified, it will be automatically
|
||||
detected when possible.
|
||||
- Only used when swarm is initialised or joined. Because of this it's not
|
||||
considered for idempotency checking.
|
||||
type: str
|
||||
default_addr_pool:
|
||||
description:
|
||||
- Default address pool in CIDR format.
|
||||
- Only used when swarm is initialised. Because of this it's not considered
|
||||
for idempotency checking.
|
||||
- Requires API version >= 1.39.
|
||||
type: list
|
||||
elements: str
|
||||
subnet_size:
|
||||
description:
|
||||
- Default address pool subnet mask length.
|
||||
- Only used when swarm is initialised. Because of this it's not considered
|
||||
for idempotency checking.
|
||||
- Requires API version >= 1.39.
|
||||
type: int
|
||||
listen_addr:
|
||||
description:
|
||||
- Listen address used for inter-manager communication.
|
||||
- This can either be an address/port combination in the form
|
||||
C(192.168.1.1:4567), or an interface followed by a port number,
|
||||
like C(eth0:4567).
|
||||
- If the port number is omitted, the default swarm listening port
|
||||
is used.
|
||||
- Only used when swarm is initialised or joined. Because of this it's not
|
||||
considered for idempotency checking.
|
||||
type: str
|
||||
default: 0.0.0.0:2377
|
||||
force:
|
||||
description:
|
||||
- Use with state C(present) to force creating a new Swarm, even if already part of one.
|
||||
- Use with state C(absent) to Leave the swarm even if this node is a manager.
|
||||
type: bool
|
||||
default: no
|
||||
state:
|
||||
description:
|
||||
- Set to C(present), to create/update a new cluster.
|
||||
- Set to C(join), to join an existing cluster.
|
||||
- Set to C(absent), to leave an existing cluster.
|
||||
- Set to C(remove), to remove an absent node from the cluster.
|
||||
Note that removing requires Docker SDK for Python >= 2.4.0.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- present
|
||||
- join
|
||||
- absent
|
||||
- remove
|
||||
node_id:
|
||||
description:
|
||||
- Swarm id of the node to remove.
|
||||
- Used with I(state=remove).
|
||||
type: str
|
||||
join_token:
|
||||
description:
|
||||
- Swarm token used to join a swarm cluster.
|
||||
- Used with I(state=join).
|
||||
- If this value is specified, the corresponding value in the return values will be censored by Ansible.
|
||||
This is a side-effect of this value not being logged.
|
||||
type: str
|
||||
remote_addrs:
|
||||
description:
|
||||
- Remote address of one or more manager nodes of an existing Swarm to connect to.
|
||||
- Used with I(state=join).
|
||||
type: list
|
||||
elements: str
|
||||
task_history_retention_limit:
|
||||
description:
|
||||
- Maximum number of tasks history stored.
|
||||
- Docker default value is C(5).
|
||||
type: int
|
||||
snapshot_interval:
|
||||
description:
|
||||
- Number of logs entries between snapshot.
|
||||
- Docker default value is C(10000).
|
||||
type: int
|
||||
keep_old_snapshots:
|
||||
description:
|
||||
- Number of snapshots to keep beyond the current snapshot.
|
||||
- Docker default value is C(0).
|
||||
type: int
|
||||
log_entries_for_slow_followers:
|
||||
description:
|
||||
- Number of log entries to keep around to sync up slow followers after a snapshot is created.
|
||||
type: int
|
||||
heartbeat_tick:
|
||||
description:
|
||||
- Amount of ticks (in seconds) between each heartbeat.
|
||||
- Docker default value is C(1s).
|
||||
type: int
|
||||
election_tick:
|
||||
description:
|
||||
- Amount of ticks (in seconds) needed without a leader to trigger a new election.
|
||||
- Docker default value is C(10s).
|
||||
type: int
|
||||
dispatcher_heartbeat_period:
|
||||
description:
|
||||
- The delay for an agent to send a heartbeat to the dispatcher.
|
||||
- Docker default value is C(5s).
|
||||
type: int
|
||||
node_cert_expiry:
|
||||
description:
|
||||
- Automatic expiry for nodes certificates.
|
||||
- Docker default value is C(3months).
|
||||
type: int
|
||||
name:
|
||||
description:
|
||||
- The name of the swarm.
|
||||
type: str
|
||||
labels:
|
||||
description:
|
||||
- User-defined key/value metadata.
|
||||
- Label operations in this module apply to the docker swarm cluster.
|
||||
Use M(community.docker.docker_node) module to add/modify/remove swarm node labels.
|
||||
- Requires API version >= 1.32.
|
||||
type: dict
|
||||
signing_ca_cert:
|
||||
description:
|
||||
- The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
|
||||
- This must not be a path to a certificate, but the contents of the certificate.
|
||||
- Requires API version >= 1.30.
|
||||
type: str
|
||||
signing_ca_key:
|
||||
description:
|
||||
- The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
|
||||
- This must not be a path to a key, but the contents of the key.
|
||||
- Requires API version >= 1.30.
|
||||
type: str
|
||||
ca_force_rotate:
|
||||
description:
|
||||
- An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
|
||||
if none have been specified.
|
||||
- Docker default value is C(0).
|
||||
- Requires API version >= 1.30.
|
||||
type: int
|
||||
autolock_managers:
|
||||
description:
|
||||
- If set, generate a key and use it to lock data stored on the managers.
|
||||
- Docker default value is C(no).
|
||||
- M(community.docker.docker_swarm_info) can be used to retrieve the unlock key.
|
||||
type: bool
|
||||
rotate_worker_token:
|
||||
description: Rotate the worker join token.
|
||||
type: bool
|
||||
default: no
|
||||
rotate_manager_token:
|
||||
description: Rotate the manager join token.
|
||||
type: bool
|
||||
default: no
|
||||
data_path_addr:
|
||||
description:
|
||||
- Address or interface to use for data path traffic.
|
||||
- This can either be an address in the form C(192.168.1.1), or an interface,
|
||||
like C(eth0).
|
||||
- Only used when swarm is initialised or joined. Because of this it is not
|
||||
considered for idempotency checking.
|
||||
type: str
|
||||
version_added: 2.5.0
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- Docker API >= 1.25
|
||||
author:
|
||||
- Thierry Bouvet (@tbouvet)
|
||||
- Piotr Wojciechowski (@WojciechowskiPiotr)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
- name: Init a new swarm with default parameters
|
||||
community.docker.docker_swarm:
|
||||
state: present
|
||||
|
||||
- name: Update swarm configuration
|
||||
community.docker.docker_swarm:
|
||||
state: present
|
||||
election_tick: 5
|
||||
|
||||
- name: Add nodes
|
||||
community.docker.docker_swarm:
|
||||
state: join
|
||||
advertise_addr: 192.168.1.2
|
||||
join_token: SWMTKN-1--xxxxx
|
||||
remote_addrs: [ '192.168.1.1:2377' ]
|
||||
|
||||
- name: Leave swarm for a node
|
||||
community.docker.docker_swarm:
|
||||
state: absent
|
||||
|
||||
- name: Remove a swarm manager
|
||||
community.docker.docker_swarm:
|
||||
state: absent
|
||||
force: true
|
||||
|
||||
- name: Remove node from swarm
|
||||
community.docker.docker_swarm:
|
||||
state: remove
|
||||
node_id: mynode
|
||||
|
||||
- name: Init a new swarm with different data path interface
|
||||
community.docker.docker_swarm:
|
||||
state: present
|
||||
advertise_addr: eth0
|
||||
data_path_addr: ens10
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
swarm_facts:
|
||||
description: Information about the swarm.
|
||||
returned: success
|
||||
type: dict
|
||||
contains:
|
||||
JoinTokens:
|
||||
description: Tokens to connect to the Swarm.
|
||||
returned: success
|
||||
type: dict
|
||||
contains:
|
||||
Worker:
|
||||
description:
|
||||
- Token to join the cluster as a new *worker* node.
|
||||
- "B(Note:) if this value has been specified as I(join_token), the value here will not
|
||||
be the token, but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER). If you pass I(join_token),
|
||||
make sure your playbook/role does not depend on this return value!"
|
||||
returned: success
|
||||
type: str
|
||||
example: SWMTKN-1--xxxxx
|
||||
Manager:
|
||||
description:
|
||||
- Token to join the cluster as a new *manager* node.
|
||||
- "B(Note:) if this value has been specified as I(join_token), the value here will not
|
||||
be the token, but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER). If you pass I(join_token),
|
||||
make sure your playbook/role does not depend on this return value!"
|
||||
returned: success
|
||||
type: str
|
||||
example: SWMTKN-1--xxxxx
|
||||
UnlockKey:
|
||||
description: The swarm unlock-key if I(autolock_managers) is C(true).
|
||||
returned: on success if I(autolock_managers) is C(true)
|
||||
and swarm is initialised, or if I(autolock_managers) has changed.
|
||||
type: str
|
||||
example: SWMKEY-1-xxx
|
||||
|
||||
actions:
|
||||
description: Provides the actions done on the swarm.
|
||||
returned: when action failed.
|
||||
type: list
|
||||
elements: str
|
||||
example: "['This cluster is already a swarm cluster']"
|
||||
|
||||
'''
|
||||
|
||||
import json
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
DockerBaseClass,
|
||||
DifferenceTracker,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
|
||||
class TaskParameters(DockerBaseClass):
    """Holds the swarm configuration parameters for one task run.

    Instances start with every field set to ``None`` ("not specified by the
    user"); fields are then filled either from the Ansible module parameters
    (``from_ansible_params``) or from an existing swarm's inspect data
    (``update_from_swarm_info``), which makes ``None`` a sentinel for
    "keep the active value" throughout this class.
    """

    def __init__(self):
        super(TaskParameters, self).__init__()

        # Connection/join-time parameters (not part of the swarm spec).
        self.advertise_addr = None
        self.listen_addr = None
        self.remote_addrs = None
        self.join_token = None
        self.data_path_addr = None

        # Spec
        self.snapshot_interval = None
        self.task_history_retention_limit = None
        self.keep_old_snapshots = None
        self.log_entries_for_slow_followers = None
        self.heartbeat_tick = None
        self.election_tick = None
        self.dispatcher_heartbeat_period = None
        self.node_cert_expiry = None
        self.name = None
        self.labels = None
        self.log_driver = None
        self.signing_ca_cert = None
        self.signing_ca_key = None
        self.ca_force_rotate = None
        self.autolock_managers = None
        self.rotate_worker_token = None
        self.rotate_manager_token = None
        self.default_addr_pool = None
        self.subnet_size = None

    @staticmethod
    def from_ansible_params(client):
        """Build a TaskParameters from ``client.module.params``.

        Only parameter names that match an attribute initialised in
        ``__init__`` are copied; the swarm spec object is then built via
        ``update_parameters``.
        """
        result = TaskParameters()
        for key, value in client.module.params.items():
            if key in result.__dict__:
                setattr(result, key, value)

        result.update_parameters(client)
        return result

    def update_from_swarm_info(self, swarm_info):
        """Fill any still-unset (``None``) fields from swarm inspect data.

        :param swarm_info: dict as returned by swarm inspect; only its
            ``'Spec'`` sub-dict is read. Explicit user-supplied values are
            never overwritten — each assignment is guarded by an
            ``is None`` check.
        """
        spec = swarm_info['Spec']

        # Each sub-section may be absent or null in the inspect payload,
        # hence the ``or dict()`` fallbacks.
        ca_config = spec.get('CAConfig') or dict()
        if self.node_cert_expiry is None:
            self.node_cert_expiry = ca_config.get('NodeCertExpiry')
        if self.ca_force_rotate is None:
            self.ca_force_rotate = ca_config.get('ForceRotate')

        dispatcher = spec.get('Dispatcher') or dict()
        if self.dispatcher_heartbeat_period is None:
            self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod')

        raft = spec.get('Raft') or dict()
        if self.snapshot_interval is None:
            self.snapshot_interval = raft.get('SnapshotInterval')
        if self.keep_old_snapshots is None:
            self.keep_old_snapshots = raft.get('KeepOldSnapshots')
        if self.heartbeat_tick is None:
            self.heartbeat_tick = raft.get('HeartbeatTick')
        if self.log_entries_for_slow_followers is None:
            self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers')
        if self.election_tick is None:
            self.election_tick = raft.get('ElectionTick')

        orchestration = spec.get('Orchestration') or dict()
        if self.task_history_retention_limit is None:
            self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit')

        encryption_config = spec.get('EncryptionConfig') or dict()
        if self.autolock_managers is None:
            self.autolock_managers = encryption_config.get('AutoLockManagers')

        if self.name is None:
            self.name = spec['Name']

        if self.labels is None:
            self.labels = spec.get('Labels') or {}

        # NOTE(review): unlike the fields above, log_driver is overwritten
        # unconditionally whenever the active spec defines one — presumably
        # intentional, since there is no module option for it; confirm.
        if 'LogDriver' in spec['TaskDefaults']:
            self.log_driver = spec['TaskDefaults']['LogDriver']

    def update_parameters(self, client):
        """Build ``self.spec`` via ``client.create_swarm_spec``.

        Maps attribute names to keyword arguments, skipping options whose
        minimal docker-py/API version is not satisfied
        (``client.option_minimal_versions``) and options left at ``None``.
        """
        # dest (kwarg name for create_swarm_spec) -> source (attribute name);
        # the names happen to coincide, the mapping keeps them decoupled.
        assign = dict(
            snapshot_interval='snapshot_interval',
            task_history_retention_limit='task_history_retention_limit',
            keep_old_snapshots='keep_old_snapshots',
            log_entries_for_slow_followers='log_entries_for_slow_followers',
            heartbeat_tick='heartbeat_tick',
            election_tick='election_tick',
            dispatcher_heartbeat_period='dispatcher_heartbeat_period',
            node_cert_expiry='node_cert_expiry',
            name='name',
            labels='labels',
            signing_ca_cert='signing_ca_cert',
            signing_ca_key='signing_ca_key',
            ca_force_rotate='ca_force_rotate',
            autolock_managers='autolock_managers',
            log_driver='log_driver',
        )
        params = dict()
        for dest, source in assign.items():
            if not client.option_minimal_versions[source]['supported']:
                continue
            value = getattr(self, source)
            if value is not None:
                params[dest] = value
        self.spec = client.create_swarm_spec(**params)

    def compare_to_active(self, other, client, differences):
        """Record differences between this object and the active parameters.

        :param other: TaskParameters built from the active swarm state.
        :param client: used to skip options unsupported by the installed
            docker-py / API version.
        :param differences: DifferenceTracker mutated in place.
        :returns: the same ``differences`` object, for chaining.
        """
        for k in self.__dict__:
            # Join-time / one-shot options are excluded from idempotency
            # checking (see the module DOCUMENTATION for these fields).
            if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token',
                     'rotate_worker_token', 'rotate_manager_token', 'spec',
                     'default_addr_pool', 'subnet_size', 'data_path_addr'):
                continue
            if not client.option_minimal_versions[k]['supported']:
                continue
            value = getattr(self, k)
            if value is None:
                continue
            other_value = getattr(other, k)
            if value != other_value:
                differences.add(k, parameter=value, active=other_value)
        # Token rotation is always treated as a change when requested.
        if self.rotate_worker_token:
            differences.add('rotate_worker_token', parameter=True, active=False)
        if self.rotate_manager_token:
            differences.add('rotate_manager_token', parameter=True, active=False)
        return differences
|
||||
|
||||
|
||||
class SwarmManager(DockerBaseClass):
    """Drives the requested swarm operation (present/join/absent/remove).

    The instance is callable: ``SwarmManager(client, results)()`` dispatches
    on the ``state`` module parameter and mutates ``results`` in place
    (``changed``, ``actions``, ``swarm_facts``, optionally ``diff``).
    """

    def __init__(self, client, results):

        super(SwarmManager, self).__init__()

        self.client = client
        self.results = results
        self.check_mode = self.client.check_mode
        # Filled lazily by inspect_swarm(); stays {} if inspection fails.
        self.swarm_info = {}

        self.state = client.module.params['state']
        self.force = client.module.params['force']
        self.node_id = client.module.params['node_id']

        self.differences = DifferenceTracker()
        self.parameters = TaskParameters.from_ansible_params(client)

        # True once a new swarm has been initialised during this run;
        # used by has_swarm_lock_changed().
        self.created = False

    def __call__(self):
        """Dispatch the module `state` to the matching operation."""
        choice_map = {
            "present": self.init_swarm,
            "join": self.join,
            "absent": self.leave,
            "remove": self.remove,
        }

        choice_map.get(self.state)()

        # NOTE(review): `debug` is not set in TaskParameters.__init__ —
        # presumably inherited from DockerBaseClass; confirm.
        if self.client.module._diff or self.parameters.debug:
            diff = dict()
            diff['before'], diff['after'] = self.differences.get_before_after()
            self.results['diff'] = diff

    def inspect_swarm(self):
        """Populate ``self.swarm_info`` and related result fields.

        Silently returns on APIError (e.g. the node is not part of a swarm),
        leaving previous state untouched.
        """
        try:
            data = self.client.inspect_swarm()
            # Round-trip through JSON to get plain dicts/lists.
            json_str = json.dumps(data, ensure_ascii=False)
            self.swarm_info = json.loads(json_str)

            self.results['changed'] = False
            self.results['swarm_facts'] = self.swarm_info

            unlock_key = self.get_unlock_key()
            self.swarm_info.update(unlock_key)
        except APIError:
            return

    def get_unlock_key(self):
        """Return ``{'UnlockKey': ...}``; key is None unless the lock changed.

        The unlock key is only fetched when autolock was just enabled or the
        swarm was just created; API errors fall back to the None default.
        """
        default = {'UnlockKey': None}
        if not self.has_swarm_lock_changed():
            return default
        try:
            return self.client.get_unlock_key() or default
        except APIError:
            return default

    def has_swarm_lock_changed(self):
        """True when autolock is requested and the swarm is new or autolock changed."""
        return self.parameters.autolock_managers and (
            self.created or self.differences.has_difference_for('autolock_managers')
        )

    def init_swarm(self):
        """Handle ``state=present``: create a new swarm or update the existing one."""
        # Already a manager and no --force: just reconcile the spec.
        if not self.force and self.client.check_if_swarm_manager():
            self.__update_swarm()
            return

        if not self.check_mode:
            init_arguments = {
                'advertise_addr': self.parameters.advertise_addr,
                'listen_addr': self.parameters.listen_addr,
                'data_path_addr': self.parameters.data_path_addr,
                'force_new_cluster': self.force,
                'swarm_spec': self.parameters.spec,
            }
            # These two require API >= 1.39; only pass them when given.
            if self.parameters.default_addr_pool is not None:
                init_arguments['default_addr_pool'] = self.parameters.default_addr_pool
            if self.parameters.subnet_size is not None:
                init_arguments['subnet_size'] = self.parameters.subnet_size
            try:
                self.client.init_swarm(**init_arguments)
            except APIError as exc:
                self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc))

        # Verify the node actually became a manager (skipped in check mode,
        # where nothing was created).
        if not self.client.check_if_swarm_manager():
            if not self.check_mode:
                self.client.fail("Swarm not created or other error!")

        self.created = True
        self.inspect_swarm()
        self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID')))
        self.differences.add('state', parameter='present', active='absent')
        self.results['changed'] = True
        # Only expose tokens and the unlock key, not the full inspect data.
        self.results['swarm_facts'] = {
            'JoinTokens': self.swarm_info.get('JoinTokens'),
            'UnlockKey': self.swarm_info.get('UnlockKey')
        }

    def __update_swarm(self):
        """Reconcile the active swarm spec with the requested parameters."""
        try:
            self.inspect_swarm()
            version = self.swarm_info['Version']['Index']
            # Fill unset requested parameters from the active state, then
            # diff against a snapshot of that active state.
            self.parameters.update_from_swarm_info(self.swarm_info)
            old_parameters = TaskParameters()
            old_parameters.update_from_swarm_info(self.swarm_info)
            self.parameters.compare_to_active(old_parameters, self.client, self.differences)
            if self.differences.empty:
                self.results['actions'].append("No modification")
                self.results['changed'] = False
                return
            # Rebuild the spec from the raw module params for the update call.
            update_parameters = TaskParameters.from_ansible_params(self.client)
            update_parameters.update_parameters(self.client)
            if not self.check_mode:
                self.client.update_swarm(
                    version=version, swarm_spec=update_parameters.spec,
                    rotate_worker_token=self.parameters.rotate_worker_token,
                    rotate_manager_token=self.parameters.rotate_manager_token)
        except APIError as exc:
            self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc))
            return

        self.inspect_swarm()
        self.results['actions'].append("Swarm cluster updated")
        self.results['changed'] = True

    def join(self):
        """Handle ``state=join``: join this node to an existing swarm (idempotent)."""
        if self.client.check_if_swarm_node():
            self.results['actions'].append("This node is already part of a swarm.")
            return
        if not self.check_mode:
            try:
                self.client.join_swarm(
                    remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token,
                    listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr,
                    data_path_addr=self.parameters.data_path_addr)
            except APIError as exc:
                self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc))
        self.results['actions'].append("New node is added to swarm cluster")
        self.differences.add('joined', parameter=True, active=False)
        self.results['changed'] = True

    def leave(self):
        """Handle ``state=absent``: make this node leave the swarm (idempotent)."""
        if not self.client.check_if_swarm_node():
            self.results['actions'].append("This node is not part of a swarm.")
            return
        if not self.check_mode:
            try:
                # force is required for managers; plain workers leave without it.
                self.client.leave_swarm(force=self.force)
            except APIError as exc:
                self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc))
        self.results['actions'].append("Node has left the swarm cluster")
        self.differences.add('joined', parameter='absent', active='present')
        self.results['changed'] = True

    def remove(self):
        """Handle ``state=remove``: remove a down node from the swarm by node_id."""
        # Removal must be performed from a manager node.
        if not self.client.check_if_swarm_manager():
            self.client.fail("This node is not a manager.")

        try:
            # Polls up to 5 times waiting for the node to report 'down'.
            status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5)
        except APIError:
            return

        if not status_down:
            self.client.fail("Can not remove the node. The status node is ready and not down.")

        if not self.check_mode:
            try:
                self.client.remove_node(node_id=self.node_id, force=self.force)
            except APIError as exc:
                self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
        self.results['actions'].append("Node is removed from swarm cluster.")
        self.differences.add('joined', parameter=False, active=True)
        self.results['changed'] = True
|
||||
|
||||
|
||||
def _detect_remove_operation(client):
|
||||
return client.module.params['state'] == 'remove'
|
||||
|
||||
|
||||
def main():
    """Module entry point for community.docker.docker_swarm.

    Builds the argument spec, constructs the swarm client, runs the
    requested operation via SwarmManager and exits through
    ``client.module.exit_json`` / ``client.fail``.
    """
    argument_spec = dict(
        advertise_addr=dict(type='str'),
        data_path_addr=dict(type='str'),
        state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove']),
        force=dict(type='bool', default=False),
        listen_addr=dict(type='str', default='0.0.0.0:2377'),
        remote_addrs=dict(type='list', elements='str'),
        # no_log: the join token grants cluster access.
        join_token=dict(type='str', no_log=True),
        snapshot_interval=dict(type='int'),
        task_history_retention_limit=dict(type='int'),
        keep_old_snapshots=dict(type='int'),
        log_entries_for_slow_followers=dict(type='int'),
        heartbeat_tick=dict(type='int'),
        election_tick=dict(type='int'),
        dispatcher_heartbeat_period=dict(type='int'),
        node_cert_expiry=dict(type='int'),
        name=dict(type='str'),
        labels=dict(type='dict'),
        signing_ca_cert=dict(type='str'),
        # no_log: private key material.
        signing_ca_key=dict(type='str', no_log=True),
        ca_force_rotate=dict(type='int'),
        autolock_managers=dict(type='bool'),
        node_id=dict(type='str'),
        rotate_worker_token=dict(type='bool', default=False),
        rotate_manager_token=dict(type='bool', default=False),
        default_addr_pool=dict(type='list', elements='str'),
        subnet_size=dict(type='int'),
    )

    # Conditional requirements: joining needs an address and token,
    # removing needs the target node id.
    required_if = [
        ('state', 'join', ['remote_addrs', 'join_token']),
        ('state', 'remove', ['node_id'])
    ]

    # Minimal docker-py / Docker API versions per option; checked by the
    # client and consulted by TaskParameters when building the spec.
    option_minimal_versions = dict(
        labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'),
        signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
        signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
        ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
        autolock_managers=dict(docker_py_version='2.6.0'),
        log_driver=dict(docker_py_version='2.6.0'),
        # Not an option: gates the whole state=remove operation.
        remove_operation=dict(
            docker_py_version='2.4.0',
            detect_usage=_detect_remove_operation,
            usage_msg='remove swarm nodes'
        ),
        default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
        subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
        data_path_addr=dict(docker_py_version='4.0.0', docker_api_version='1.30'),
    )

    client = AnsibleDockerSwarmClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=required_if,
        min_docker_version='1.10.0',
        min_docker_api_version='1.25',
        option_minimal_versions=option_minimal_versions,
    )

    try:
        results = dict(
            changed=False,
            result='',
            actions=[]
        )

        # SwarmManager mutates `results` in place while executing the
        # requested state transition.
        SwarmManager(client, results)()
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1,386 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_swarm_info
|
||||
|
||||
short_description: Retrieves facts about Docker Swarm cluster.
|
||||
|
||||
description:
|
||||
- Retrieves facts about a Docker Swarm.
|
||||
- Returns lists of swarm objects names for the services - nodes, services, tasks.
|
||||
- The output differs depending on API version available on docker host.
|
||||
- Must be run on Swarm Manager node; otherwise module fails with error message.
|
||||
It does return boolean flags in on both error and success which indicate whether
|
||||
the docker daemon can be communicated with, whether it is in Swarm mode, and
|
||||
whether it is a Swarm Manager node.
|
||||
|
||||
|
||||
author:
|
||||
- Piotr Wojciechowski (@WojciechowskiPiotr)
|
||||
|
||||
options:
|
||||
nodes:
|
||||
description:
|
||||
- Whether to list swarm nodes.
|
||||
type: bool
|
||||
default: no
|
||||
nodes_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting nodes to list.
|
||||
- "For example, C(name: mynode)."
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/node_ls/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
services:
|
||||
description:
|
||||
- Whether to list swarm services.
|
||||
type: bool
|
||||
default: no
|
||||
services_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting services to list.
|
||||
- "For example, C(name: myservice)."
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ls/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
tasks:
|
||||
description:
|
||||
- Whether to list containers.
|
||||
type: bool
|
||||
default: no
|
||||
tasks_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting tasks to list.
|
||||
- "For example, C(node: mynode-1)."
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ps/#filtering)
|
||||
for more information on possible filters.
|
||||
type: dict
|
||||
unlock_key:
|
||||
description:
|
||||
- Whether to retrieve the swarm unlock key.
|
||||
type: bool
|
||||
default: no
|
||||
verbose_output:
|
||||
description:
|
||||
- When set to C(yes) and I(nodes), I(services) or I(tasks) is set to C(yes), then the module output will
|
||||
contain verbose information about objects matching the full output of API method.
|
||||
- For details see the documentation of your version of Docker API at U(https://docs.docker.com/engine/api/).
|
||||
- The verbose output in this module contains only subset of information returned by I(_info) module
|
||||
for each type of the objects.
|
||||
type: bool
|
||||
default: no
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Docker API >= 1.24"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get info on Docker Swarm
|
||||
community.docker.docker_swarm_info:
|
||||
ignore_errors: yes
|
||||
register: result
|
||||
|
||||
- name: Inform about basic flags
|
||||
ansible.builtin.debug:
|
||||
msg: |
|
||||
Was able to talk to docker daemon: {{ result.can_talk_to_docker }}
|
||||
Docker in Swarm mode: {{ result.docker_swarm_active }}
|
||||
This is a Manager node: {{ result.docker_swarm_manager }}
|
||||
|
||||
- block:
|
||||
|
||||
- name: Get info on Docker Swarm and list of registered nodes
|
||||
community.docker.docker_swarm_info:
|
||||
nodes: yes
|
||||
register: result
|
||||
|
||||
- name: Get info on Docker Swarm and extended list of registered nodes
|
||||
community.docker.docker_swarm_info:
|
||||
nodes: yes
|
||||
verbose_output: yes
|
||||
register: result
|
||||
|
||||
- name: Get info on Docker Swarm and filtered list of registered nodes
|
||||
community.docker.docker_swarm_info:
|
||||
nodes: yes
|
||||
nodes_filters:
|
||||
name: mynode
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
var: result.swarm_facts
|
||||
|
||||
- name: Get the swarm unlock key
|
||||
community.docker.docker_swarm_info:
|
||||
unlock_key: yes
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
var: result.swarm_unlock_key
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
can_talk_to_docker:
|
||||
description:
|
||||
- Will be C(true) if the module can talk to the docker daemon.
|
||||
returned: both on success and on error
|
||||
type: bool
|
||||
docker_swarm_active:
|
||||
description:
|
||||
- Will be C(true) if the module can talk to the docker daemon,
|
||||
and the docker daemon is in Swarm mode.
|
||||
returned: both on success and on error
|
||||
type: bool
|
||||
docker_swarm_manager:
|
||||
description:
|
||||
- Will be C(true) if the module can talk to the docker daemon,
|
||||
the docker daemon is in Swarm mode, and the current node is
|
||||
a manager node.
|
||||
- Only if this one is C(true), the module will not fail.
|
||||
returned: both on success and on error
|
||||
type: bool
|
||||
swarm_facts:
|
||||
description:
|
||||
- Facts representing the basic state of the docker Swarm cluster.
|
||||
- Contains tokens to connect to the Swarm
|
||||
returned: always
|
||||
type: dict
|
||||
swarm_unlock_key:
|
||||
description:
|
||||
- Contains the key needed to unlock the swarm.
|
||||
returned: When I(unlock_key) is C(true).
|
||||
type: str
|
||||
nodes:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each volume.
|
||||
Keys matches the C(docker node ls) output unless I(verbose_output=yes).
|
||||
See description for I(verbose_output).
|
||||
returned: When I(nodes) is C(yes)
|
||||
type: list
|
||||
elements: dict
|
||||
services:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each volume.
|
||||
Keys matches the C(docker service ls) output unless I(verbose_output=yes).
|
||||
See description for I(verbose_output).
|
||||
returned: When I(services) is C(yes)
|
||||
type: list
|
||||
elements: dict
|
||||
tasks:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each volume.
|
||||
Keys matches the C(docker service ps) output unless I(verbose_output=yes).
|
||||
See description for I(verbose_output).
|
||||
returned: When I(tasks) is C(yes)
|
||||
type: list
|
||||
elements: dict
|
||||
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker_common
|
||||
pass
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
DockerBaseClass,
|
||||
clean_dict_booleans_for_docker_api,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
|
||||
class DockerSwarmManager(DockerBaseClass):
|
||||
|
||||
def __init__(self, client, results):
|
||||
|
||||
super(DockerSwarmManager, self).__init__()
|
||||
|
||||
self.client = client
|
||||
self.results = results
|
||||
self.verbose_output = self.client.module.params['verbose_output']
|
||||
|
||||
listed_objects = ['tasks', 'services', 'nodes']
|
||||
|
||||
self.client.fail_task_if_not_swarm_manager()
|
||||
|
||||
self.results['swarm_facts'] = self.get_docker_swarm_facts()
|
||||
|
||||
for docker_object in listed_objects:
|
||||
if self.client.module.params[docker_object]:
|
||||
returned_name = docker_object
|
||||
filter_name = docker_object + "_filters"
|
||||
filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
|
||||
self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
|
||||
if self.client.module.params['unlock_key']:
|
||||
self.results['swarm_unlock_key'] = self.get_docker_swarm_unlock_key()
|
||||
|
||||
def get_docker_swarm_facts(self):
|
||||
try:
|
||||
return self.client.inspect_swarm()
|
||||
except APIError as exc:
|
||||
self.client.fail("Error inspecting docker swarm: %s" % to_native(exc))
|
||||
|
||||
def get_docker_items_list(self, docker_object=None, filters=None):
|
||||
items = None
|
||||
items_list = []
|
||||
|
||||
try:
|
||||
if docker_object == 'nodes':
|
||||
items = self.client.nodes(filters=filters)
|
||||
elif docker_object == 'tasks':
|
||||
items = self.client.tasks(filters=filters)
|
||||
elif docker_object == 'services':
|
||||
items = self.client.services(filters=filters)
|
||||
except APIError as exc:
|
||||
self.client.fail("Error inspecting docker swarm for object '%s': %s" %
|
||||
(docker_object, to_native(exc)))
|
||||
|
||||
if self.verbose_output:
|
||||
return items
|
||||
|
||||
for item in items:
|
||||
item_record = dict()
|
||||
|
||||
if docker_object == 'nodes':
|
||||
item_record = self.get_essential_facts_nodes(item)
|
||||
elif docker_object == 'tasks':
|
||||
item_record = self.get_essential_facts_tasks(item)
|
||||
elif docker_object == 'services':
|
||||
item_record = self.get_essential_facts_services(item)
|
||||
if item_record['Mode'] == 'Global':
|
||||
item_record['Replicas'] = len(items)
|
||||
items_list.append(item_record)
|
||||
|
||||
return items_list
|
||||
|
||||
@staticmethod
|
||||
def get_essential_facts_nodes(item):
|
||||
object_essentials = dict()
|
||||
|
||||
object_essentials['ID'] = item.get('ID')
|
||||
object_essentials['Hostname'] = item['Description']['Hostname']
|
||||
object_essentials['Status'] = item['Status']['State']
|
||||
object_essentials['Availability'] = item['Spec']['Availability']
|
||||
if 'ManagerStatus' in item:
|
||||
object_essentials['ManagerStatus'] = item['ManagerStatus']['Reachability']
|
||||
if 'Leader' in item['ManagerStatus'] and item['ManagerStatus']['Leader'] is True:
|
||||
object_essentials['ManagerStatus'] = "Leader"
|
||||
else:
|
||||
object_essentials['ManagerStatus'] = None
|
||||
object_essentials['EngineVersion'] = item['Description']['Engine']['EngineVersion']
|
||||
|
||||
return object_essentials
|
||||
|
||||
def get_essential_facts_tasks(self, item):
|
||||
object_essentials = dict()
|
||||
|
||||
object_essentials['ID'] = item['ID']
|
||||
# Returning container ID to not trigger another connection to host
|
||||
# Container ID is sufficient to get extended info in other tasks
|
||||
object_essentials['ContainerID'] = item['Status']['ContainerStatus']['ContainerID']
|
||||
object_essentials['Image'] = item['Spec']['ContainerSpec']['Image']
|
||||
object_essentials['Node'] = self.client.get_node_name_by_id(item['NodeID'])
|
||||
object_essentials['DesiredState'] = item['DesiredState']
|
||||
object_essentials['CurrentState'] = item['Status']['State']
|
||||
if 'Err' in item['Status']:
|
||||
object_essentials['Error'] = item['Status']['Err']
|
||||
else:
|
||||
object_essentials['Error'] = None
|
||||
|
||||
return object_essentials
|
||||
|
||||
@staticmethod
|
||||
def get_essential_facts_services(item):
|
||||
object_essentials = dict()
|
||||
|
||||
object_essentials['ID'] = item['ID']
|
||||
object_essentials['Name'] = item['Spec']['Name']
|
||||
if 'Replicated' in item['Spec']['Mode']:
|
||||
object_essentials['Mode'] = "Replicated"
|
||||
object_essentials['Replicas'] = item['Spec']['Mode']['Replicated']['Replicas']
|
||||
elif 'Global' in item['Spec']['Mode']:
|
||||
object_essentials['Mode'] = "Global"
|
||||
# Number of replicas have to be updated in calling method or may be left as None
|
||||
object_essentials['Replicas'] = None
|
||||
object_essentials['Image'] = item['Spec']['TaskTemplate']['ContainerSpec']['Image']
|
||||
if 'Ports' in item['Spec']['EndpointSpec']:
|
||||
object_essentials['Ports'] = item['Spec']['EndpointSpec']['Ports']
|
||||
else:
|
||||
object_essentials['Ports'] = []
|
||||
|
||||
return object_essentials
|
||||
|
||||
def get_docker_swarm_unlock_key(self):
|
||||
unlock_key = self.client.get_unlock_key() or {}
|
||||
return unlock_key.get('UnlockKey') or None
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
nodes=dict(type='bool', default=False),
|
||||
nodes_filters=dict(type='dict'),
|
||||
tasks=dict(type='bool', default=False),
|
||||
tasks_filters=dict(type='dict'),
|
||||
services=dict(type='bool', default=False),
|
||||
services_filters=dict(type='dict'),
|
||||
unlock_key=dict(type='bool', default=False),
|
||||
verbose_output=dict(type='bool', default=False),
|
||||
)
|
||||
option_minimal_versions = dict(
|
||||
unlock_key=dict(docker_py_version='2.7.0', docker_api_version='1.25'),
|
||||
)
|
||||
|
||||
client = AnsibleDockerSwarmClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
min_docker_version='1.10.0',
|
||||
min_docker_api_version='1.24',
|
||||
option_minimal_versions=option_minimal_versions,
|
||||
fail_results=dict(
|
||||
can_talk_to_docker=False,
|
||||
docker_swarm_active=False,
|
||||
docker_swarm_manager=False,
|
||||
),
|
||||
)
|
||||
client.fail_results['can_talk_to_docker'] = True
|
||||
client.fail_results['docker_swarm_active'] = client.check_if_swarm_node()
|
||||
client.fail_results['docker_swarm_manager'] = client.check_if_swarm_manager()
|
||||
|
||||
try:
|
||||
results = dict(
|
||||
changed=False,
|
||||
)
|
||||
|
||||
DockerSwarmManager(client, results)
|
||||
results.update(client.fail_results)
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,119 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# (c) 2019 Hannes Ljungberg <hannes.ljungberg@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_swarm_service_info
|
||||
|
||||
short_description: Retrieves information about docker services from a Swarm Manager
|
||||
|
||||
description:
|
||||
- Retrieves information about a docker service.
|
||||
- Essentially returns the output of C(docker service inspect <name>).
|
||||
- Must be executed on a host running as Swarm Manager, otherwise the module will fail.
|
||||
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the service to inspect.
|
||||
type: str
|
||||
required: yes
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
author:
|
||||
- Hannes Ljungberg (@hannseman)
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.0"
|
||||
- "Docker API >= 1.24"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get info from a service
|
||||
community.docker.docker_swarm_service_info:
|
||||
name: myservice
|
||||
register: result
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
exists:
|
||||
description:
|
||||
- Returns whether the service exists.
|
||||
type: bool
|
||||
returned: always
|
||||
sample: true
|
||||
service:
|
||||
description:
|
||||
- A dictionary representing the current state of the service. Matches the C(docker service inspect) output.
|
||||
- Will be C(none) if service does not exist.
|
||||
returned: always
|
||||
type: dict
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
RequestException,
|
||||
)
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
|
||||
|
||||
|
||||
def get_service_info(client):
|
||||
service = client.module.params['name']
|
||||
return client.get_service_inspect(
|
||||
service_id=service,
|
||||
skip_missing=True
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
name=dict(type='str', required=True),
|
||||
)
|
||||
|
||||
client = AnsibleDockerSwarmClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
min_docker_version='2.0.0',
|
||||
min_docker_api_version='1.24',
|
||||
)
|
||||
|
||||
client.fail_task_if_not_swarm_manager()
|
||||
|
||||
try:
|
||||
service = get_service_info(client)
|
||||
|
||||
client.module.exit_json(
|
||||
changed=False,
|
||||
service=service,
|
||||
exists=bool(service)
|
||||
)
|
||||
except DockerException as e:
|
||||
client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,312 @@
|
||||
#!/usr/bin/python
|
||||
# coding: utf-8
|
||||
#
|
||||
# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: docker_volume
|
||||
short_description: Manage Docker volumes
|
||||
description:
|
||||
- Create/remove Docker volumes.
|
||||
- Performs largely the same function as the C(docker volume) CLI subcommand.
|
||||
options:
|
||||
volume_name:
|
||||
description:
|
||||
- Name of the volume to operate on.
|
||||
type: str
|
||||
required: yes
|
||||
aliases:
|
||||
- name
|
||||
|
||||
driver:
|
||||
description:
|
||||
- Specify the type of volume. Docker provides the C(local) driver, but 3rd party drivers can also be used.
|
||||
type: str
|
||||
default: local
|
||||
|
||||
driver_options:
|
||||
description:
|
||||
- "Dictionary of volume settings. Consult docker docs for valid options and values:
|
||||
U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options)."
|
||||
type: dict
|
||||
|
||||
labels:
|
||||
description:
|
||||
- Dictionary of label key/values to set for the volume
|
||||
type: dict
|
||||
|
||||
recreate:
|
||||
description:
|
||||
- Controls when a volume will be recreated when I(state) is C(present). Please
|
||||
note that recreating an existing volume will cause B(any data in the existing volume
|
||||
to be lost!) The volume will be deleted and a new volume with the same name will be
|
||||
created.
|
||||
- The value C(always) forces the volume to be always recreated.
|
||||
- The value C(never) makes sure the volume will not be recreated.
|
||||
- The value C(options-changed) makes sure the volume will be recreated if the volume
|
||||
already exist and the driver, driver options or labels differ.
|
||||
type: str
|
||||
default: never
|
||||
choices:
|
||||
- always
|
||||
- never
|
||||
- options-changed
|
||||
|
||||
state:
|
||||
description:
|
||||
- C(absent) deletes the volume.
|
||||
- C(present) creates the volume, if it does not already exist.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
author:
|
||||
- Alex Grönholm (@agronholm)
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "The docker server >= 1.9.0"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a volume
|
||||
community.docker.docker_volume:
|
||||
name: volume_one
|
||||
|
||||
- name: Remove a volume
|
||||
community.docker.docker_volume:
|
||||
name: volume_one
|
||||
state: absent
|
||||
|
||||
- name: Create a volume with options
|
||||
community.docker.docker_volume:
|
||||
name: volume_two
|
||||
driver_options:
|
||||
type: btrfs
|
||||
device: /dev/sda2
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
volume:
|
||||
description:
|
||||
- Volume inspection results for the affected volume.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: {}
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
DockerBaseClass,
|
||||
AnsibleDockerClient,
|
||||
DifferenceTracker,
|
||||
RequestException,
|
||||
)
|
||||
from ansible.module_utils.six import iteritems, text_type
|
||||
|
||||
|
||||
class TaskParameters(DockerBaseClass):
|
||||
def __init__(self, client):
|
||||
super(TaskParameters, self).__init__()
|
||||
self.client = client
|
||||
|
||||
self.volume_name = None
|
||||
self.driver = None
|
||||
self.driver_options = None
|
||||
self.labels = None
|
||||
self.recreate = None
|
||||
self.debug = None
|
||||
|
||||
for key, value in iteritems(client.module.params):
|
||||
setattr(self, key, value)
|
||||
|
||||
|
||||
class DockerVolumeManager(object):
|
||||
|
||||
def __init__(self, client):
|
||||
self.client = client
|
||||
self.parameters = TaskParameters(client)
|
||||
self.check_mode = self.client.check_mode
|
||||
self.results = {
|
||||
u'changed': False,
|
||||
u'actions': []
|
||||
}
|
||||
self.diff = self.client.module._diff
|
||||
self.diff_tracker = DifferenceTracker()
|
||||
self.diff_result = dict()
|
||||
|
||||
self.existing_volume = self.get_existing_volume()
|
||||
|
||||
state = self.parameters.state
|
||||
if state == 'present':
|
||||
self.present()
|
||||
elif state == 'absent':
|
||||
self.absent()
|
||||
|
||||
if self.diff or self.check_mode or self.parameters.debug:
|
||||
if self.diff:
|
||||
self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
|
||||
self.results['diff'] = self.diff_result
|
||||
|
||||
def get_existing_volume(self):
|
||||
try:
|
||||
volumes = self.client.volumes()
|
||||
except APIError as e:
|
||||
self.client.fail(to_native(e))
|
||||
|
||||
if volumes[u'Volumes'] is None:
|
||||
return None
|
||||
|
||||
for volume in volumes[u'Volumes']:
|
||||
if volume['Name'] == self.parameters.volume_name:
|
||||
return volume
|
||||
|
||||
return None
|
||||
|
||||
def has_different_config(self):
|
||||
"""
|
||||
Return the list of differences between the current parameters and the existing volume.
|
||||
|
||||
:return: list of options that differ
|
||||
"""
|
||||
differences = DifferenceTracker()
|
||||
if self.parameters.driver and self.parameters.driver != self.existing_volume['Driver']:
|
||||
differences.add('driver', parameter=self.parameters.driver, active=self.existing_volume['Driver'])
|
||||
if self.parameters.driver_options:
|
||||
if not self.existing_volume.get('Options'):
|
||||
differences.add('driver_options',
|
||||
parameter=self.parameters.driver_options,
|
||||
active=self.existing_volume.get('Options'))
|
||||
else:
|
||||
for key, value in iteritems(self.parameters.driver_options):
|
||||
if (not self.existing_volume['Options'].get(key) or
|
||||
value != self.existing_volume['Options'][key]):
|
||||
differences.add('driver_options.%s' % key,
|
||||
parameter=value,
|
||||
active=self.existing_volume['Options'].get(key))
|
||||
if self.parameters.labels:
|
||||
existing_labels = self.existing_volume.get('Labels', {})
|
||||
for label in self.parameters.labels:
|
||||
if existing_labels.get(label) != self.parameters.labels.get(label):
|
||||
differences.add('labels.%s' % label,
|
||||
parameter=self.parameters.labels.get(label),
|
||||
active=existing_labels.get(label))
|
||||
|
||||
return differences
|
||||
|
||||
def create_volume(self):
|
||||
if not self.existing_volume:
|
||||
if not self.check_mode:
|
||||
try:
|
||||
params = dict(
|
||||
driver=self.parameters.driver,
|
||||
driver_opts=self.parameters.driver_options,
|
||||
)
|
||||
|
||||
if self.parameters.labels is not None:
|
||||
params['labels'] = self.parameters.labels
|
||||
|
||||
resp = self.client.create_volume(self.parameters.volume_name, **params)
|
||||
self.existing_volume = self.client.inspect_volume(resp['Name'])
|
||||
except APIError as e:
|
||||
self.client.fail(to_native(e))
|
||||
|
||||
self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver))
|
||||
self.results['changed'] = True
|
||||
|
||||
def remove_volume(self):
|
||||
if self.existing_volume:
|
||||
if not self.check_mode:
|
||||
try:
|
||||
self.client.remove_volume(self.parameters.volume_name)
|
||||
except APIError as e:
|
||||
self.client.fail(to_native(e))
|
||||
|
||||
self.results['actions'].append("Removed volume %s" % self.parameters.volume_name)
|
||||
self.results['changed'] = True
|
||||
|
||||
def present(self):
|
||||
differences = DifferenceTracker()
|
||||
if self.existing_volume:
|
||||
differences = self.has_different_config()
|
||||
|
||||
self.diff_tracker.add('exists', parameter=True, active=self.existing_volume is not None)
|
||||
if (not differences.empty and self.parameters.recreate == 'options-changed') or self.parameters.recreate == 'always':
|
||||
self.remove_volume()
|
||||
self.existing_volume = None
|
||||
|
||||
self.create_volume()
|
||||
|
||||
if self.diff or self.check_mode or self.parameters.debug:
|
||||
self.diff_result['differences'] = differences.get_legacy_docker_diffs()
|
||||
self.diff_tracker.merge(differences)
|
||||
|
||||
if not self.check_mode and not self.parameters.debug:
|
||||
self.results.pop('actions')
|
||||
|
||||
volume_facts = self.get_existing_volume()
|
||||
self.results['volume'] = volume_facts
|
||||
|
||||
def absent(self):
|
||||
self.diff_tracker.add('exists', parameter=False, active=self.existing_volume is not None)
|
||||
self.remove_volume()
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
volume_name=dict(type='str', required=True, aliases=['name']),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
driver=dict(type='str', default='local'),
|
||||
driver_options=dict(type='dict', default={}),
|
||||
labels=dict(type='dict'),
|
||||
recreate=dict(type='str', default='never', choices=['always', 'never', 'options-changed']),
|
||||
debug=dict(type='bool', default=False)
|
||||
)
|
||||
|
||||
option_minimal_versions = dict(
|
||||
labels=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
|
||||
)
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
min_docker_version='1.10.0',
|
||||
min_docker_api_version='1.21',
|
||||
# "The docker server >= 1.9.0"
|
||||
option_minimal_versions=option_minimal_versions,
|
||||
)
|
||||
|
||||
try:
|
||||
cm = DockerVolumeManager(client)
|
||||
client.module.exit_json(**cm.results)
|
||||
except DockerException as e:
|
||||
client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,132 @@
|
||||
#!/usr/bin/python
|
||||
# coding: utf-8
|
||||
#
|
||||
# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: docker_volume_info
|
||||
short_description: Retrieve facts about Docker volumes
|
||||
description:
|
||||
- Performs largely the same function as the C(docker volume inspect) CLI subcommand.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the volume to inspect.
|
||||
type: str
|
||||
required: yes
|
||||
aliases:
|
||||
- volume_name
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker.docker
|
||||
- community.docker.docker.docker_py_1_documentation
|
||||
|
||||
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- "Docker API >= 1.21"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get infos on volume
|
||||
community.docker.docker_volume_info:
|
||||
name: mydata
|
||||
register: result
|
||||
|
||||
- name: Does volume exist?
|
||||
ansible.builtin.debug:
|
||||
msg: "The volume {{ 'exists' if result.exists else 'does not exist' }}"
|
||||
|
||||
- name: Print information about volume
|
||||
ansible.builtin.debug:
|
||||
var: result.volume
|
||||
when: result.exists
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
exists:
|
||||
description:
|
||||
- Returns whether the volume exists.
|
||||
type: bool
|
||||
returned: always
|
||||
sample: true
|
||||
volume:
|
||||
description:
|
||||
- Volume inspection results for the affected volume.
|
||||
- Will be C(none) if volume does not exist.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: '{
|
||||
"CreatedAt": "2018-12-09T17:43:44+01:00",
|
||||
"Driver": "local",
|
||||
"Labels": null,
|
||||
"Mountpoint": "/var/lib/docker/volumes/ansible-test-bd3f6172/_data",
|
||||
"Name": "ansible-test-bd3f6172",
|
||||
"Options": {},
|
||||
"Scope": "local"
|
||||
}'
|
||||
'''
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, NotFound
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
|
||||
def get_existing_volume(client, volume_name):
|
||||
try:
|
||||
return client.inspect_volume(volume_name)
|
||||
except NotFound as dummy:
|
||||
return None
|
||||
except Exception as exc:
|
||||
client.fail("Error inspecting volume: %s" % to_native(exc))
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
name=dict(type='str', required=True, aliases=['volume_name']),
|
||||
)
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
min_docker_version='1.8.0',
|
||||
min_docker_api_version='1.21',
|
||||
)
|
||||
|
||||
try:
|
||||
volume = get_existing_volume(client, client.module.params['name'])
|
||||
|
||||
client.module.exit_json(
|
||||
changed=False,
|
||||
exists=(True if volume else False),
|
||||
volume=volume,
|
||||
)
|
||||
except DockerException as e:
|
||||
client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
|
||||
exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,37 @@
|
||||
# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
from ansible.errors import AnsibleConnectionFailure
|
||||
from ansible.utils.display import Display
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.common import (
|
||||
AnsibleDockerClientBase,
|
||||
DOCKER_COMMON_ARGS,
|
||||
)
|
||||
|
||||
|
||||
class AnsibleDockerClient(AnsibleDockerClientBase):
|
||||
def __init__(self, plugin, min_docker_version=None, min_docker_api_version=None):
|
||||
self.plugin = plugin
|
||||
self.display = Display()
|
||||
super(AnsibleDockerClient, self).__init__(
|
||||
min_docker_version=min_docker_version,
|
||||
min_docker_api_version=min_docker_api_version)
|
||||
|
||||
def fail(self, msg, **kwargs):
|
||||
if kwargs:
|
||||
msg += '\nContext:\n' + '\n'.join(' {0} = {1!r}'.format(k, v) for (k, v) in kwargs.items())
|
||||
raise AnsibleConnectionFailure(msg)
|
||||
|
||||
def deprecate(self, msg, version=None, date=None, collection_name=None):
|
||||
self.display.deprecated(msg, version=version, date=date, collection_name=collection_name)
|
||||
|
||||
def _get_params(self):
|
||||
return dict([
|
||||
(option, self.plugin.get_option(option))
|
||||
for option in DOCKER_COMMON_ARGS
|
||||
])
|
||||
@@ -0,0 +1,17 @@
|
||||
# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
from ansible.compat import selectors
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils.socket_handler import (
|
||||
DockerSocketHandlerBase,
|
||||
)
|
||||
|
||||
|
||||
class DockerSocketHandler(DockerSocketHandlerBase):
|
||||
def __init__(self, display, sock, log=None, container=None):
|
||||
super(DockerSocketHandler, self).__init__(sock, selectors, log=lambda msg: display.vvvv(msg, host=container))
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
# See template for more information:
|
||||
# https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/config/config.yml
|
||||
modules:
|
||||
python_requires: default
|
||||
@@ -0,0 +1,15 @@
|
||||
- hosts: localhost
|
||||
vars:
|
||||
docker_test_image_alpine: quay.io/ansible/docker-test-containers:alpine3.8
|
||||
tasks:
|
||||
- name: Find all roles
|
||||
find:
|
||||
paths:
|
||||
- "{{ (playbook_dir | default('.')) ~ '/roles' }}"
|
||||
file_type: directory
|
||||
depth: 1
|
||||
register: result
|
||||
- name: Include all roles
|
||||
include_role:
|
||||
name: "{{ item }}"
|
||||
loop: "{{ result.files | map(attribute='path') | map('regex_replace', '.*/', '') | sort }}"
|
||||
@@ -0,0 +1,28 @@
|
||||
---
|
||||
- name: Retrieve information on current container
|
||||
community.docker.current_container_facts:
|
||||
register: result
|
||||
|
||||
# The following two tasks are useful if we ever have to debug why this fails.
|
||||
|
||||
- name: Print all Ansible facts
|
||||
debug:
|
||||
var: ansible_facts
|
||||
|
||||
- name: Read some files
|
||||
slurp:
|
||||
src: "{{ item }}"
|
||||
loop:
|
||||
- /proc/self/cpuset
|
||||
- /proc/1/cgroup
|
||||
- /proc/1/environ
|
||||
|
||||
- name: Print facts returned by module
|
||||
debug:
|
||||
var: result.ansible_facts
|
||||
|
||||
- name: Validate results
|
||||
assert:
|
||||
that:
|
||||
- ansible_module_running_in_container
|
||||
- ansible_module_container_type != ''
|
||||
@@ -0,0 +1,34 @@
|
||||
---
|
||||
# Create random name prefix (for containers, networks, ...)
|
||||
- name: Create random container name prefix
|
||||
set_fact:
|
||||
cname_prefix: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
|
||||
|
||||
- name: Create project and container names
|
||||
set_fact:
|
||||
pname: "{{ cname_prefix }}"
|
||||
cname: "{{ cname_prefix }}-hi"
|
||||
|
||||
- name: Define service
|
||||
set_fact:
|
||||
test_service: |
|
||||
version: '3'
|
||||
services:
|
||||
{{ cname }}:
|
||||
image: "{{ docker_test_image_alpine }}"
|
||||
command: '/bin/sh -c "sleep 10m"'
|
||||
stop_grace_period: 1s
|
||||
|
||||
- name: Present
|
||||
community.docker.docker_compose:
|
||||
project_name: "{{ pname }}"
|
||||
state: present
|
||||
remove_orphans: true
|
||||
definition: "{{ test_service | from_yaml }}"
|
||||
|
||||
- name: Absent
|
||||
community.docker.docker_compose:
|
||||
project_name: "{{ pname }}"
|
||||
state: absent
|
||||
remove_orphans: true
|
||||
definition: "{{ test_service | from_yaml }}"
|
||||
@@ -0,0 +1,28 @@
|
||||
---
|
||||
# Create random name prefix (for containers, networks, ...)
|
||||
- name: Create random container name prefix
|
||||
set_fact:
|
||||
cname_prefix: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
|
||||
|
||||
- name: Make sure image is absent
|
||||
community.docker.docker_image:
|
||||
name: "{{ docker_test_image_alpine }}"
|
||||
state: absent
|
||||
|
||||
- name: Make sure image is pulled
|
||||
community.docker.docker_image:
|
||||
name: "{{ docker_test_image_alpine }}"
|
||||
source: pull
|
||||
|
||||
- name: Start container
|
||||
community.docker.docker_container:
|
||||
name: "{{ cname_prefix }}-1"
|
||||
image: "{{ docker_test_image_alpine }}"
|
||||
state: started
|
||||
|
||||
- name: Remove container
|
||||
community.docker.docker_container:
|
||||
name: "{{ cname_prefix }}-1"
|
||||
state: absent
|
||||
stop_timeout: 1
|
||||
force_kill: yes
|
||||
@@ -0,0 +1,2 @@
|
||||
---
|
||||
# Currently the docker_stack* modules are not supported in the EE since we'd need to install the Docker CLI client
|
||||
@@ -0,0 +1 @@
|
||||
hidden
|
||||
@@ -0,0 +1,10 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -eux
|
||||
|
||||
[ -f "${INVENTORY}" ]
|
||||
|
||||
# Run connection tests with both the default and C locale.
|
||||
|
||||
ansible-playbook test_connection.yml -i "${INVENTORY}" "$@"
|
||||
LC_ALL=C LANG=C ansible-playbook test_connection.yml -i "${INVENTORY}" "$@"
|
||||
@@ -0,0 +1,43 @@
|
||||
- hosts: "{{ target_hosts }}"
|
||||
gather_facts: no
|
||||
serial: 1
|
||||
tasks:
|
||||
|
||||
### raw with unicode arg and output
|
||||
|
||||
- name: raw with unicode arg and output
|
||||
raw: echo 汉语
|
||||
register: command
|
||||
- name: check output of raw with unicode arg and output
|
||||
assert:
|
||||
that:
|
||||
- "'汉语' in command.stdout"
|
||||
- command is changed # as of 2.2, raw should default to changed: true for consistency w/ shell/command/script modules
|
||||
|
||||
### copy local file with unicode filename and content
|
||||
|
||||
- name: create local file with unicode filename and content
|
||||
local_action: lineinfile dest={{ local_tmp }}-汉语/汉语.txt create=true line=汉语
|
||||
- name: remove remote file with unicode filename and content
|
||||
action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语/汉语.txt state=absent"
|
||||
- name: create remote directory with unicode name
|
||||
action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=directory"
|
||||
- name: copy local file with unicode filename and content
|
||||
action: "{{ action_prefix }}copy src={{ local_tmp }}-汉语/汉语.txt dest={{ remote_tmp }}-汉语/汉语.txt"
|
||||
|
||||
### fetch remote file with unicode filename and content
|
||||
|
||||
- name: remove local file with unicode filename and content
|
||||
local_action: file path={{ local_tmp }}-汉语/汉语.txt state=absent
|
||||
- name: fetch remote file with unicode filename and content
|
||||
fetch: src={{ remote_tmp }}-汉语/汉语.txt dest={{ local_tmp }}-汉语/汉语.txt fail_on_missing=true validate_checksum=true flat=true
|
||||
|
||||
### remove local and remote temp files
|
||||
|
||||
- name: remove local temp file
|
||||
local_action: file path={{ local_tmp }}-汉语 state=absent
|
||||
- name: remove remote temp file
|
||||
action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=absent"
|
||||
|
||||
### test wait_for_connection plugin
|
||||
- ansible.builtin.wait_for_connection:
|
||||
@@ -0,0 +1,3 @@
|
||||
shippable/posix/group4
|
||||
skip/docker # coverage does not work if we're inside a docker container, since we cannot access this container's /tmp dir from the new container
|
||||
destructive
|
||||
@@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_docker
|
||||
@@ -0,0 +1,20 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -eux
|
||||
|
||||
# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
|
||||
# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
|
||||
|
||||
PYTHON="$(command -v python3 python | head -n1)"
|
||||
|
||||
group=$(${PYTHON} -c \
|
||||
"from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
|
||||
|
||||
cd ../connection
|
||||
|
||||
INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
|
||||
-e target_hosts="${group}" \
|
||||
-e action_prefix= \
|
||||
-e local_tmp=/tmp/ansible-local \
|
||||
-e remote_tmp=/tmp/ansible-remote \
|
||||
"$@"
|
||||
@@ -0,0 +1,62 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# If you use another image, you possibly also need to adjust
|
||||
# ansible_python_interpreter in test_connection.inventory.
|
||||
source ../setup_docker/vars/main.env
|
||||
IMAGE="${DOCKER_TEST_IMAGE_PYTHON3}"
|
||||
|
||||
# Setup phase
|
||||
|
||||
echo "Setup"
|
||||
ANSIBLE_ROLES_PATH=.. ansible-playbook setup.yml
|
||||
|
||||
# If docker wasn't installed, don't run the tests
|
||||
if [ "$(command -v docker)" == "" ]; then
|
||||
exit
|
||||
fi
|
||||
|
||||
|
||||
# Test phase
|
||||
|
||||
CONTAINER_SUFFIX=-${RANDOM}
|
||||
|
||||
DOCKER_CONTAINERS="docker-connection-test-container${CONTAINER_SUFFIX}"
|
||||
|
||||
[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
cleanup() {
|
||||
echo "Cleanup"
|
||||
docker rm -f ${DOCKER_CONTAINERS}
|
||||
echo "Shutdown"
|
||||
ANSIBLE_ROLES_PATH=.. ansible-playbook shutdown.yml
|
||||
echo "Done"
|
||||
}
|
||||
|
||||
trap cleanup INT TERM EXIT
|
||||
|
||||
echo "Start containers"
|
||||
for CONTAINER in ${DOCKER_CONTAINERS}; do
|
||||
if [ "${ANSIBLE_TEST_COVERAGE:-}" == "" ]; then
|
||||
docker run --rm --name ${CONTAINER} --detach "${IMAGE}" /bin/sh -c 'sleep 10m'
|
||||
else
|
||||
docker run --rm --name ${CONTAINER} --detach -v /tmp:/tmp "${IMAGE}" /bin/sh -c 'sleep 10m'
|
||||
docker exec ${CONTAINER} pip3 install coverage
|
||||
fi
|
||||
echo ${CONTAINER}
|
||||
done
|
||||
|
||||
cat > test_connection.inventory << EOF
|
||||
[docker]
|
||||
docker-no-pipelining ansible_pipelining=false
|
||||
docker-pipelining ansible_pipelining=true
|
||||
|
||||
[docker:vars]
|
||||
ansible_host=docker-connection-test-container${CONTAINER_SUFFIX}
|
||||
ansible_connection=community.docker.docker
|
||||
ansible_python_interpreter=/usr/local/bin/python3
|
||||
EOF
|
||||
|
||||
echo "Run tests"
|
||||
./runme-connection.sh "$@"
|
||||
@@ -0,0 +1,10 @@
|
||||
---
|
||||
- hosts: localhost
|
||||
connection: local
|
||||
vars:
|
||||
docker_skip_cleanup: yes
|
||||
|
||||
tasks:
|
||||
- name: Setup docker
|
||||
import_role:
|
||||
name: setup_docker
|
||||
@@ -0,0 +1,15 @@
|
||||
---
|
||||
- hosts: localhost
|
||||
connection: local
|
||||
vars:
|
||||
docker_skip_cleanup: yes
|
||||
|
||||
tasks:
|
||||
- name: Remove docker packages
|
||||
action: "{{ ansible_facts.pkg_mgr }}"
|
||||
args:
|
||||
name:
|
||||
- docker
|
||||
- docker-ce
|
||||
- docker-ce-cli
|
||||
state: absent
|
||||
@@ -0,0 +1,3 @@
|
||||
shippable/posix/group4
|
||||
skip/docker # coverage does not work if we're inside a docker container, since we cannot access this container's /tmp dir from the new container
|
||||
destructive
|
||||
@@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_docker
|
||||
@@ -0,0 +1,20 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -eux
|
||||
|
||||
# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
|
||||
# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
|
||||
|
||||
PYTHON="$(command -v python3 python | head -n1)"
|
||||
|
||||
group=$(${PYTHON} -c \
|
||||
"from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
|
||||
|
||||
cd ../connection
|
||||
|
||||
INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
|
||||
-e target_hosts="${group}" \
|
||||
-e action_prefix= \
|
||||
-e local_tmp=/tmp/ansible-local \
|
||||
-e remote_tmp=/tmp/ansible-remote \
|
||||
"$@"
|
||||
@@ -0,0 +1,62 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# If you use another image, you possibly also need to adjust
|
||||
# ansible_python_interpreter in test_connection.inventory.
|
||||
source ../setup_docker/vars/main.env
|
||||
IMAGE="${DOCKER_TEST_IMAGE_PYTHON3}"
|
||||
|
||||
# Setup phase
|
||||
|
||||
echo "Setup"
|
||||
ANSIBLE_ROLES_PATH=.. ansible-playbook setup.yml
|
||||
|
||||
# If docker wasn't installed, don't run the tests
|
||||
if [ "$(command -v docker)" == "" ]; then
|
||||
exit
|
||||
fi
|
||||
|
||||
|
||||
# Test phase
|
||||
|
||||
CONTAINER_SUFFIX=-${RANDOM}
|
||||
|
||||
DOCKER_CONTAINERS="docker-connection-test-container${CONTAINER_SUFFIX}"
|
||||
|
||||
[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
cleanup() {
|
||||
echo "Cleanup"
|
||||
docker rm -f ${DOCKER_CONTAINERS}
|
||||
echo "Shutdown"
|
||||
ANSIBLE_ROLES_PATH=.. ansible-playbook shutdown.yml
|
||||
echo "Done"
|
||||
}
|
||||
|
||||
trap cleanup INT TERM EXIT
|
||||
|
||||
echo "Start containers"
|
||||
for CONTAINER in ${DOCKER_CONTAINERS}; do
|
||||
if [ "${ANSIBLE_TEST_COVERAGE:-}" == "" ]; then
|
||||
docker run --rm --name ${CONTAINER} --detach "${IMAGE}" /bin/sh -c 'sleep 10m'
|
||||
else
|
||||
docker run --rm --name ${CONTAINER} --detach -v /tmp:/tmp "${IMAGE}" /bin/sh -c 'sleep 10m'
|
||||
docker exec ${CONTAINER} pip3 install coverage
|
||||
fi
|
||||
echo ${CONTAINER}
|
||||
done
|
||||
|
||||
cat > test_connection.inventory << EOF
|
||||
[docker_api]
|
||||
docker_api-no-pipelining ansible_pipelining=false
|
||||
docker_api-pipelining ansible_pipelining=true
|
||||
|
||||
[docker_api:vars]
|
||||
ansible_host=docker-connection-test-container${CONTAINER_SUFFIX}
|
||||
ansible_connection=community.docker.docker_api
|
||||
ansible_python_interpreter=/usr/local/bin/python3
|
||||
EOF
|
||||
|
||||
echo "Run tests"
|
||||
./runme-connection.sh "$@"
|
||||
@@ -0,0 +1,10 @@
|
||||
---
|
||||
- hosts: localhost
|
||||
connection: local
|
||||
vars:
|
||||
docker_skip_cleanup: yes
|
||||
|
||||
tasks:
|
||||
- name: Setup docker
|
||||
import_role:
|
||||
name: setup_docker
|
||||
@@ -0,0 +1,15 @@
|
||||
---
|
||||
- hosts: localhost
|
||||
connection: local
|
||||
vars:
|
||||
docker_skip_cleanup: yes
|
||||
|
||||
tasks:
|
||||
- name: Remove docker packages
|
||||
action: "{{ ansible_facts.pkg_mgr }}"
|
||||
args:
|
||||
name:
|
||||
- docker
|
||||
- docker-ce
|
||||
- docker-ce-cli
|
||||
state: absent
|
||||
@@ -0,0 +1,4 @@
|
||||
shippable/posix/group5
|
||||
skip/docker # this requires unfettered access to the container host
|
||||
skip/rhel7.9 # nsenter does not work out of the box
|
||||
destructive
|
||||
@@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- setup_docker
|
||||
@@ -0,0 +1,20 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -eux
|
||||
|
||||
# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
|
||||
# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
|
||||
|
||||
PYTHON="$(command -v python3 python | head -n1)"
|
||||
|
||||
group=$(${PYTHON} -c \
|
||||
"from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
|
||||
|
||||
cd ../connection
|
||||
|
||||
INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
|
||||
-e target_hosts="${group}" \
|
||||
-e action_prefix= \
|
||||
-e local_tmp=/tmp/ansible-local \
|
||||
-e remote_tmp=/tmp/ansible-remote \
|
||||
"$@"
|
||||
@@ -0,0 +1,70 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
[[ -n "${DEBUG:-}" || -n "${ANSIBLE_DEBUG:-}" ]] && set -x
|
||||
|
||||
readonly IMAGE="quay.io/ansible/ansible-runner:devel"
|
||||
readonly PYTHON="$(command -v python3 python | head -n1)"
|
||||
|
||||
# Determine collection root
|
||||
COLLECTION_ROOT=./
|
||||
while true; do
|
||||
if [ -e ${COLLECTION_ROOT}galaxy.yml ] || [ -e ${COLLECTION_ROOT}MANIFEST.json ]; then
|
||||
break
|
||||
fi
|
||||
COLLECTION_ROOT="${COLLECTION_ROOT}../"
|
||||
done
|
||||
readonly COLLECTION_ROOT="$(cd ${COLLECTION_ROOT} ; pwd)"
|
||||
|
||||
# Setup phase
|
||||
echo "Setup"
|
||||
ANSIBLE_ROLES_PATH=.. ansible-playbook setup.yml
|
||||
|
||||
# If docker wasn't installed, don't run the tests
|
||||
if [ "$(command -v docker)" == "" ]; then
|
||||
exit
|
||||
fi
|
||||
|
||||
cleanup() {
|
||||
echo "Cleanup"
|
||||
echo "Shutdown"
|
||||
ANSIBLE_ROLES_PATH=.. ansible-playbook shutdown.yml
|
||||
echo "Done"
|
||||
}
|
||||
|
||||
envs=(--env "HOME=${HOME:-}")
|
||||
while IFS=$'\0' read -d '' -r line; do
|
||||
key="$(echo "$line" | cut -d= -f1)"
|
||||
value="$(echo "$line" | cut -d= -f2-)"
|
||||
if [[ "${key}" =~ ^(ANSIBLE_|JUNIT_OUTPUT_DIR$|OUTPUT_DIR$|PYTHONPATH$) ]]; then
|
||||
envs+=(--env "${key}=${value}")
|
||||
fi
|
||||
done < <(printenv -0)
|
||||
|
||||
# Test phase
|
||||
cat > test_connection.inventory << EOF
|
||||
[nsenter]
|
||||
nsenter-no-pipelining ansible_pipelining=false
|
||||
nsenter-pipelining ansible_pipelining=true
|
||||
|
||||
[nsenter:vars]
|
||||
ansible_host=localhost
|
||||
ansible_connection=community.docker.nsenter
|
||||
ansible_host_volume_mount=/host
|
||||
ansible_nsenter_pid=1
|
||||
ansible_python_interpreter=${PYTHON}
|
||||
EOF
|
||||
|
||||
echo "Run tests"
|
||||
set -x
|
||||
docker run \
|
||||
-i \
|
||||
--rm \
|
||||
--privileged \
|
||||
--pid host \
|
||||
"${envs[@]}" \
|
||||
--volume "${COLLECTION_ROOT}:${COLLECTION_ROOT}" \
|
||||
--workdir "$(pwd)" \
|
||||
"${IMAGE}" \
|
||||
./runme-connection.sh "$@"
|
||||
@@ -0,0 +1,10 @@
|
||||
---
|
||||
- hosts: localhost
|
||||
connection: local
|
||||
vars:
|
||||
docker_skip_cleanup: yes
|
||||
|
||||
tasks:
|
||||
- name: Setup docker
|
||||
import_role:
|
||||
name: setup_docker
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user