From 200d29c4e78d18d66725157a1c67d916b6762260 Mon Sep 17 00:00:00 2001 From: Guohan Lu Date: Sun, 17 Nov 2019 06:22:40 +0000 Subject: [PATCH 1/7] [ansible]: use native docker module from ansible - remove docker-py 1.7.2 - remove library/docker.py - use docker-login and docker-container module from ansible - adapt to docker 4.0.2 in vm_topology.py requires ansible 2.8.7 --- ansible/library/docker.py | 1775 ------------------ ansible/roles/vm_set/library/vm_topology.py | 8 +- ansible/roles/vm_set/tasks/add_topo.yml | 18 +- ansible/roles/vm_set/tasks/docker.yml | 5 - ansible/roles/vm_set/tasks/remove_topo.yml | 3 +- ansible/roles/vm_set/tasks/renumber_topo.yml | 30 +- 6 files changed, 35 insertions(+), 1804 deletions(-) delete mode 100644 ansible/library/docker.py diff --git a/ansible/library/docker.py b/ansible/library/docker.py deleted file mode 100644 index 545bbd592e6..00000000000 --- a/ansible/library/docker.py +++ /dev/null @@ -1,1775 +0,0 @@ -#!/usr/bin/python - -# (c) 2013, Cove Schneider -# (c) 2014, Joshua Conner -# (c) 2014, Pavel Antonov -# -# This file is part of Ansible, -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -###################################################################### - -DOCUMENTATION = ''' ---- -module: docker -version_added: "1.4" -short_description: manage docker containers -description: - - Manage the life cycle of docker containers. 
-options: - count: - description: - - Number of matching containers that should be in the desired state. - default: 1 - image: - description: - - Container image used to match and launch containers. - required: true - pull: - description: - - Control when container images are updated from the C(docker_url) registry. - If "missing," images will be pulled only when missing from the host; - if '"always," the registry will be checked for a newer version of the - image' each time the task executes. - default: missing - choices: [ "missing", "always" ] - version_added: "1.9" - command: - description: - - Command used to match and launch containers. - default: null - name: - description: - - Name used to match and uniquely name launched containers. Explicit names - are used to uniquely identify a single container or to link among - containers. Mutually exclusive with a "count" other than "1". - default: null - version_added: "1.5" - ports: - description: - - "List containing private to public port mapping specification. - Use docker 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)' - where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is - a host interface. - The container ports need to be exposed either in the Dockerfile or via the C(expose) option." - default: null - version_added: "1.5" - expose: - description: - - List of additional container ports to expose for port mappings or links. - If the port is already exposed using EXPOSE in a Dockerfile, you don't - need to expose it again. - default: null - version_added: "1.5" - publish_all_ports: - description: - - Publish all exposed ports to the host interfaces. - default: false - version_added: "1.5" - volumes: - description: - - List of volumes to mount within the container using docker CLI-style - - 'syntax: C(/host:/container[:mode]) where "mode" may be "rw" or "ro".' - default: null - volumes_from: - description: - - List of names of containers to mount volumes from. 
- default: null - links: - description: - - List of other containers to link within this container with an optional - - 'alias. Use docker CLI-style syntax: C(redis:myredis).' - default: null - version_added: "1.5" - log_driver: - description: - - You can specify a different logging driver for the container than for the daemon. - "json-file" Default logging driver for Docker. Writes JSON messages to file. - docker logs command is available only for this logging driver. - "none" disables any logging for the container. - "syslog" Syslog logging driver for Docker. Writes log messages to syslog. - docker logs command is not available for this logging driver. - "journald" Journald logging driver for Docker. Writes log messages to "journald". - "gelf" Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint likeGraylog or Logstash. - "fluentd" Fluentd logging driver for Docker. Writes log messages to "fluentd" (forward input). - If not defined explicitly, the Docker daemon's default ("json-file") will apply. - Requires docker >= 1.6.0. - required: false - default: json-file - choices: - - json-file - - none - - syslog - - journald - - gelf - - fluentd - version_added: "2.0" - log_opt: - description: - - Additional options to pass to the logging driver selected above. See Docker `log-driver - ` documentation for more information. - Requires docker >=1.7.0. - required: false - default: null - version_added: "2.0" - memory_limit: - description: - - RAM allocated to the container as a number of bytes or as a human-readable - string like "512MB". Leave as "0" to specify no limit. - default: 0 - docker_url: - description: - - URL of the host running the docker daemon. This will default to the env - var DOCKER_HOST if unspecified. - default: ${DOCKER_HOST} or unix://var/run/docker.sock - use_tls: - description: - - Whether to use tls to connect to the docker server. 
"no" means not to - use tls (and ignore any other tls related parameters). "encrypt" means - to use tls to encrypt the connection to the server. "verify" means to - also verify that the server's certificate is valid for the server - (this both verifies the certificate against the CA and that the - certificate was issued for that host. If this is unspecified, tls will - only be used if one of the other tls options require it. - choices: [ "no", "encrypt", "verify" ] - version_added: "1.9" - tls_client_cert: - description: - - Path to the PEM-encoded certificate used to authenticate docker client. - If specified tls_client_key must be valid - default: ${DOCKER_CERT_PATH}/cert.pem - version_added: "1.9" - tls_client_key: - description: - - Path to the PEM-encoded key used to authenticate docker client. If - specified tls_client_cert must be valid - default: ${DOCKER_CERT_PATH}/key.pem - version_added: "1.9" - tls_ca_cert: - description: - - Path to a PEM-encoded certificate authority to secure the Docker connection. - This has no effect if use_tls is encrypt. - default: ${DOCKER_CERT_PATH}/ca.pem - version_added: "1.9" - tls_hostname: - description: - - A hostname to check matches what's supplied in the docker server's - certificate. If unspecified, the hostname is taken from the docker_url. - default: Taken from docker_url - version_added: "1.9" - docker_api_version: - description: - - Remote API version to use. This defaults to the current default as - specified by docker-py. - default: docker-py default remote API version - version_added: "1.8" - docker_user: - description: - - Username or UID to use within the container - required: false - default: null - version_added: "2.0" - username: - description: - - Remote API username. - default: null - password: - description: - - Remote API password. - default: null - email: - description: - - Remote API email. - default: null - hostname: - description: - - Container hostname. 
- default: null - domainname: - description: - - Container domain name. - default: null - env: - description: - - Pass a dict of environment variables to the container. - default: null - dns: - description: - - List of custom DNS servers for the container. - required: false - default: null - detach: - description: - - Enable detached mode to leave the container running in background. If - disabled, fail unless the process exits cleanly. - default: true - signal: - version_added: "2.0" - description: - - With the state "killed", you can alter the signal sent to the - container. - required: false - default: KILL - state: - description: - - Assert the container's desired state. "present" only asserts that the - matching containers exist. "started" asserts that the matching - containers both exist and are running, but takes no action if any - configuration has changed. "reloaded" (added in Ansible 1.9) asserts that all matching - containers are running and restarts any that have any images or - configuration out of date. "restarted" unconditionally restarts (or - starts) the matching containers. "stopped" and '"killed" stop and kill - all matching containers. "absent" stops and then' removes any matching - containers. - required: false - default: started - choices: - - present - - started - - reloaded - - restarted - - stopped - - killed - - absent - privileged: - description: - - Whether the container should run in privileged mode or not. - default: false - lxc_conf: - description: - - LXC configuration parameters, such as C(lxc.aa_profile:unconfined). - default: null - stdin_open: - description: - - Keep stdin open after a container is launched. - default: false - version_added: "1.6" - tty: - description: - - Allocate a pseudo-tty within the container. - default: false - version_added: "1.6" - net: - description: - - 'Network mode for the launched container: bridge, none, container:' - - or host. Requires docker >= 0.11. 
- default: false - version_added: "1.8" - pid: - description: - - Set the PID namespace mode for the container (currently only supports 'host'). Requires docker-py >= 1.0.0 and docker >= 1.5.0 - required: false - default: None - aliases: [] - version_added: "1.9" - registry: - description: - - Remote registry URL to pull images from. - default: DockerHub - aliases: [] - version_added: "1.8" - read_only: - description: - - Mount the container's root filesystem as read only - default: null - aliases: [] - version_added: "2.0" - restart_policy: - description: - - Container restart policy. - choices: ["no", "on-failure", "always"] - default: null - version_added: "1.9" - restart_policy_retry: - description: - - Maximum number of times to restart a container. Leave as "0" for unlimited - retries. - default: 0 - version_added: "1.9" - extra_hosts: - version_added: "2.0" - description: - - Dict of custom host-to-IP mappings to be defined in the container - insecure_registry: - description: - - Use insecure private registry by HTTP instead of HTTPS. Needed for - docker-py >= 0.5.0. - default: false - version_added: "1.9" - cpu_set: - description: - - CPUs in which to allow execution. Requires docker-py >= 0.6.0. - required: false - default: null - version_added: "2.0" - cap_add: - description: - - Add capabilities for the container. Requires docker-py >= 0.5.0. - required: false - default: false - version_added: "2.0" - cap_drop: - description: - - Drop capabilities for the container. Requires docker-py >= 0.5.0. - required: false - default: false - aliases: [] - version_added: "2.0" - stop_timeout: - description: - - How many seconds to wait for the container to stop before killing it. 
- required: false - default: 10 - version_added: "2.0" -author: - - "Cove Schneider (@cove)" - - "Joshua Conner (@joshuaconner)" - - "Pavel Antonov (@softzilla)" - - "Ash Wilson (@smashwilson)" - - "Thomas Steinbach (@ThomasSteinbach)" - - "Philippe Jandot (@zfil)" -requirements: - - "python >= 2.6" - - "docker-py >= 0.3.0" - - "The docker server >= 0.10.0" -''' - -EXAMPLES = ''' -# Containers are matched either by name (if provided) or by an exact match of -# the image they were launched with and the command they're running. The module -# can accept either a name to target a container uniquely, or a count to operate -# on multiple containers at once when it makes sense to do so. - -# Ensure that a data container with the name "mydata" exists. If no container -# by this name exists, it will be created, but not started. - -- name: data container - docker: - name: mydata - image: busybox - state: present - volumes: - - /data - -# Ensure that a Redis server is running, using the volume from the data -# container. Expose the default Redis port. - -- name: redis container - docker: - name: myredis - image: redis - command: redis-server --appendonly yes - state: started - expose: - - 6379 - volumes_from: - - mydata - -# Ensure that a container of your application server is running. This will: -# - pull the latest version of your application image from DockerHub. -# - ensure that a container is running with the specified name and exact image. -# If any configuration options have changed, the existing container will be -# stopped and removed, and a new one will be launched in its place. -# - link this container to the existing redis container launched above with -# an alias. -# - bind TCP port 9000 within the container to port 8080 on all interfaces -# on the host. -# - bind UDP port 9001 within the container to port 8081 on the host, only -# listening on localhost. -# - set the environment variable SECRET_KEY to "ssssh". 
- -- name: application container - docker: - name: myapplication - image: someuser/appimage - state: reloaded - pull: always - links: - - "myredis:aliasedredis" - ports: - - "8080:9000" - - "127.0.0.1:8081:9001/udp" - env: - SECRET_KEY: ssssh - -# Ensure that exactly five containers of another server are running with this -# exact image and command. If fewer than five are running, more will be launched; -# if more are running, the excess will be stopped. - -- name: load-balanced containers - docker: - state: reloaded - count: 5 - image: someuser/anotherappimage - command: sleep 1d - -# Unconditionally restart a service container. This may be useful within a -# handler, for example. - -- name: application service - docker: - name: myservice - image: someuser/serviceimage - state: restarted - -# Stop all containers running the specified image. - -- name: obsolete container - docker: - image: someuser/oldandbusted - state: stopped - -# Stop and remove a container with the specified name. - -- name: obsolete container - docker: - name: ohno - image: someuser/oldandbusted - state: absent - -# Example Syslogging Output - -- name: myservice container - docker: - name: myservice - image: someservice/someimage - state: reloaded - log_driver: syslog - log_opt: - syslog-address: tcp://my-syslog-server:514 - syslog-facility: daemon - syslog-tag: myservice -''' - -HAS_DOCKER_PY = True -DEFAULT_DOCKER_API_VERSION = None - -import sys -import json -import os -import shlex -from urlparse import urlparse -try: - import docker.client - import docker.utils - import docker.errors - from requests.exceptions import RequestException -except ImportError: - HAS_DOCKER_PY = False - -if HAS_DOCKER_PY: - try: - from docker.errors import APIError as DockerAPIError - except ImportError: - from docker.client import APIError as DockerAPIError - try: - # docker-py 1.2+ - import docker.constants - DEFAULT_DOCKER_API_VERSION = docker.constants.DEFAULT_DOCKER_API_VERSION - except (ImportError, 
AttributeError): - # docker-py less than 1.2 - DEFAULT_DOCKER_API_VERSION = docker.client.DEFAULT_DOCKER_API_VERSION - - -def _human_to_bytes(number): - suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] - - if isinstance(number, int): - return number - if number[-1] == suffixes[0] and number[-2].isdigit(): - return number[:-1] - - i = 1 - for each in suffixes[1:]: - if number[-len(each):] == suffixes[i]: - return int(number[:-len(each)]) * (1024 ** i) - i = i + 1 - - raise ValueError('Could not convert %s to integer' % (number,)) - - -def _ansible_facts(container_list): - return {"docker_containers": container_list} - - -def _docker_id_quirk(inspect): - # XXX: some quirk in docker - if 'ID' in inspect: - inspect['Id'] = inspect['ID'] - del inspect['ID'] - return inspect - - -def get_split_image_tag(image): - # If image contains a host or org name, omit that from our check - if '/' in image: - registry, resource = image.rsplit('/', 1) - else: - registry, resource = None, image - - # now we can determine if image has a tag or a digest - tag = "latest" - basename = resource - for s in ['@',':']: - if s in resource: - basename, tag = resource.split(s, 1) - break - - if registry: - fullname = '/'.join((registry, basename)) - else: - fullname = basename - - return fullname, tag - -def normalize_image(image): - """ - Normalize a Docker image name to include the implied :latest tag. - """ - - return ":".join(get_split_image_tag(image)) - - -def is_running(container): - '''Return True if an inspected container is in a state we consider "running."''' - - return container['State']['Running'] == True and not container['State'].get('Ghost', False) - - -def get_docker_py_versioninfo(): - if hasattr(docker, '__version__'): - # a '__version__' attribute was added to the module but not until - # after 0.3.0 was pushed to pypi. If it's there, use it. 
- version = [] - for part in docker.__version__.split('.'): - try: - version.append(int(part)) - except ValueError: - for idx, char in enumerate(part): - if not char.isdigit(): - nondigit = part[idx:] - digit = part[:idx] - break - if digit: - version.append(int(digit)) - if nondigit: - version.append(nondigit) - elif hasattr(docker.Client, '_get_raw_response_socket'): - # HACK: if '__version__' isn't there, we check for the existence of - # `_get_raw_response_socket` in the docker.Client class, which was - # added in 0.3.0 - version = (0, 3, 0) - else: - # This is untrue but this module does not function with a version less - # than 0.3.0 so it's okay to lie here. - version = (0,) - - return tuple(version) - - -def check_dependencies(module): - """ - Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a - helpful error message if it isn't. - """ - if not HAS_DOCKER_PY: - module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.") - else: - versioninfo = get_docker_py_versioninfo() - if versioninfo < (0, 3, 0): - module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.") - - -class DockerManager(object): - - counters = dict( - created=0, started=0, stopped=0, killed=0, removed=0, restarted=0, pulled=0 - ) - reload_reasons = [] - _capabilities = set() - - # Map optional parameters to minimum (docker-py version, server APIVersion) - # docker-py version is a tuple of ints because we have to compare them - # server APIVersion is passed to a docker-py function that takes strings - _cap_ver_req = { - 'dns': ((0, 3, 0), '1.10'), - 'volumes_from': ((0, 3, 0), '1.10'), - 'restart_policy': ((0, 5, 0), '1.14'), - 'extra_hosts': ((0, 7, 0), '1.3.1'), - 'pid': ((1, 0, 0), '1.17'), - 'log_driver': ((1, 2, 0), '1.18'), - 'log_opt': ((1, 2, 0), '1.18'), - 'host_config': ((0, 7, 0), '1.15'), - 'cpu_set': ((0, 6, 0), '1.14'), - 'cap_add': ((0, 5, 0), '1.14'), - 'cap_drop': ((0, 5, 0), 
'1.14'), - 'read_only': ((1, 0, 0), '1.17'), - 'stop_timeout': ((0, 5, 0), '1.0'), - # Clientside only - 'insecure_registry': ((0, 5, 0), '0.0') - } - - def __init__(self, module): - self.module = module - - self.binds = None - self.volumes = None - if self.module.params.get('volumes'): - self.binds = {} - self.volumes = [] - vols = self.module.params.get('volumes') - for vol in vols: - parts = vol.split(":") - # regular volume - if len(parts) == 1: - self.volumes.append(parts[0]) - # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container) - elif 2 <= len(parts) <= 3: - # default to read-write - ro = False - # with supplied bind mode - if len(parts) == 3: - if parts[2] not in ['ro', 'rw']: - self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') - else: - ro = parts[2] == 'ro' - self.binds[parts[0]] = {'bind': parts[1], 'ro': ro } - else: - self.module.fail_json(msg='volumes support 1 to 3 arguments') - - self.lxc_conf = None - if self.module.params.get('lxc_conf'): - self.lxc_conf = [] - options = self.module.params.get('lxc_conf') - for option in options: - parts = option.split(':', 1) - self.lxc_conf.append({"Key": parts[0], "Value": parts[1]}) - - self.exposed_ports = None - if self.module.params.get('expose'): - self.exposed_ports = self.get_exposed_ports(self.module.params.get('expose')) - - self.port_bindings = None - if self.module.params.get('ports'): - self.port_bindings = self.get_port_bindings(self.module.params.get('ports')) - - self.links = None - if self.module.params.get('links'): - self.links = self.get_links(self.module.params.get('links')) - - self.env = self.module.params.get('env', None) - - # Connect to the docker server using any configured host and TLS settings. 
- - env_host = os.getenv('DOCKER_HOST') - env_docker_verify = os.getenv('DOCKER_TLS_VERIFY') - env_cert_path = os.getenv('DOCKER_CERT_PATH') - env_docker_hostname = os.getenv('DOCKER_TLS_HOSTNAME') - - docker_url = module.params.get('docker_url') - if not docker_url: - if env_host: - docker_url = env_host - else: - docker_url = 'unix://var/run/docker.sock' - - docker_api_version = module.params.get('docker_api_version') - - tls_client_cert = module.params.get('tls_client_cert', None) - if not tls_client_cert and env_cert_path: - tls_client_cert = os.path.join(env_cert_path, 'cert.pem') - - tls_client_key = module.params.get('tls_client_key', None) - if not tls_client_key and env_cert_path: - tls_client_key = os.path.join(env_cert_path, 'key.pem') - - tls_ca_cert = module.params.get('tls_ca_cert') - if not tls_ca_cert and env_cert_path: - tls_ca_cert = os.path.join(env_cert_path, 'ca.pem') - - tls_hostname = module.params.get('tls_hostname') - if tls_hostname is None: - if env_docker_hostname: - tls_hostname = env_docker_hostname - else: - parsed_url = urlparse(docker_url) - if ':' in parsed_url.netloc: - tls_hostname = parsed_url.netloc[:parsed_url.netloc.rindex(':')] - else: - tls_hostname = parsed_url - if not tls_hostname: - tls_hostname = True - - # use_tls can be one of four values: - # no: Do not use tls - # encrypt: Use tls. We may do client auth. We will not verify the server - # verify: Use tls. We may do client auth. 
We will verify the server - # None: Only use tls if the parameters for client auth were specified - # or tls_ca_cert (which requests verifying the server with - # a specific ca certificate) - use_tls = module.params.get('use_tls') - if use_tls is None and env_docker_verify is not None: - use_tls = 'verify' - - tls_config = None - if use_tls != 'no': - params = {} - - # Setup client auth - if tls_client_cert and tls_client_key: - params['client_cert'] = (tls_client_cert, tls_client_key) - - # We're allowed to verify the connection to the server - if use_tls == 'verify' or (use_tls is None and tls_ca_cert): - if tls_ca_cert: - params['ca_cert'] = tls_ca_cert - params['verify'] = True - params['assert_hostname'] = tls_hostname - else: - params['verify'] = True - params['assert_hostname'] = tls_hostname - elif use_tls == 'encrypt': - params['verify'] = False - - if params: - # See https://github.com/docker/docker-py/blob/d39da11/docker/utils/utils.py#L279-L296 - docker_url = docker_url.replace('tcp://', 'https://') - tls_config = docker.tls.TLSConfig(**params) - - self.client = docker.Client(base_url=docker_url, - version=docker_api_version, - tls=tls_config) - - self.docker_py_versioninfo = get_docker_py_versioninfo() - - def _check_capabilities(self): - """ - Create a list of available capabilities - """ - api_version = self.client.version()['ApiVersion'] - for cap, req_vers in self._cap_ver_req.items(): - if (self.docker_py_versioninfo >= req_vers[0] and - docker.utils.compare_version(req_vers[1], api_version) >= 0): - self._capabilities.add(cap) - - def ensure_capability(self, capability, fail=True): - """ - Some of the functionality this ansible module implements are only - available in newer versions of docker. Ensure that the capability - is available here. - - If fail is set to False then return True or False depending on whether - we have the capability. Otherwise, simply fail and exit the module if - we lack the capability. 
- """ - if not self._capabilities: - self._check_capabilities() - - if capability in self._capabilities: - return True - - if not fail: - return False - - api_version = self.client.version()['ApiVersion'] - self.module.fail_json(msg='Specifying the `%s` parameter requires' - ' docker-py: %s, docker server apiversion %s; found' - ' docker-py: %s, server: %s' % ( - capability, - '.'.join(map(str, self._cap_ver_req[capability][0])), - self._cap_ver_req[capability][1], - '.'.join(map(str, self.docker_py_versioninfo)), - api_version)) - - def get_links(self, links): - """ - Parse the links passed, if a link is specified without an alias then just create the alias of the same name as the link - """ - processed_links = {} - - for link in links: - parsed_link = link.split(':', 1) - if(len(parsed_link) == 2): - processed_links[parsed_link[0]] = parsed_link[1] - else: - processed_links[parsed_link[0]] = parsed_link[0] - - return processed_links - - def get_exposed_ports(self, expose_list): - """ - Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax. 
- """ - if expose_list: - exposed = [] - for port in expose_list: - port = str(port).strip() - if port.endswith('/tcp') or port.endswith('/udp'): - port_with_proto = tuple(port.split('/')) - else: - # assume tcp protocol if not specified - port_with_proto = (port, 'tcp') - exposed.append(port_with_proto) - return exposed - else: - return None - - def get_start_params(self): - """ - Create start params - """ - params = { - 'lxc_conf': self.lxc_conf, - 'binds': self.binds, - 'port_bindings': self.port_bindings, - 'publish_all_ports': self.module.params.get('publish_all_ports'), - 'privileged': self.module.params.get('privileged'), - 'links': self.links, - 'network_mode': self.module.params.get('net'), - } - - optionals = {} - for optional_param in ('dns', 'volumes_from', 'restart_policy', - 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver', - 'cap_add', 'cap_drop', 'read_only', 'log_opt'): - optionals[optional_param] = self.module.params.get(optional_param) - - if optionals['dns'] is not None: - self.ensure_capability('dns') - params['dns'] = optionals['dns'] - - if optionals['volumes_from'] is not None: - self.ensure_capability('volumes_from') - params['volumes_from'] = optionals['volumes_from'] - - if optionals['restart_policy'] is not None: - self.ensure_capability('restart_policy') - params['restart_policy'] = { 'Name': optionals['restart_policy'] } - if params['restart_policy']['Name'] == 'on-failure': - params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] - - # docker_py only accepts 'host' or None - if 'pid' in optionals and not optionals['pid']: - optionals['pid'] = None - - if optionals['pid'] is not None: - self.ensure_capability('pid') - params['pid_mode'] = optionals['pid'] - - if optionals['extra_hosts'] is not None: - self.ensure_capability('extra_hosts') - params['extra_hosts'] = optionals['extra_hosts'] - - if optionals['log_driver'] is not None: - self.ensure_capability('log_driver') - log_config = 
docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON) - if optionals['log_opt'] is not None: - for k, v in optionals['log_opt'].iteritems(): - log_config.set_config_value(k, v) - log_config.type = optionals['log_driver'] - params['log_config'] = log_config - - if optionals['cap_add'] is not None: - self.ensure_capability('cap_add') - params['cap_add'] = optionals['cap_add'] - - if optionals['cap_drop'] is not None: - self.ensure_capability('cap_drop') - params['cap_drop'] = optionals['cap_drop'] - - if optionals['read_only'] is not None: - self.ensure_capability('read_only') - params['read_only'] = optionals['read_only'] - - return params - - def create_host_config(self): - """ - Create HostConfig object - """ - params = self.get_start_params() - return docker.utils.create_host_config(**params) - - def get_port_bindings(self, ports): - """ - Parse the `ports` string into a port bindings dict for the `start_container` call. - """ - binds = {} - for port in ports: - # ports could potentially be an array like [80, 443], so we make sure they're strings - # before splitting - parts = str(port).split(':') - container_port = parts[-1] - if '/' not in container_port: - container_port = int(parts[-1]) - - p_len = len(parts) - if p_len == 1: - # Bind `container_port` of the container to a dynamically - # allocated TCP port on all available interfaces of the host - # machine. - bind = ('0.0.0.0',) - elif p_len == 2: - # Bind `container_port` of the container to port `parts[0]` on - # all available interfaces of the host machine. - bind = ('0.0.0.0', int(parts[0])) - elif p_len == 3: - # Bind `container_port` of the container to port `parts[1]` on - # IP `parts[0]` of the host machine. If `parts[1]` empty bind - # to a dynamically allocated port of IP `parts[0]`. 
- bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],) - - if container_port in binds: - old_bind = binds[container_port] - if isinstance(old_bind, list): - # append to list if it already exists - old_bind.append(bind) - else: - # otherwise create list that contains the old and new binds - binds[container_port] = [binds[container_port], bind] - else: - binds[container_port] = bind - - return binds - - def get_summary_message(self): - ''' - Generate a message that briefly describes the actions taken by this - task, in English. - ''' - - parts = [] - for k, v in self.counters.iteritems(): - if v == 0: - continue - - if v == 1: - plural = "" - else: - plural = "s" - parts.append("%s %d container%s" % (k, v, plural)) - - if parts: - return ", ".join(parts) + "." - else: - return "No action taken." - - def get_reload_reason_message(self): - ''' - Generate a message describing why any reloaded containers were reloaded. - ''' - - if self.reload_reasons: - return ", ".join(self.reload_reasons) - else: - return None - - def get_summary_counters_msg(self): - msg = "" - for k, v in self.counters.iteritems(): - msg = msg + "%s %d " % (k, v) - - return msg - - def increment_counter(self, name): - self.counters[name] = self.counters[name] + 1 - - def has_changed(self): - for k, v in self.counters.iteritems(): - if v > 0: - return True - - return False - - def get_inspect_image(self): - try: - return self.client.inspect_image(self.module.params.get('image')) - except DockerAPIError as e: - if e.response.status_code == 404: - return None - else: - raise e - - def get_image_repo_tags(self): - image, tag = get_split_image_tag(self.module.params.get('image')) - if tag is None: - tag = 'latest' - resource = '%s:%s' % (image, tag) - - for image in self.client.images(name=image): - # If image is pulled by digest, RepoTags may be None - repo_tags = image.get('RepoTags', None) - if repo_tags is not None and resource in repo_tags: - return repo_tags - repo_digests = 
image.get('RepoDigests', None) - if repo_digests is not None and resource in repo_digests: - return repo_digests - return [] - - def get_inspect_containers(self, containers): - inspect = [] - for i in containers: - details = self.client.inspect_container(i['Id']) - details = _docker_id_quirk(details) - inspect.append(details) - - return inspect - - def get_differing_containers(self): - """ - Inspect all matching, running containers, and return those that were - started with parameters that differ from the ones that are provided - during this module run. A list containing the differing - containers will be returned, and a short string describing the specific - difference encountered in each container will be appended to - reload_reasons. - - This generates the set of containers that need to be stopped and - started with new parameters with state=reloaded. - """ - - running = self.get_running_containers() - current = self.get_inspect_containers(running) - - #Get API version - api_version = self.client.version()['ApiVersion'] - - image = self.get_inspect_image() - if image is None: - # The image isn't present. Assume that we're about to pull a new - # tag and *everything* will be restarted. - # - # This will give false positives if you untag an image on the host - # and there's nothing more to pull. - return current - - differing = [] - - for container in current: - - # IMAGE - # Compare the image by ID rather than name, so that containers - # will be restarted when new versions of an existing image are - # pulled. 
- if container['Image'] != image['Id']: - self.reload_reasons.append('image ({0} => {1})'.format(container['Image'], image['Id'])) - differing.append(container) - continue - - # COMMAND - - expected_command = self.module.params.get('command') - if expected_command: - expected_command = shlex.split(expected_command) - actual_command = container["Config"]["Cmd"] - - if actual_command != expected_command: - self.reload_reasons.append('command ({0} => {1})'.format(actual_command, expected_command)) - differing.append(container) - continue - - # EXPOSED PORTS - expected_exposed_ports = set((image['ContainerConfig'].get('ExposedPorts') or {}).keys()) - for p in (self.exposed_ports or []): - expected_exposed_ports.add("/".join(p)) - - actually_exposed_ports = set((container["Config"].get("ExposedPorts") or {}).keys()) - - if actually_exposed_ports != expected_exposed_ports: - self.reload_reasons.append('exposed_ports ({0} => {1})'.format(actually_exposed_ports, expected_exposed_ports)) - differing.append(container) - continue - - # VOLUMES - - expected_volume_keys = set((image['ContainerConfig']['Volumes'] or {}).keys()) - if self.volumes: - expected_volume_keys.update(self.volumes) - - actual_volume_keys = set((container['Config']['Volumes'] or {}).keys()) - - if actual_volume_keys != expected_volume_keys: - self.reload_reasons.append('volumes ({0} => {1})'.format(actual_volume_keys, expected_volume_keys)) - differing.append(container) - continue - - # MEM_LIMIT - - try: - expected_mem = _human_to_bytes(self.module.params.get('memory_limit')) - except ValueError as e: - self.module.fail_json(msg=str(e)) - - #For v1.19 API and above use HostConfig, otherwise use Config - if docker.utils.compare_version('1.19', api_version) >= 0: - actual_mem = container['HostConfig']['Memory'] - else: - actual_mem = container['Config']['Memory'] - - if expected_mem and actual_mem != expected_mem: - self.reload_reasons.append('memory ({0} => {1})'.format(actual_mem, expected_mem)) - 
differing.append(container) - continue - - # ENVIRONMENT - # actual_env is likely to include environment variables injected by - # the Dockerfile. - - expected_env = {} - - for image_env in image['ContainerConfig']['Env'] or []: - name, value = image_env.split('=', 1) - expected_env[name] = value - - if self.env: - for name, value in self.env.iteritems(): - expected_env[name] = str(value) - - actual_env = {} - for container_env in container['Config']['Env'] or []: - name, value = container_env.split('=', 1) - actual_env[name] = value - - if actual_env != expected_env: - # Don't include the environment difference in the output. - self.reload_reasons.append('environment {0} => {1}'.format(actual_env, expected_env)) - differing.append(container) - continue - - # HOSTNAME - - expected_hostname = self.module.params.get('hostname') - actual_hostname = container['Config']['Hostname'] - if expected_hostname and actual_hostname != expected_hostname: - self.reload_reasons.append('hostname ({0} => {1})'.format(actual_hostname, expected_hostname)) - differing.append(container) - continue - - # DOMAINNAME - - expected_domainname = self.module.params.get('domainname') - actual_domainname = container['Config']['Domainname'] - if expected_domainname and actual_domainname != expected_domainname: - self.reload_reasons.append('domainname ({0} => {1})'.format(actual_domainname, expected_domainname)) - differing.append(container) - continue - - # DETACH - - # We don't have to check for undetached containers. If it wasn't - # detached, it would have stopped before the playbook continued! - - # NAME - - # We also don't have to check name, because this is one of the - # criteria that's used to determine which container(s) match in - # the first place. 
- - # STDIN_OPEN - - expected_stdin_open = self.module.params.get('stdin_open') - actual_stdin_open = container['Config']['OpenStdin'] - if actual_stdin_open != expected_stdin_open: - self.reload_reasons.append('stdin_open ({0} => {1})'.format(actual_stdin_open, expected_stdin_open)) - differing.append(container) - continue - - # TTY - - expected_tty = self.module.params.get('tty') - actual_tty = container['Config']['Tty'] - if actual_tty != expected_tty: - self.reload_reasons.append('tty ({0} => {1})'.format(actual_tty, expected_tty)) - differing.append(container) - continue - - # -- "start" call differences -- - - # LXC_CONF - - if self.lxc_conf: - expected_lxc = set(self.lxc_conf) - actual_lxc = set(container['HostConfig']['LxcConf'] or []) - if actual_lxc != expected_lxc: - self.reload_reasons.append('lxc_conf ({0} => {1})'.format(actual_lxc, expected_lxc)) - differing.append(container) - continue - - # BINDS - - expected_binds = set() - if self.binds: - for host_path, config in self.binds.iteritems(): - if isinstance(config, dict): - container_path = config['bind'] - if config['ro']: - mode = 'ro' - else: - mode = 'rw' - else: - container_path = config - mode = 'rw' - expected_binds.add("{0}:{1}:{2}".format(host_path, container_path, mode)) - - actual_binds = set() - for bind in (container['HostConfig']['Binds'] or []): - if len(bind.split(':')) == 2: - actual_binds.add(bind + ":rw") - else: - actual_binds.add(bind) - - if actual_binds != expected_binds: - self.reload_reasons.append('binds ({0} => {1})'.format(actual_binds, expected_binds)) - differing.append(container) - continue - - # PORT BINDINGS - - expected_bound_ports = {} - if self.port_bindings: - for container_port, config in self.port_bindings.iteritems(): - if isinstance(container_port, int): - container_port = "{0}/tcp".format(container_port) - if len(config) == 1: - expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}] - elif isinstance(config[0], tuple): - 
expected_bound_ports[container_port] = [] - for hostip, hostport in config: - expected_bound_ports[container_port].append({ 'HostIp': hostip, 'HostPort': str(hostport)}) - else: - expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}] - - actual_bound_ports = container['HostConfig']['PortBindings'] or {} - - if actual_bound_ports != expected_bound_ports: - self.reload_reasons.append('port bindings ({0} => {1})'.format(actual_bound_ports, expected_bound_ports)) - differing.append(container) - continue - - # PUBLISHING ALL PORTS - - # What we really care about is the set of ports that is actually - # published. That should be caught above. - - # PRIVILEGED - - expected_privileged = self.module.params.get('privileged') - actual_privileged = container['HostConfig']['Privileged'] - if actual_privileged != expected_privileged: - self.reload_reasons.append('privileged ({0} => {1})'.format(actual_privileged, expected_privileged)) - differing.append(container) - continue - - # LINKS - - expected_links = set() - for link, alias in (self.links or {}).iteritems(): - expected_links.add("/{0}:{1}/{2}".format(link, container["Name"], alias)) - - actual_links = set(container['HostConfig']['Links'] or []) - if actual_links != expected_links: - self.reload_reasons.append('links ({0} => {1})'.format(actual_links, expected_links)) - differing.append(container) - continue - - # NETWORK MODE - - expected_netmode = self.module.params.get('net') or 'bridge' - actual_netmode = container['HostConfig']['NetworkMode'] or 'bridge' - if actual_netmode != expected_netmode: - self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode)) - differing.append(container) - continue - - # DNS - - expected_dns = set(self.module.params.get('dns') or []) - actual_dns = set(container['HostConfig']['Dns'] or []) - if actual_dns != expected_dns: - self.reload_reasons.append('dns ({0} => {1})'.format(actual_dns, expected_dns)) - 
differing.append(container) - continue - - # VOLUMES_FROM - - expected_volumes_from = set(self.module.params.get('volumes_from') or []) - actual_volumes_from = set(container['HostConfig']['VolumesFrom'] or []) - if actual_volumes_from != expected_volumes_from: - self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from)) - differing.append(container) - - # LOG_DRIVER - - if self.ensure_capability('log_driver', False): - expected_log_driver = self.module.params.get('log_driver') or 'json-file' - actual_log_driver = container['HostConfig']['LogConfig']['Type'] - if actual_log_driver != expected_log_driver: - self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver)) - differing.append(container) - continue - - if self.ensure_capability('log_opt', False): - expected_logging_opts = self.module.params.get('log_opt') or {} - actual_log_opts = container['HostConfig']['LogConfig']['Config'] - if len(set(expected_logging_opts.items()) - set(actual_log_opts.items())) != 0: - log_opt_reasons = { - 'added': dict(set(expected_logging_opts.items()) - set(actual_log_opts.items())), - 'removed': dict(set(actual_log_opts.items()) - set(expected_logging_opts.items())) - } - self.reload_reasons.append('log_opt ({0})'.format(log_opt_reasons)) - differing.append(container) - - return differing - - def get_deployed_containers(self): - """ - Return any matching containers that are already present. - """ - - command = self.module.params.get('command') - if command is not None: - command = shlex.split(command) - name = self.module.params.get('name') - if name and not name.startswith('/'): - name = '/' + name - deployed = [] - - # "images" will be a collection of equivalent "name:tag" image names - # that map to the same Docker image. 
- inspected = self.get_inspect_image() - if inspected: - repo_tags = self.get_image_repo_tags() - else: - repo_tags = [normalize_image(self.module.params.get('image'))] - - for container in self.client.containers(all=True): - details = None - - if name: - name_list = container.get('Names') - if name_list is None: - name_list = [] - matches = name in name_list - else: - details = self.client.inspect_container(container['Id']) - details = _docker_id_quirk(details) - - running_image = normalize_image(details['Config']['Image']) - - image_matches = running_image in repo_tags - - command_matches = command == details['Config']['Cmd'] - - matches = image_matches and command_matches - - if matches: - if not details: - details = self.client.inspect_container(container['Id']) - details = _docker_id_quirk(details) - - deployed.append(details) - - return deployed - - def get_running_containers(self): - return [c for c in self.get_deployed_containers() if is_running(c)] - - def pull_image(self): - extra_params = {} - if self.module.params.get('insecure_registry'): - if self.ensure_capability('insecure_registry', fail=False): - extra_params['insecure_registry'] = self.module.params.get('insecure_registry') - - resource = self.module.params.get('image') - image, tag = get_split_image_tag(resource) - if self.module.params.get('username'): - try: - self.client.login( - self.module.params.get('username'), - password=self.module.params.get('password'), - email=self.module.params.get('email'), - registry=self.module.params.get('registry') - ) - except Exception as e: - self.module.fail_json(msg="failed to login to the remote registry, check your username/password.", error=repr(e)) - try: - changes = list(self.client.pull(image, tag=tag, stream=True, **extra_params)) - try: - last = changes[-1] - # seems Docker 1.8 puts an empty dict at the end of the - # stream; catch that and get the previous instead - # https://github.com/ansible/ansible-modules-core/issues/2043 - if last.strip() == 
'{}': - last = changes[-2] - except IndexError: - last = '{}' - status = json.loads(last).get('status', '') - if status.startswith('Status: Image is up to date for'): - # Image is already up to date. Don't increment the counter. - pass - elif (status.startswith('Status: Downloaded newer image for') or - status.startswith('Download complete')): - # Image was updated. Increment the pull counter. - self.increment_counter('pulled') - else: - # Unrecognized status string. - self.module.fail_json(msg="Unrecognized status from pull.", status=status, changes=changes) - except Exception as e: - self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e)) - - def create_containers(self, count=1): - try: - mem_limit = _human_to_bytes(self.module.params.get('memory_limit')) - except ValueError as e: - self.module.fail_json(msg=str(e)) - api_version = self.client.version()['ApiVersion'] - - params = {'image': self.module.params.get('image'), - 'command': self.module.params.get('command'), - 'ports': self.exposed_ports, - 'volumes': self.volumes, - 'environment': self.env, - 'hostname': self.module.params.get('hostname'), - 'domainname': self.module.params.get('domainname'), - 'detach': self.module.params.get('detach'), - 'name': self.module.params.get('name'), - 'stdin_open': self.module.params.get('stdin_open'), - 'tty': self.module.params.get('tty'), - 'cpuset': self.module.params.get('cpu_set'), - 'user': self.module.params.get('docker_user'), - } - if self.ensure_capability('host_config', fail=False): - params['host_config'] = self.create_host_config() - - #For v1.19 API and above use HostConfig, otherwise use Config - if docker.utils.compare_version('1.19', api_version) < 0: - params['mem_limit'] = mem_limit - else: - params['host_config']['Memory'] = mem_limit - - - def do_create(count, params): - results = [] - for _ in range(count): - result = self.client.create_container(**params) - self.increment_counter('created') - 
results.append(result) - - return results - - try: - containers = do_create(count, params) - except docker.errors.APIError as e: - if e.response.status_code != 404: - raise - - self.pull_image() - containers = do_create(count, params) - - return containers - - def start_containers(self, containers): - params = {} - - if not self.ensure_capability('host_config', fail=False): - params = self.get_start_params() - - for i in containers: - self.client.start(i) - self.increment_counter('started') - - if not self.module.params.get('detach'): - status = self.client.wait(i['Id']) - if status != 0: - output = self.client.logs(i['Id'], stdout=True, stderr=True, - stream=False, timestamps=False) - self.module.fail_json(status=status, msg=output) - - def stop_containers(self, containers): - for i in containers: - self.client.stop(i['Id'], self.module.params.get('stop_timeout')) - self.increment_counter('stopped') - - return [self.client.wait(i['Id']) for i in containers] - - def remove_containers(self, containers): - for i in containers: - self.client.remove_container(i['Id']) - self.increment_counter('removed') - - def kill_containers(self, containers): - for i in containers: - self.client.kill(i['Id'], self.module.params.get('signal')) - self.increment_counter('killed') - - def restart_containers(self, containers): - for i in containers: - self.client.restart(i['Id']) - self.increment_counter('restarted') - - -class ContainerSet: - - def __init__(self, manager): - self.manager = manager - self.running = [] - self.deployed = [] - self.changed = [] - - def refresh(self): - ''' - Update our view of the matching containers from the Docker daemon. - ''' - - - self.deployed = self.manager.get_deployed_containers() - self.running = [c for c in self.deployed if is_running(c)] - - def notice_changed(self, containers): - ''' - Record a collection of containers as "changed". 
- ''' - - self.changed.extend(containers) - - -def present(manager, containers, count, name): - '''Ensure that exactly `count` matching containers exist in any state.''' - - containers.refresh() - delta = count - len(containers.deployed) - - if delta > 0: - created = manager.create_containers(delta) - containers.notice_changed(manager.get_inspect_containers(created)) - - if delta < 0: - # If both running and stopped containers exist, remove - # stopped containers first. - containers.deployed.sort(lambda cx, cy: cmp(is_running(cx), is_running(cy))) - - to_stop = [] - to_remove = [] - for c in containers.deployed[0:-delta]: - if is_running(c): - to_stop.append(c) - to_remove.append(c) - - manager.stop_containers(to_stop) - containers.notice_changed(manager.get_inspect_containers(to_remove)) - manager.remove_containers(to_remove) - -def started(manager, containers, count, name): - '''Ensure that exactly `count` matching containers exist and are running.''' - - containers.refresh() - delta = count - len(containers.running) - - if delta > 0: - if name and containers.deployed: - # A stopped container exists with the requested name. - # Clean it up before attempting to start a new one. - manager.remove_containers(containers.deployed) - - created = manager.create_containers(delta) - manager.start_containers(created) - containers.notice_changed(manager.get_inspect_containers(created)) - - if delta < 0: - excess = containers.running[0:-delta] - containers.notice_changed(manager.get_inspect_containers(excess)) - manager.stop_containers(excess) - manager.remove_containers(excess) - -def reloaded(manager, containers, count, name): - ''' - Ensure that exactly `count` matching containers exist and are - running. If any associated settings have been changed (volumes, - ports or so on), restart those containers. 
- ''' - - containers.refresh() - - for container in manager.get_differing_containers(): - manager.stop_containers([container]) - manager.remove_containers([container]) - - started(manager, containers, count, name) - -def restarted(manager, containers, count, name): - ''' - Ensure that exactly `count` matching containers exist and are - running. Unconditionally restart any that were already running. - ''' - - containers.refresh() - - for container in manager.get_differing_containers(): - manager.stop_containers([container]) - manager.remove_containers([container]) - - manager.restart_containers(containers.running) - started(manager, containers, count, name) - -def stopped(manager, containers, count, name): - '''Stop any matching containers that are running.''' - - containers.refresh() - - manager.stop_containers(containers.running) - containers.notice_changed(manager.get_inspect_containers(containers.running)) - -def killed(manager, containers, count, name): - '''Kill any matching containers that are running.''' - - containers.refresh() - - manager.kill_containers(containers.running) - containers.notice_changed(manager.get_inspect_containers(containers.running)) - -def absent(manager, containers, count, name): - '''Stop and remove any matching containers.''' - - containers.refresh() - - manager.stop_containers(containers.running) - containers.notice_changed(manager.get_inspect_containers(containers.deployed)) - manager.remove_containers(containers.deployed) - -def main(): - module = AnsibleModule( - argument_spec = dict( - count = dict(default=1), - image = dict(required=True), - pull = dict(required=False, default='missing', choices=['missing', 'always']), - command = dict(required=False, default=None), - expose = dict(required=False, default=None, type='list'), - ports = dict(required=False, default=None, type='list'), - publish_all_ports = dict(default=False, type='bool'), - volumes = dict(default=None, type='list'), - volumes_from = dict(default=None), - links = 
dict(default=None, type='list'), - memory_limit = dict(default=0), - memory_swap = dict(default=0), - docker_url = dict(), - use_tls = dict(default=None, choices=['no', 'encrypt', 'verify']), - tls_client_cert = dict(required=False, default=None, type='str'), - tls_client_key = dict(required=False, default=None, type='str'), - tls_ca_cert = dict(required=False, default=None, type='str'), - tls_hostname = dict(required=False, type='str', default=None), - docker_api_version = dict(required=False, default=DEFAULT_DOCKER_API_VERSION, type='str'), - docker_user = dict(default=None), - username = dict(default=None), - password = dict(), - email = dict(), - registry = dict(), - hostname = dict(default=None), - domainname = dict(default=None), - env = dict(type='dict'), - dns = dict(), - detach = dict(default=True, type='bool'), - state = dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent', 'running']), - signal = dict(default=None), - restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']), - restart_policy_retry = dict(default=0, type='int'), - extra_hosts = dict(type='dict'), - debug = dict(default=False, type='bool'), - privileged = dict(default=False, type='bool'), - stdin_open = dict(default=False, type='bool'), - tty = dict(default=False, type='bool'), - lxc_conf = dict(default=None, type='list'), - name = dict(default=None), - net = dict(default=None), - pid = dict(default=None), - insecure_registry = dict(default=False, type='bool'), - log_driver = dict(default=None, choices=['json-file', 'none', 'syslog', 'journald', 'gelf', 'fluentd']), - log_opt = dict(default=None, type='dict'), - cpu_set = dict(default=None), - cap_add = dict(default=None, type='list'), - cap_drop = dict(default=None, type='list'), - read_only = dict(default=None, type='bool'), - stop_timeout = dict(default=10, type='int'), - ), - required_together = ( - ['tls_client_cert', 'tls_client_key'], - ), - ) - - 
check_dependencies(module) - - try: - manager = DockerManager(module) - count = int(module.params.get('count')) - name = module.params.get('name') - pull = module.params.get('pull') - - state = module.params.get('state') - if state == 'running': - # Renamed running to started in 1.9 - state = 'started' - - if count < 0: - module.fail_json(msg="Count must be greater than zero") - - if count > 1 and name: - module.fail_json(msg="Count and name must not be used together") - - # Explicitly pull new container images, if requested. Do this before - # noticing running and deployed containers so that the image names - # will differ if a newer image has been pulled. - # Missing images should be pulled first to avoid downtime when old - # container is stopped, but image for new one is now downloaded yet. - # It also prevents removal of running container before realizing - # that requested image cannot be retrieved. - if pull == "always" or (state == 'reloaded' and manager.get_inspect_image() is None): - manager.pull_image() - - containers = ContainerSet(manager) - - if state == 'present': - present(manager, containers, count, name) - elif state == 'started': - started(manager, containers, count, name) - elif state == 'reloaded': - reloaded(manager, containers, count, name) - elif state == 'restarted': - restarted(manager, containers, count, name) - elif state == 'stopped': - stopped(manager, containers, count, name) - elif state == 'killed': - killed(manager, containers, count, name) - elif state == 'absent': - absent(manager, containers, count, name) - else: - module.fail_json(msg='Unrecognized state %s. Must be one of: ' - 'present; started; reloaded; restarted; ' - 'stopped; killed; absent.' 
% state) - - module.exit_json(changed=manager.has_changed(), - msg=manager.get_summary_message(), - summary=manager.counters, - reload_reasons=manager.get_reload_reason_message(), - ansible_facts=_ansible_facts(containers.changed)) - - except DockerAPIError as e: - module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation) - - except RequestException as e: - module.fail_json(changed=manager.has_changed(), msg=repr(e)) - -# import module snippets -from ansible.module_utils.basic import * - -if __name__ == '__main__': - main() diff --git a/ansible/roles/vm_set/library/vm_topology.py b/ansible/roles/vm_set/library/vm_topology.py index d8e8af640cc..d7d2d001081 100644 --- a/ansible/roles/vm_set/library/vm_topology.py +++ b/ansible/roles/vm_set/library/vm_topology.py @@ -5,7 +5,7 @@ import os import os.path import re -from docker import Client +import docker from ansible.module_utils.basic import * import traceback from pprint import pprint @@ -550,13 +550,13 @@ def ifconfig(cmdline): @staticmethod def get_pid(ptf_name): - cli = Client(base_url='unix://var/run/docker.sock') + cli = docker.from_env() try: - result = cli.inspect_container(ptf_name) + ctn = cli.containers.get(ptf_name) except: return None - return result['State']['Pid'] + return ctn.attrs['State']['Pid'] @staticmethod def brctl(cmdline): diff --git a/ansible/roles/vm_set/tasks/add_topo.yml b/ansible/roles/vm_set/tasks/add_topo.yml index a23d029dd1c..1266a804725 100644 --- a/ansible/roles/vm_set/tasks/add_topo.yml +++ b/ansible/roles/vm_set/tasks/add_topo.yml @@ -10,18 +10,24 @@ ptf_imagetag: "latest" when: ptf_imagetag is not defined -- name: Create a docker container ptf_{{ vm_set_name }} - docker: +- name: Login into docker registry + docker_login: registry: "{{ docker_registry_host }}" username: "{{ docker_registry_username }}" password: "{{ docker_registry_password }}" + become: yes + +- name: Create ptf container ptf_{{ vm_set_name }} + docker_container: name: ptf_{{ 
vm_set_name }} image: "{{ docker_registry_host }}/{{ ptf_imagename }}:{{ ptf_imagetag }}" - pull: always - state: reloaded - net: none + pull: yes + state: started + restart: yes + network_mode: none detach: True - cap_add: NET_ADMIN + capabilities: + - net_admin privileged: yes become: yes diff --git a/ansible/roles/vm_set/tasks/docker.yml b/ansible/roles/vm_set/tasks/docker.yml index c6936bf2882..30c440ade03 100644 --- a/ansible/roles/vm_set/tasks/docker.yml +++ b/ansible/roles/vm_set/tasks/docker.yml @@ -28,8 +28,3 @@ apt: pkg=docker-ce update_cache=yes become: yes environment: "{{ proxy_env | default({}) }}" - -- name: Install python packages - pip: name=docker-py state=present version=1.7.2 - become: yes - environment: "{{ proxy_env | default({}) }}" diff --git a/ansible/roles/vm_set/tasks/remove_topo.yml b/ansible/roles/vm_set/tasks/remove_topo.yml index 9201357d588..925487e4140 100644 --- a/ansible/roles/vm_set/tasks/remove_topo.yml +++ b/ansible/roles/vm_set/tasks/remove_topo.yml @@ -22,8 +22,7 @@ when: external_port is defined - name: Remove ptf docker container ptf_{{ vm_set_name }} - docker: + docker_container: name: ptf_{{ vm_set_name }} - image: "{{ docker_registry_host }}/{{ ptf_imagename }}" state: absent become: yes diff --git a/ansible/roles/vm_set/tasks/renumber_topo.yml b/ansible/roles/vm_set/tasks/renumber_topo.yml index 1f98a251ae3..48a9466e30c 100644 --- a/ansible/roles/vm_set/tasks/renumber_topo.yml +++ b/ansible/roles/vm_set/tasks/renumber_topo.yml @@ -1,22 +1,28 @@ +- name: Login into docker registry + docker_login: + registry: "{{ docker_registry_host }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + become: yes + - name: Remove ptf docker container ptf_{{ vm_set_name }} - docker: + docker_container: name: ptf_{{ vm_set_name }} - image: "{{ docker_registry_host }}/{{ ptf_imagename }}" state: absent become: yes -- name: Create a docker container ptf_{{ vm_set_name }} - docker: - registry: "{{ 
docker_registry_host }}"
-    username: "{{ docker_registry_username }}"
-    password: "{{ docker_registry_password }}"
+- name: Create ptf container ptf_{{ vm_set_name }}
+  docker_container:
     name: ptf_{{ vm_set_name }}
-    image: "{{ docker_registry_host }}/{{ ptf_imagename }}"
-    pull: always
-    state: reloaded
-    net: none
+    image: "{{ docker_registry_host }}/{{ ptf_imagename }}:{{ ptf_imagetag }}"
+    pull: yes
+    state: started
+    restart: yes
+    network_mode: none
     detach: True
-    cap_add: NET_ADMIN
+    capabilities:
+      - net_admin
+    privileged: yes
   become: yes

 - name: Set front panel/mgmt port for dut

From bf55baea24116fad63ba3ebd3e4c8ec36cdaff8d Mon Sep 17 00:00:00 2001
From: Guohan Lu
Date: Mon, 18 Nov 2019 10:44:14 +0000
Subject: [PATCH 2/7] fix with_items grammar in eos/tasks/main.yml

Signed-off-by: Guohan Lu
---
 ansible/roles/eos/tasks/main.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ansible/roles/eos/tasks/main.yml b/ansible/roles/eos/tasks/main.yml
index 3d7f2bc3e6c..8f2d3a80962 100644
--- a/ansible/roles/eos/tasks/main.yml
+++ b/ansible/roles/eos/tasks/main.yml
@@ -43,7 +43,7 @@
 - name: Expand {{ hostname }} properties into props
   set_fact:
     props="{{ configuration_properties[item] | combine(props | default({})) }}"
-  with_items: properties_list
+  with_items: "{{ properties_list }}"
   when: hostname in configuration and configuration_properties[item] is defined

 - name: copy rc.eos

From 8f69655ddd6a3df33c3d0c7128cd4356e973b45a Mon Sep 17 00:00:00 2001
From: Guohan Lu
Date: Mon, 18 Nov 2019 10:45:46 +0000
Subject: [PATCH 3/7] [ansible]: replace to_unicode with to_text

to_unicode is deprecated and removed starting ansible 2.5
---
 ansible/plugins/action/apswitch.py | 6 +++---
 ansible/plugins/action/onie.py     | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/ansible/plugins/action/apswitch.py b/ansible/plugins/action/apswitch.py
index 9ee15a77272..ae39718afb5 100644
--- a/ansible/plugins/action/apswitch.py
+++ 
b/ansible/plugins/action/apswitch.py @@ -2,8 +2,8 @@ __metaclass__ = type from ansible.plugins.action import ActionBase -from ansible.utils.boolean import boolean -from ansible.utils.unicode import to_unicode +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils._text import to_text import ast @@ -44,7 +44,7 @@ def run(self, tmp=None, task_vars=None): _template = self._loader.path_dwim_relative(self._loader.get_basedir(), 'templates', _template) f = open(_template, 'r') - template_data = to_unicode(f.read()) + template_data = to_text(f.read()) f.close() _template = self._templar.template(template_data) diff --git a/ansible/plugins/action/onie.py b/ansible/plugins/action/onie.py index b969b3cf1cb..16dd47edd7f 100644 --- a/ansible/plugins/action/onie.py +++ b/ansible/plugins/action/onie.py @@ -2,8 +2,8 @@ __metaclass__ = type from ansible.plugins.action import ActionBase -from ansible.utils.boolean import boolean -from ansible.utils.unicode import to_unicode +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils._text import to_text import ast @@ -34,7 +34,7 @@ def run(self, tmp=None, task_vars=None): _template = self._loader.path_dwim_relative(self._loader.get_basedir(), 'templates', _template) f = open(_template, 'r') - template_data = to_unicode(f.read()) + template_data = to_text(f.read()) f.close() _template = self._templar.template(template_data) From ebc0d9a960fcd7e1c0facc46166d6192dbf462f2 Mon Sep 17 00:00:00 2001 From: Guohan Lu Date: Mon, 18 Nov 2019 11:28:41 +0000 Subject: [PATCH 4/7] [ansible]: always quote variables used in with_items and with_dict Signed-off-by: Guohan Lu --- ansible/config_sonic_basedon_testbed.yml | 6 +++--- .../fanout/tasks/mlnx/deploy_pfcwd_fanout.yml | 6 +++--- ansible/roles/test/tasks/arpall.yml | 6 +++--- .../roles/test/tasks/bgp_multipath_relax.yml | 8 ++++---- .../test/tasks/check_sw_vm_interfaces.yml | 2 +- .../test/tasks/iface_naming_mode/show_acl.yml | 4 
++-- .../test/tasks/iface_naming_mode/show_arp.yml | 4 ++-- .../iface_naming_mode/show_interface.yml | 16 +++++++-------- .../tasks/iface_naming_mode/show_ip_route.yml | 8 ++++---- .../test/tasks/iface_naming_mode/show_ndp.yml | 4 ++-- .../iface_naming_mode/show_pfc_counters.yml | 4 ++-- .../iface_naming_mode/show_portchannel.yml | 4 ++-- .../iface_naming_mode/show_priority_group.yml | 16 +++++++-------- .../iface_naming_mode/show_queue_counters.yml | 20 +++++++++---------- .../tasks/iface_naming_mode/vlan_test.yml | 4 ++-- ansible/roles/test/tasks/interface.yml | 2 +- .../roles/test/tasks/interface_up_down.yml | 10 +++++----- ansible/roles/test/tasks/lag.yml | 2 +- ansible/roles/test/tasks/lag_2.yml | 6 +++--- ansible/roles/test/tasks/lagall.yml | 4 ++-- ansible/roles/test/tasks/pfc_asym.yml | 4 ++-- .../check_timer_accuracy_test.yml | 2 +- ansible/roles/test/tasks/portstat.yml | 2 +- .../test/tasks/portstat/portstat_clear.yml | 4 ++-- .../tasks/portstat/portstat_delete_tag.yml | 2 +- ansible/roles/test/tasks/snmp/lldp.yml | 2 +- ansible/roles/test/tasks/vxlan-decap.yml | 8 ++++---- 27 files changed, 80 insertions(+), 80 deletions(-) diff --git a/ansible/config_sonic_basedon_testbed.yml b/ansible/config_sonic_basedon_testbed.yml index d6dcb245b5b..54137a5435f 100644 --- a/ansible/config_sonic_basedon_testbed.yml +++ b/ansible/config_sonic_basedon_testbed.yml @@ -85,17 +85,17 @@ - name: find all interface indexes mapping connecting to VM set_fact: interface_to_vms: "{{ interface_to_vms|default({}) | combine({ item.key: item.value['interface_indexes'] }) }}" - with_dict: vm_topo_config['vm'] + with_dict: "{{ vm_topo_config['vm'] }}" - name: find all interface indexes connecting to VM set_fact: ifindex_to_vms: "{{ ifindex_to_vms|default([]) }} + {{ item.value['interface_indexes']}}" - with_dict: vm_topo_config['vm'] + with_dict: "{{ vm_topo_config['vm'] }}" - name: find all interface names set_fact: intf_names: "{{ intf_names | default({}) | combine({item.key: 
port_alias[item.value[0]|int:item.value[-1]|int+1] }) }}" - with_dict: interface_to_vms + with_dict: "{{ interface_to_vms }}" - name: create minigraph file in ansible minigraph folder template: src=templates/minigraph_template.j2 diff --git a/ansible/roles/fanout/tasks/mlnx/deploy_pfcwd_fanout.yml b/ansible/roles/fanout/tasks/mlnx/deploy_pfcwd_fanout.yml index caedf235adf..587c72a0134 100644 --- a/ansible/roles/fanout/tasks/mlnx/deploy_pfcwd_fanout.yml +++ b/ansible/roles/fanout/tasks/mlnx/deploy_pfcwd_fanout.yml @@ -19,7 +19,7 @@ command: make args: chdir: "{{item | dirname}}" - with_items: pfcwd_dockers + with_items: "{{ pfcwd_dockers }}" delegate_to: localhost when: pfcwd_dockers_url is not defined @@ -27,12 +27,12 @@ copy: src: "{{ item }}" dest: "{{fanout_img_path}}" - with_items: pfcwd_dockers + with_items: "{{ pfcwd_dockers }}" when: pfcwd_dockers_url is not defined - name: Download pre-built pfcwd dockers if path specified get_url: url={{pfcwd_dockers_url}}{{item | basename}} dest={{fanout_img_path}}/{{item | basename}} - with_items: pfcwd_dockers + with_items: "{{ pfcwd_dockers }}" when: pfcwd_dockers_url is defined - block: diff --git a/ansible/roles/test/tasks/arpall.yml b/ansible/roles/test/tasks/arpall.yml index 9e5fd022b15..7fc2fdd7e20 100644 --- a/ansible/roles/test/tasks/arpall.yml +++ b/ansible/roles/test/tasks/arpall.yml @@ -7,7 +7,7 @@ - name: get all available interface names set_fact: ports: "{{ ports|default([]) + [ item | regex_replace('Ethernet', '') | int ] }}" - with_items: minigraph_ports + with_items: "{{ minigraph_ports }}" - name: get all interface numbers set_fact: @@ -27,7 +27,7 @@ set_fact: po1: "{{ item.key }}" when: intf1 in item.value['members'] - with_dict: minigraph_portchannels + with_dict: "{{ minigraph_portchannels }}" - name: move interface {{ intf1 }} out of {{ po1 }} shell: teamdctl {{ po1 }} port remove {{ intf1 }} @@ -43,7 +43,7 @@ set_fact: po2: "{{ item.key }}" when: intf2 in item.value['members'] - with_dict: 
minigraph_portchannels + with_dict: "{{ minigraph_portchannels }}" - name: move {{ intf2 }} out of {{ po2 }} shell: teamdctl {{ po2 }} port remove {{ intf2 }} diff --git a/ansible/roles/test/tasks/bgp_multipath_relax.yml b/ansible/roles/test/tasks/bgp_multipath_relax.yml index d829f818fc2..750dad8928e 100644 --- a/ansible/roles/test/tasks/bgp_multipath_relax.yml +++ b/ansible/roles/test/tasks/bgp_multipath_relax.yml @@ -18,7 +18,7 @@ - name: Find all V4 bgp neighbors from minigraph set_fact: bgp_v4nei: "{{ bgp_v4nei | default({}) | combine({ item['name']: item['addr'] }) }}" - with_items: minigraph_bgp + with_items: "{{ minigraph_bgp }}" when: "'::' not in item['addr']" - include_vars: "vars/topo_{{ testbed_type }}.yml" @@ -74,7 +74,7 @@ assert: that: - vips_asn in item - with_items: bgp_route[vips_prefix]['aspath'] + with_items: "{{ bgp_route[vips_prefix]['aspath'] }}" #### Verify each t2 adv routes: this option takes time and resources; verified working but print out too much, comment this #### out unless we have a reason to do so @@ -90,7 +90,7 @@ # - item.ansible_facts['bgp_route_neiadv'][vips_prefix]['aspath'] | length == 2 # - vips_asn in item.ansible_facts['bgp_route_neiadv'][vips_prefix]['aspath'] # - vips_asn == item.ansible_facts['bgp_route_neiadv'][vips_prefix]['aspath'][-1] -# with_items: adv_t2_results.results +# with_items: "{{ adv_t2_results.results }}" ######### Verify each t2 option @@ -109,4 +109,4 @@ assert: that: - item in "{{ vips_t0 + [vips_asn] }}" - with_items: bgp_route_neiadv[vips_prefix]['aspath'] + with_items: "{{ bgp_route_neiadv[vips_prefix]['aspath'] }}" diff --git a/ansible/roles/test/tasks/check_sw_vm_interfaces.yml b/ansible/roles/test/tasks/check_sw_vm_interfaces.yml index 9a09d4968e4..9b4362ddbbf 100644 --- a/ansible/roles/test/tasks/check_sw_vm_interfaces.yml +++ b/ansible/roles/test/tasks/check_sw_vm_interfaces.yml @@ -47,7 +47,7 @@ connection: switch ignore_errors: yes when: vms["{{ item }}"]['hwsku'] == 'Arista-VM' - 
with_items: vms + with_items: "{{ vms }}" register: vm_portchannel_status - name: Debug Port-Channel on VMs diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_acl.yml b/ansible/roles/test/tasks/iface_naming_mode/show_acl.yml index 5b6837eca4c..d950bb87294 100644 --- a/ansible/roles/test/tasks/iface_naming_mode/show_acl.yml +++ b/ansible/roles/test/tasks/iface_naming_mode/show_acl.yml @@ -10,10 +10,10 @@ - name: check acl table output shows default interface names when mode is default assert: {that: item in acl_table.stdout} - with_items: minigraph_acls['DataAcl'] + with_items: "{{ minigraph_acls['DataAcl'] }}" when: mode=='default' and item not in minigraph_portchannels - name: check acl table output shows alias interface names when mode is set to alias assert: {that: " '{{port_name_map[item]}}' in acl_table.stdout"} - with_items: minigraph_acls['DataAcl'] + with_items: "{{ minigraph_acls['DataAcl'] }}" when: mode=='alias' and item not in minigraph_portchannels diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_arp.yml b/ansible/roles/test/tasks/iface_naming_mode/show_arp.yml index 1160c90f1a8..e5cdc82de60 100644 --- a/ansible/roles/test/tasks/iface_naming_mode/show_arp.yml +++ b/ansible/roles/test/tasks/iface_naming_mode/show_arp.yml @@ -17,12 +17,12 @@ assert: that: - arp_output.stdout | search("{{item}}.*\s+{{arptable['v4'][item]['interface']}}") - with_items: arptable['v4'] + with_items: "{{ arptable['v4'] }}" when: arptable['v4'][item]['interface']!='eth0' and mode=='default' - name: Check the output shows alias interface names corresponding to the arp assert: that: - arp_output.stdout | search("{{item}}.*\s+{{port_name_map[arptable['v4'][item]['interface']]}}") - with_items: arptable['v4'] + with_items: "{{ arptable['v4'] }}" when: arptable['v4'][item]['interface']!='eth0' and mode =='alias' diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_interface.yml b/ansible/roles/test/tasks/iface_naming_mode/show_interface.yml index 
bfbc9f6ff51..5a031cbd04f 100644 --- a/ansible/roles/test/tasks/iface_naming_mode/show_interface.yml +++ b/ansible/roles/test/tasks/iface_naming_mode/show_interface.yml @@ -12,14 +12,14 @@ assert: that: - show_int_neighbor.stdout | search("{{port_name_map[item]}}\s+{{minigraph_neighbors[item]['name']}}") - with_items: minigraph_neighbors + with_items: "{{ minigraph_neighbors }}" when: mode=='alias' - name: Check show interfaces neighbor expected in default mode assert: that: - show_int_neighbor.stdout | search("{{item}}\s+{{minigraph_neighbors[item]['name']}}") - with_items: minigraph_neighbors + with_items: "{{ minigraph_neighbors }}" when: mode=='default' # show ip interfaces @@ -34,14 +34,14 @@ assert: that: - show_ip_intf.stdout | search("{{port_name_map[item['attachto']]}}\s+{{item['addr']}}") - with_items: minigraph_interfaces + with_items: "{{ minigraph_interfaces }}" when: item.addr|ipv4 and mode=='alias' - name: Check intf name and ip address in show ip interface output in default mode assert: that: - show_ip_intf.stdout | search("{{item['attachto']}}\s+{{item['addr']}}") - with_items: minigraph_interfaces + with_items: "{{ minigraph_interfaces }}" when: item.addr|ipv4 and mode=='default' - name: Get show ipv6 interface output @@ -54,14 +54,14 @@ assert: that: - show_ipv6_intf.stdout | search("{{port_name_map[item['attachto']]}}\s+{{item['addr']}}") - with_items: minigraph_interfaces + with_items: "{{ minigraph_interfaces }}" when: item.addr|ipv6 and mode=='alias' - name: Check intf name and ip address in show ip interface output in default mode assert: that: - show_ipv6_intf.stdout | search("{{item['attachto']}}\s+{{item['addr']}}") - with_items: minigraph_interfaces + with_items: "{{ minigraph_interfaces }}" when: item.addr|ipv6 and mode=='default' @@ -82,12 +82,12 @@ - name: check counter output in alias mode assert: {that: item in port_alias} - with_items: int_counter + with_items: "{{ int_counter }}" when: mode=="alias" - name: check counter output in 
default mode assert: {that: item in default_interfaces} - with_items: int_counter + with_items: "{{ int_counter }}" when: mode=="default" # show interface description diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_ip_route.yml b/ansible/roles/test/tasks/iface_naming_mode/show_ip_route.yml index 0319b838355..c1dad21a65a 100644 --- a/ansible/roles/test/tasks/iface_naming_mode/show_ip_route.yml +++ b/ansible/roles/test/tasks/iface_naming_mode/show_ip_route.yml @@ -30,14 +30,14 @@ assert: that: - route.stdout | search("via {{item}}") - with_items: spine_port_alias + with_items: "{{ spine_port_alias }}" when: mode=='alias' - name: check the output shows default interface names in default mdoe assert: that: - route.stdout | search(" via {{item}}") - with_items: spine_ports + with_items: "{{ spine_ports }}" when: mode=='default' # Test ipv6 routes @@ -55,12 +55,12 @@ assert: that: - route.stdout | search("via {{item}}") - with_items: spine_port_alias + with_items: "{{ spine_port_alias }}" when: mode =='alias' - name: check the output shows default interface names in default mdoe assert: that: - route.stdout | search(" via {{item}}") - with_items: spine_ports + with_items: "{{ spine_ports }}" when: mode =='default' diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_ndp.yml b/ansible/roles/test/tasks/iface_naming_mode/show_ndp.yml index 3900b0fb27f..3eac8349e70 100644 --- a/ansible/roles/test/tasks/iface_naming_mode/show_ndp.yml +++ b/ansible/roles/test/tasks/iface_naming_mode/show_ndp.yml @@ -17,13 +17,13 @@ assert: that: - ndp_output.stdout | search("{{item}}.*\s+{{arptable['v6'][item]['interface']}}") - with_items: arptable['v6'] + with_items: "{{ arptable['v6'] }}" when: arptable['v6'][item]['interface']!='eth0' and mode=='default' - name: Check the output shows alias interface names corresponding to the neighbor assert: that: - ndp_output.stdout | search("{{item}}.*\s+{{port_name_map[arptable['v6'][item]['interface']]}}") - with_items: 
arptable['v6'] + with_items: "{{ arptable['v6'] }}" when: arptable['v6'][item]['interface']!='eth0' and mode =='alias' diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_pfc_counters.yml b/ansible/roles/test/tasks/iface_naming_mode/show_pfc_counters.yml index b7e28d88744..f164d8f17cf 100644 --- a/ansible/roles/test/tasks/iface_naming_mode/show_pfc_counters.yml +++ b/ansible/roles/test/tasks/iface_naming_mode/show_pfc_counters.yml @@ -16,7 +16,7 @@ that: - "'{{item}}' in pfc_rx.stdout and '{{item}}' in pfc_tx.stdout " - "'{{port_name_map[item]}}' not in pfc_rx.stdout and '{{port_name_map[item]}}' not in pfc_tx.stdout" - with_items: default_interfaces + with_items: "{{ default_interfaces }}" when: mode=='default' - name: check if the output shows alias interface names in alias mode @@ -24,7 +24,7 @@ that: - "'{{item}}' in pfc_rx.stdout and '{{item}}' in pfc_tx.stdout " - "'{{port_alias_map[item]}}' not in pfc_rx.stdout and '{{port_alias_map[item]}}' not in pfc_tx.stdout " - with_items: port_alias + with_items: "{{ port_alias }}" when: mode=='alias' # As the ansible work in non interactive mode, it doesnt read the environmental varaiable set in bashrc file. Hence as a workaround,the variable is extracted through check_userifmode.yml and manually set the variable 'SONIC_CLI_IFACE_MODE' to take effect. 
diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_portchannel.yml b/ansible/roles/test/tasks/iface_naming_mode/show_portchannel.yml index c09f284bfc2..248ce0b22f1 100644 --- a/ansible/roles/test/tasks/iface_naming_mode/show_portchannel.yml +++ b/ansible/roles/test/tasks/iface_naming_mode/show_portchannel.yml @@ -9,14 +9,14 @@ assert: that: - int_po.stdout | search("{{item.key}}\s+LACP\(A\)\(Up\).*{{item.value['members'][0]}}") - with_dict: minigraph_portchannels + with_dict: "{{ minigraph_portchannels }}" when: mode=='default' - name: check show interface portchannel output shows alias name in alias mode assert: that: - int_po.stdout | search("{{item.key}}\s+LACP\(A\)\(Up\).*{{port_name_map[item.value['members'][0]]}}") - with_dict: minigraph_portchannels + with_dict: "{{ minigraph_portchannels }}" when: mode=='alias' # As the ansible work in non interactive mode, it doesnt read the environmental varaiable set in bashrc file. Hence as a workaround, the variable is extracted through check_userifmode.yml and manually set the variable 'SONIC_CLI_IFACE_MODE' to take effect. 
diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_priority_group.yml b/ansible/roles/test/tasks/iface_naming_mode/show_priority_group.yml index 9debc960cca..f3cf49fb2fd 100644 --- a/ansible/roles/test/tasks/iface_naming_mode/show_priority_group.yml +++ b/ansible/roles/test/tasks/iface_naming_mode/show_priority_group.yml @@ -10,14 +10,14 @@ assert: that: - show_pg.stdout | search("{{item}}.*") - with_items: upport_alias_list + with_items: "{{ upport_alias_list }}" when: mode=='alias' - name: Check "show priority-group persistent-watermark shared" in default mode assert: that: - show_pg.stdout | search("{{item}}.*") - with_items: up_ports + with_items: "{{ up_ports }}" when: mode=='default' - name: show priority-group persistent-watermark headroom in {{mode}} mode @@ -30,14 +30,14 @@ assert: that: - show_pg.stdout | search("{{item}}.*") - with_items: upport_alias_list + with_items: "{{ upport_alias_list }}" when: mode=='alias' - name: Check "show priority-group persistent-watermark headroom" in default mode assert: that: - show_pg.stdout | search("{{item}}.*") - with_items: up_ports + with_items: "{{ up_ports }}" when: mode=='default' @@ -51,14 +51,14 @@ assert: that: - show_pg.stdout | search("{{item}}.*") - with_items: upport_alias_list + with_items: "{{ upport_alias_list }}" when: mode=='alias' - name: Check "show priority-group watermark shared" in default mode assert: that: - show_pg.stdout | search("{{item}}.*") - with_items: up_ports + with_items: "{{ up_ports }}" when: mode=='default' - name: show priority-group watermark headroom in {{mode}} mode @@ -71,14 +71,14 @@ assert: that: - show_pg.stdout | search("{{item}}.*") - with_items: upport_alias_list + with_items: "{{ upport_alias_list }}" when: mode=='alias' - name: Check "show priority-group watermark headroom" in default mode assert: that: - show_pg.stdout | search("{{item}}.*") - with_items: up_ports + with_items: "{{ up_ports }}" when: mode=='default' # As the ansible work in non interactive 
mode, it doesnt read the environmental varaiable set in bashrc file. Hence as a workaround, the variable is extracted through check_userifmode.yml and manually set the variable 'SONIC_CLI_IFACE_MODE' to take effect. diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_queue_counters.yml b/ansible/roles/test/tasks/iface_naming_mode/show_queue_counters.yml index 6c114858633..5f8ea8374fd 100644 --- a/ansible/roles/test/tasks/iface_naming_mode/show_queue_counters.yml +++ b/ansible/roles/test/tasks/iface_naming_mode/show_queue_counters.yml @@ -21,14 +21,14 @@ assert: that: - queue_counter.stdout | search("{{item}}\s+[UC|MC\d]+\s+\S+\s+\S+\s+\S+\s+\S+") and '{{port_name_map[item]}}' not in queue_counter.stdout - with_items: default_interfaces + with_items: "{{ default_interfaces }}" when: mode=='default' - name: Check alias interface name is present in output when mode is set to alias assert: that: - queue_counter.stdout | search("{{item}}\s+[UC|MC\d]+\s+\S+\s+\S+\s+\S+\s+\S+") and '{{port_alias_map[item]}}' not in queue_counter.stdout - with_items: port_alias + with_items: "{{ port_alias }}" when: mode=='alias' - name: show queue watermark in {{mode}} mode @@ -41,14 +41,14 @@ assert: that: - show_queue_wm_mcast.stdout | search("{{item}}") - with_items: port_alias + with_items: "{{ port_alias }}" when: mode=='alias' - name: Check show queue watermark multicast in default mode assert: that: - show_queue_wm_mcast.stdout | search("{{item}}") - with_items: default_interfaces + with_items: "{{ default_interfaces }}" when: mode=='default' - name: show queue watermark unicast in {{mode}} mode @@ -61,14 +61,14 @@ assert: that: - show_queue_wm_ucast.stdout | search("{{item}}") - with_items: port_alias + with_items: "{{ port_alias }}" when: mode=='alias' - name: Check show queue watermark multicast in default mode assert: that: - show_queue_wm_ucast.stdout | search("{{item}}") - with_items: default_interfaces + with_items: "{{ default_interfaces }}" when: mode=='default' - 
name: show queue persistent-watermark in {{mode}} mode @@ -81,14 +81,14 @@ assert: that: - show_queue_wm_mcast.stdout | search("{{item}}") - with_items: port_alias + with_items: "{{ port_alias }}" when: mode=='alias' - name: Check show queue persistent-watermark multicast in default mode assert: that: - show_queue_wm_mcast.stdout | search("{{item}}") - with_items: default_interfaces + with_items: "{{ default_interfaces }}" when: mode=='default' - name: show queue persistent-watermark unicast in {{mode}} mode @@ -101,14 +101,14 @@ assert: that: - show_queue_wm_ucast.stdout | search("{{item}}") - with_items: port_alias + with_items: "{{ port_alias }}" when: mode=='alias' - name: Check show queue persistent-watermark multicast in default mode assert: that: - show_queue_wm_ucast.stdout | search("{{item}}") - with_items: default_interfaces + with_items: "{{ default_interfaces }}" when: mode=='default' diff --git a/ansible/roles/test/tasks/iface_naming_mode/vlan_test.yml b/ansible/roles/test/tasks/iface_naming_mode/vlan_test.yml index 90d9e3f3441..0d475cb129a 100644 --- a/ansible/roles/test/tasks/iface_naming_mode/vlan_test.yml +++ b/ansible/roles/test/tasks/iface_naming_mode/vlan_test.yml @@ -18,14 +18,14 @@ assert: that: - show_vlan_brief.stdout | search("{{port_name_map[item]}}.*untagged") - with_items: minigraph_vlans['Vlan1000']['members'] + with_items: "{{ minigraph_vlans['Vlan1000']['members'] }}" when: mode=="alias" - name: check interface names of minigraph vlan 1000 are shown as default names assert: that: - show_vlan_brief.stdout | search("{{item}}.*untagged") - with_items: minigraph_vlans['Vlan1000']['members'] + with_items: "{{ minigraph_vlans['Vlan1000']['members'] }}" when: mode=="default" # add vlan diff --git a/ansible/roles/test/tasks/interface.yml b/ansible/roles/test/tasks/interface.yml index 90fb9134a77..322de5ff552 100644 --- a/ansible/roles/test/tasks/interface.yml +++ b/ansible/roles/test/tasks/interface.yml @@ -17,7 +17,7 @@ - set_fact: 
neighbors="{{device_conn}}" - include: resume_fanout_ports.yml - with_items: ansible_interface_link_down_ports + with_items: "{{ ansible_interface_link_down_ports }}" - name: pause and wait interface to be up pause: seconds=30 diff --git a/ansible/roles/test/tasks/interface_up_down.yml b/ansible/roles/test/tasks/interface_up_down.yml index f9de040a147..56dcb7ec69e 100644 --- a/ansible/roles/test/tasks/interface_up_down.yml +++ b/ansible/roles/test/tasks/interface_up_down.yml @@ -41,7 +41,7 @@ - name: verify all local interfaces are up assert: { that: "ansible_{{ item }}['active'] == true" } - with_items: ansible_interfaces + with_items: "{{ ansible_interfaces }}" when: - item | match("Ethernet.*") @@ -63,7 +63,7 @@ login: "{{ switch_login[ansible_interface_up_down_data_struct_facts[item]['nei_device_type']] }}" skip_default_user: "yes" connection: cisco - with_items: ansible_interface_up_down_data_struct_facts.keys() + with_items: "{{ ansible_interface_up_down_data_struct_facts.keys() }}" - name: sleep for some time pause: seconds=5 @@ -73,7 +73,7 @@ - name: verify all local interfaces are down assert: { that: "ansible_{{ item }}['active'] == false" } - with_items: ansible_interfaces + with_items: "{{ ansible_interfaces }}" when: - item | match("Ethernet.*") @@ -84,7 +84,7 @@ login: "{{ switch_login[ansible_interface_up_down_data_struct_facts[item]['nei_device_type']] }}" skip_default_user: "yes" connection: cisco - with_items: ansible_interface_up_down_data_struct_facts.keys() + with_items: "{{ ansible_interface_up_down_data_struct_facts.keys() }}" - name: sleep for some time pause: seconds=5 @@ -94,6 +94,6 @@ - name: verify all local interfaces are up assert: { that: "ansible_{{ item }}['active'] == true" } - with_items: ansible_interfaces + with_items: "{{ ansible_interfaces }}" when: - item | match("Ethernet.*") diff --git a/ansible/roles/test/tasks/lag.yml b/ansible/roles/test/tasks/lag.yml index 3be043c1a69..0b409f32526 100644 --- 
a/ansible/roles/test/tasks/lag.yml +++ b/ansible/roles/test/tasks/lag.yml @@ -67,7 +67,7 @@ - name: "Run {{ testname }} with changing member port state to up and down" include: per_lag_test.yml - with_items: minigraph_portchannel_interfaces + with_items: "{{ minigraph_portchannel_interfaces }}" always: - include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml diff --git a/ansible/roles/test/tasks/lag_2.yml b/ansible/roles/test/tasks/lag_2.yml index b6f1e6e530c..3ba0b26b5d6 100644 --- a/ansible/roles/test/tasks/lag_2.yml +++ b/ansible/roles/test/tasks/lag_2.yml @@ -71,15 +71,15 @@ - name: test each lag interface minimum links and rate include: single_lag_test.yml - with_items: lag_facts.names + with_items: "{{ lag_facts.names }}" when: lag_facts.lags[item]['po_config']['runner']['min_ports'] is defined and test_minlink|bool == true - name: test each lag interface LACP DU rate include: single_lag_lacp_rate_test.yml - with_items: lag_facts.names + with_items: "{{ lag_facts.names }}" when: lag_facts.lags[item]['po_config']['runner']['min_ports'] is defined and test_rate|bool == true - name: test each lag interface with fallback config include: lag_fallback.yml - with_items: lag_facts.names + with_items: "{{ lag_facts.names }}" when: lag_facts.lags[item]['po_config']['runner']['fallback'] is defined diff --git a/ansible/roles/test/tasks/lagall.yml b/ansible/roles/test/tasks/lagall.yml index 116f313a3e3..743288a3290 100644 --- a/ansible/roles/test/tasks/lagall.yml +++ b/ansible/roles/test/tasks/lagall.yml @@ -75,7 +75,7 @@ - name: Add VMs information to in-memory inventory. 
add_host: name={{ lag_vms[item][0] }} ansible_ssh_user={{ switch_login['Arista']['user'] }} ansible_ssh_pass={{ switch_login['Arista']['passwd'][0] }} - with_items: lag_vms.keys() + with_items: "{{ lag_vms.keys() }}" #----------------------------------- # Start tests @@ -100,7 +100,7 @@ lag_member_1: "{{ lag_vms[item][2] }}" vm_ip: "{{ lag_vms[item][0] }}" vm_name: "{{ item }}" - with_items: lag_vms.keys() + with_items: "{{ lag_vms.keys() }}" # Get a list of LAGs on DUT. - set_fact: diff --git a/ansible/roles/test/tasks/pfc_asym.yml b/ansible/roles/test/tasks/pfc_asym.yml index d3f070ab82f..6b02e1ff402 100644 --- a/ansible/roles/test/tasks/pfc_asym.yml +++ b/ansible/roles/test/tasks/pfc_asym.yml @@ -208,7 +208,7 @@ - name: Enable asymmetric PFC on all server interfaces command: config interface pfc asymmetric {{ item.dut_name }} on become: yes - with_items: '{{ server_ports }}' + with_items: "{{ server_ports }}" - name: Start PTF runner include: ptf_runner.yml @@ -260,7 +260,7 @@ - name: Disable asymmetric PFC on all server interfaces command: config interface pfc asymmetric {{ item.dut_name }} off become: yes - with_items: '{{ server_ports }}' + with_items: "{{ server_ports }}" - name: Remove SAI tests from PTF container file: path=/root/saitests state=absent diff --git a/ansible/roles/test/tasks/pfc_wd/functional_test/check_timer_accuracy_test.yml b/ansible/roles/test/tasks/pfc_wd/functional_test/check_timer_accuracy_test.yml index cc0b03a582f..da723391f3d 100644 --- a/ansible/roles/test/tasks/pfc_wd/functional_test/check_timer_accuracy_test.yml +++ b/ansible/roles/test/tasks/pfc_wd/functional_test/check_timer_accuracy_test.yml @@ -38,7 +38,7 @@ - debug: var: "{{item}}" - with_items: + with_items: - detect_time_list - restore_time_list diff --git a/ansible/roles/test/tasks/portstat.yml b/ansible/roles/test/tasks/portstat.yml index e4bf866630b..2f1e273410c 100644 --- a/ansible/roles/test/tasks/portstat.yml +++ b/ansible/roles/test/tasks/portstat.yml @@ -55,7 +55,7 
@@ - name: Run the commands shell: "{{ item }}" - with_items: portstat_commands + with_items: "{{ portstat_commands }}" always: - name: Clear and reset counters diff --git a/ansible/roles/test/tasks/portstat/portstat_clear.yml b/ansible/roles/test/tasks/portstat/portstat_clear.yml index 851421cc42e..db32c84ec39 100644 --- a/ansible/roles/test/tasks/portstat/portstat_clear.yml +++ b/ansible/roles/test/tasks/portstat/portstat_clear.yml @@ -12,7 +12,7 @@ - name: Find ethernet0 set_fact: portstat_eth0: "{{ item }}" - with_items: before_out.stdout_lines + with_items: "{{ before_out.stdout_lines }}" when: "'Ethernet0' in item" - name: Pull out RX and TX OK counters @@ -35,7 +35,7 @@ - name: Find ethernet0 set_fact: portstat_eth0: "{{ item }}" - with_items: after_out.stdout_lines + with_items: "{{ after_out.stdout_lines }}" when: "'Ethernet0' in item" - name: Pull out RX and TX OK counters diff --git a/ansible/roles/test/tasks/portstat/portstat_delete_tag.yml b/ansible/roles/test/tasks/portstat/portstat_delete_tag.yml index 1ab849fff1e..51d3dbcea0e 100644 --- a/ansible/roles/test/tasks/portstat/portstat_delete_tag.yml +++ b/ansible/roles/test/tasks/portstat/portstat_delete_tag.yml @@ -24,7 +24,7 @@ - name: create the list of files to not be deleted set_fact: files_not_deleted: "{{files_not_deleted}} + ['{{item}}']" - with_items: file_names + with_items: "{{ file_names }}" when: file_to_delete != item - name: create several test stats files diff --git a/ansible/roles/test/tasks/snmp/lldp.yml b/ansible/roles/test/tasks/snmp/lldp.yml index 4d483650d3f..5bb1301d8ff 100644 --- a/ansible/roles/test/tasks/snmp/lldp.yml +++ b/ansible/roles/test/tasks/snmp/lldp.yml @@ -65,7 +65,7 @@ set_fact: minigraph_lldp_nei: "{{ minigraph_lldp_nei|default({}) | combine({ item.key : item.value}) }}" when: "'server' not in item.value['name'] | lower" - with_dict: minigraph_neighbors + with_dict: "{{ minigraph_neighbors }}" - name: Check if lldpRemTable is present on port {{ item.value.name }} 
when: "{{ item.value['lldpRemTimeMark'] is defined }} diff --git a/ansible/roles/test/tasks/vxlan-decap.yml b/ansible/roles/test/tasks/vxlan-decap.yml index f2ac0d23370..1a1bf6fbb90 100644 --- a/ansible/roles/test/tasks/vxlan-decap.yml +++ b/ansible/roles/test/tasks/vxlan-decap.yml @@ -45,7 +45,7 @@ - name: Render DUT vxlan configuration. Tunnel Maps template: src=vxlan_db.maps.json.j2 dest=/tmp/vxlan_db.maps.{{ item }}.json - with_items: minigraph_vlans + with_items: "{{ minigraph_vlans }}" - include: ptf_runner.yml vars: @@ -65,7 +65,7 @@ - name: Configure vxlan decap tunnel maps shell: sonic-cfggen -j /tmp/vxlan_db.maps.{{ item }}.json --write-to-db - with_items: minigraph_vlans + with_items: "{{ minigraph_vlans }}" - include: ptf_runner.yml vars: @@ -82,7 +82,7 @@ - name: Remove vxlan tunnel maps configuration shell: docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL_MAP|tunnelVxlan|map{{ item }}" - with_items: minigraph_vlans + with_items: "{{ minigraph_vlans }}" - name: Remove vxlan tunnel configuration shell: docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL|tunnelVxlan" @@ -103,7 +103,7 @@ - always: - name: Remove vxlan tunnel maps configuration shell: docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL_MAP|tunnelVxlan|map{{ item }}" - with_items: minigraph_vlans + with_items: "{{ minigraph_vlans }}" - name: Remove vxlan tunnel configuration shell: docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL|tunnelVxlan" From 11c483130afcd3671c7f68223117047f397860cd Mon Sep 17 00:00:00 2001 From: Guohan Lu Date: Mon, 18 Nov 2019 11:56:19 +0000 Subject: [PATCH 5/7] [ansible]: move eos reboot handler into main.yml use listen for "Update VM state" You cannot notify a handler that is defined inside of an include. 
Signed-off-by: Guohan Lu --- .../handlers/common_handlers/update_state.yml | 23 --------------- ansible/roles/eos/handlers/main.yml | 28 +++++++++++++++++-- 2 files changed, 26 insertions(+), 25 deletions(-) delete mode 100755 ansible/roles/eos/handlers/common_handlers/update_state.yml diff --git a/ansible/roles/eos/handlers/common_handlers/update_state.yml b/ansible/roles/eos/handlers/common_handlers/update_state.yml deleted file mode 100755 index d84085a058f..00000000000 --- a/ansible/roles/eos/handlers/common_handlers/update_state.yml +++ /dev/null @@ -1,23 +0,0 @@ -- name: Reboot the VM - command: /sbin/shutdown -r now "Ansible updates triggered" - async: 300 - poll: 0 - ignore_errors: true - -- name: Wait for VM to shutdown - wait_for: - host: "{{ ansible_ssh_host }}" - port: 22 - state: stopped - delay: 10 - timeout: 300 - connection: local - -- name: Wait for VM to startup - wait_for: - host: "{{ ansible_ssh_host }}" - port: 22 - state: started - delay: 10 - timeout: 1200 - connection: local diff --git a/ansible/roles/eos/handlers/main.yml b/ansible/roles/eos/handlers/main.yml index fd86a76c7d6..a99ee1aa321 100755 --- a/ansible/roles/eos/handlers/main.yml +++ b/ansible/roles/eos/handlers/main.yml @@ -1,5 +1,29 @@ # Notify handlers are always run in the same order they are defined, not in the order listed in the notify-statement. # This is also the case for handlers using listen. 
-- name: Update VM state - include: roles/eos/handlers/common_handlers/update_state.yml +- name: Reboot the VM + command: /sbin/shutdown -r now "Ansible updates triggered" + async: 300 + poll: 0 + ignore_errors: true + listen: "Update VM state" + +- name: Wait for VM to shutdown + wait_for: + host: "{{ ansible_ssh_host }}" + port: 22 + state: stopped + delay: 10 + timeout: 300 + connection: local + listen: "Update VM state" + +- name: Wait for VM to startup + wait_for: + host: "{{ ansible_ssh_host }}" + port: 22 + state: started + delay: 10 + timeout: 1200 + connection: local + listen: "Update VM state" From 886ed1d6006032047a92d454b9cecb90fe9307de Mon Sep 17 00:00:00 2001 From: Guohan Lu Date: Tue, 19 Nov 2019 04:18:04 +0000 Subject: [PATCH 6/7] [ansible]: import callback_loader from different location due to ansible changes Signed-off-by: Guohan Lu --- tests/ansible_host.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ansible_host.py b/tests/ansible_host.py index ff1f8ad79af..de8bdd5d87b 100644 --- a/tests/ansible_host.py +++ b/tests/ansible_host.py @@ -1,4 +1,4 @@ -from ansible.plugins import callback_loader +from ansible.plugins.loader import callback_loader from ansible.errors import AnsibleError def dump_ansible_results(results, stdout_callback='yaml'): From bd69f3d20becf6e69b7330b3d4d681432d4c16b0 Mon Sep 17 00:00:00 2001 From: Guohan Lu Date: Tue, 19 Nov 2019 07:57:12 +0000 Subject: [PATCH 7/7] [ansible]: datatime class is overriden by ansible module_utils Signed-off-by: Guohan Lu --- ansible/library/extract_log.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/library/extract_log.py b/ansible/library/extract_log.py index 9cf8c1cf5ea..4825d6f0db3 100644 --- a/ansible/library/extract_log.py +++ b/ansible/library/extract_log.py @@ -118,9 +118,9 @@ def convert_date(s): if len(re_result) > 0: str_date = re_result[0] try: - dt = datetime.strptime(str_date, '%b %d %X.%f') + dt = 
datetime.datetime.strptime(str_date, '%b %d %X.%f') except ValueError: - dt = datetime.strptime(str_date, '%b %d %X') + dt = datetime.datetime.strptime(str_date, '%b %d %X') else: re_result = re.findall(r'^\d{4}-\d{2}-\d{2}\.\d{2}:\d{2}:\d{2}\.\d{6}', s) str_date = re_result[0]