Diffstat (limited to 'contrib/inventory')
-rw-r--r--  contrib/inventory/abiquo.ini                 |   48
-rwxr-xr-x  contrib/inventory/abiquo.py                  |  232
-rwxr-xr-x  contrib/inventory/apache-libcloud.py         |  346
-rw-r--r--  contrib/inventory/apstra_aos.ini             |   20
-rwxr-xr-x  contrib/inventory/apstra_aos.py              |  589
-rw-r--r--  contrib/inventory/azure_rm.ini               |   23
-rwxr-xr-x  contrib/inventory/azure_rm.py                |  973
-rw-r--r--  contrib/inventory/brook.ini                  |   39
-rwxr-xr-x  contrib/inventory/brook.py                   |  258
-rw-r--r--  contrib/inventory/cloudforms.ini             |   40
-rwxr-xr-x  contrib/inventory/cloudforms.py              |  483
-rw-r--r--  contrib/inventory/cloudstack.ini             |    5
-rwxr-xr-x  contrib/inventory/cloudstack.py              |  277
-rw-r--r--  contrib/inventory/cobbler.ini                |   24
-rwxr-xr-x  contrib/inventory/cobbler.py                 |  312
-rw-r--r--  contrib/inventory/collins.ini                |   57
-rwxr-xr-x  contrib/inventory/collins.py                 |  439
-rw-r--r--  contrib/inventory/consul_io.ini              |   54
-rwxr-xr-x  contrib/inventory/consul_io.py               |  537
-rw-r--r--  contrib/inventory/digital_ocean.ini          |   34
-rwxr-xr-x  contrib/inventory/digital_ocean.py           |  551
-rwxr-xr-x  contrib/inventory/docker.py                  |  905
-rw-r--r--  contrib/inventory/docker.yml                 |   74
-rwxr-xr-x  contrib/inventory/fleet.py                   |  107
-rw-r--r--  contrib/inventory/foreman.ini                |  200
-rwxr-xr-x  contrib/inventory/foreman.py                 |  662
-rwxr-xr-x  contrib/inventory/freeipa.py                 |  123
-rw-r--r--  contrib/inventory/gce.ini                    |   76
-rwxr-xr-x  contrib/inventory/gce.py                     |  521
-rwxr-xr-x  contrib/inventory/infoblox.py                |  126
-rw-r--r--  contrib/inventory/infoblox.yaml              |   24
-rwxr-xr-x  contrib/inventory/jail.py                    |   37
-rwxr-xr-x  contrib/inventory/landscape.py               |  125
-rw-r--r--  contrib/inventory/libcloud.ini               |   15
-rwxr-xr-x  contrib/inventory/libvirt_lxc.py             |   37
-rw-r--r--  contrib/inventory/linode.ini                 |   18
-rwxr-xr-x  contrib/inventory/linode.py                  |  348
-rwxr-xr-x  contrib/inventory/lxc_inventory.py           |   70
-rw-r--r--  contrib/inventory/lxd.ini                    |   13
-rwxr-xr-x  contrib/inventory/lxd.py                     |  101
-rw-r--r--  contrib/inventory/mdt.ini                    |   17
-rwxr-xr-x  contrib/inventory/mdt_dynamic_inventory.py   |  132
-rw-r--r--  contrib/inventory/nagios_livestatus.ini      |   41
-rwxr-xr-x  contrib/inventory/nagios_livestatus.py       |  173
-rw-r--r--  contrib/inventory/nagios_ndo.ini             |   10
-rwxr-xr-x  contrib/inventory/nagios_ndo.py              |  105
-rwxr-xr-x  contrib/inventory/nsot.py                    |  344
-rw-r--r--  contrib/inventory/nsot.yaml                  |   22
-rwxr-xr-x  contrib/inventory/openshift.py               |  100
-rw-r--r--  contrib/inventory/openstack.yml              |   24
-rwxr-xr-x  contrib/inventory/openstack_inventory.py     |  272
-rwxr-xr-x  contrib/inventory/openvz.py                  |   85
-rw-r--r--  contrib/inventory/ovirt.ini                  |   35
-rwxr-xr-x  contrib/inventory/ovirt.py                   |  289
-rwxr-xr-x  contrib/inventory/ovirt4.py                  |  257
-rw-r--r--  contrib/inventory/packet_net.ini             |   53
-rwxr-xr-x  contrib/inventory/packet_net.py              |  506
-rwxr-xr-x  contrib/inventory/proxmox.py                 |  248
-rwxr-xr-x  contrib/inventory/rackhd.py                  |   96
-rw-r--r--  contrib/inventory/rax.ini                    |   66
-rwxr-xr-x  contrib/inventory/rax.py                     |  470
l---------  contrib/inventory/rhv.py                     |    1
-rw-r--r--  contrib/inventory/rudder.ini                 |   35
-rwxr-xr-x  contrib/inventory/rudder.py                  |  296
-rw-r--r--  contrib/inventory/scaleway.ini               |   37
-rwxr-xr-x  contrib/inventory/scaleway.py                |  230
-rwxr-xr-x  contrib/inventory/serf.py                    |  112
-rwxr-xr-x  contrib/inventory/softlayer.py               |  204
-rw-r--r--  contrib/inventory/spacewalk.ini              |   16
-rwxr-xr-x  contrib/inventory/spacewalk.py               |  237
-rwxr-xr-x  contrib/inventory/ssh_config.py              |  131
-rwxr-xr-x  contrib/inventory/stacki.py                  |  188
-rw-r--r--  contrib/inventory/stacki.yml                 |    7
-rwxr-xr-x  contrib/inventory/vagrant.py                 |  131
-rwxr-xr-x  contrib/inventory/vbox.py                    |  117
-rw-r--r--  contrib/inventory/zabbix.ini                 |   20
-rwxr-xr-x  contrib/inventory/zabbix.py                  |  196
-rwxr-xr-x  contrib/inventory/zone.py                    |   43
78 files changed, 0 insertions(+), 14269 deletions(-)
diff --git a/contrib/inventory/abiquo.ini b/contrib/inventory/abiquo.ini
deleted file mode 100644
index 991a2ed803..0000000000
--- a/contrib/inventory/abiquo.ini
+++ /dev/null
@@ -1,48 +0,0 @@
-# Ansible external inventory script settings for Abiquo
-#
-
-# Define an Abiquo user with access to the Abiquo API. This user will be
-# used to perform the queries required to generate the Ansible inventory
-# output.
-#
-[auth]
-apiuser = admin
-apipass = xabiquo
-
-
-# Specify the Abiquo API version in major.minor format and the URI of the
-# API endpoint. Tested versions are: 2.6, 3.0 and 3.1.
-# To confirm that your box has access to the Abiquo API, you can run a
-# curl command similar to this one, replacing the values as appropriate:
-# curl -X GET https://192.168.2.100/api/login -u admin:xabiquo
-#
-[api]
-version = 3.0
-uri = https://192.168.2.100/api
-# You probably won't need to modify login preferences, but just in case
-login_path = /login
-login_type = application/vnd.abiquo.user+json
-
-
-# To avoid excessive calls to the Abiquo API you can define a cache for the
-# plugin output. Within the time defined in seconds, the latest output will
-# be reused. After that time, the cache will be refreshed.
-#
-[cache]
-cache_max_age = 30
-cache_dir = /tmp
-
-
-[defaults]
-# Depending on your Abiquo environment, you may want to use only public IP
-# addresses (if using public cloud providers) or private IP addresses too.
-# You can set this with the public_ip_only option.
-public_ip_only = false
-# default_net_interface is only used if public_ip_only = false.
-# If public_ip_only is set to false, you can choose the default NIC from
-# which to obtain the IP address that defines the host.
-default_net_interface = nic0
-# Only deployed VMs are displayed in the plugin output.
-deployed_only = true
-# Define whether VM metadata is obtained from the Abiquo API.
-get_metadata = false
diff --git a/contrib/inventory/abiquo.py b/contrib/inventory/abiquo.py
deleted file mode 100755
index 5a7950bd70..0000000000
--- a/contrib/inventory/abiquo.py
+++ /dev/null
@@ -1,232 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-'''
-External inventory script for Abiquo
-====================================
-
-Shamelessly copied from an existing inventory script.
-
-This script generates an inventory that Ansible can understand by making API requests to the Abiquo API.
-It requires some Python libraries; ensure they are installed before using this script.
-
-This script has been tested with Abiquo 3.0, but it may also work with Abiquo 2.6.
-
-Before using this script you may want to modify the abiquo.ini config file.
-
-This script generates an Ansible hosts file with these host groups:
-
-ABQ_xxx: Defines a host by its Abiquo VM name label
-all: Contains all hosts defined in the Abiquo user's enterprise
-virtualdatacenter: Creates a host group for each virtualdatacenter, containing all hosts defined in it
-virtualappliance: Creates a host group for each virtualappliance, containing all hosts defined in it
-imagetemplate: Creates a host group for each image template, containing all hosts using it
-
-'''
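-# A minimal sketch of the JSON shape this script prints (group and host
-# names below are illustrative, not taken from a real Abiquo install):
-#
-# {
-#     "all": {"children": ["web01"], "hosts": []},
-#     "my_vdc": {"children": ["web01"], "hosts": []},
-#     "web01": ["192.0.2.10"],
-#     "_meta": {"hostvars": {"192.0.2.10": {}}}
-# }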
-
-# (c) 2014, Daniel Beneyto <daniel.beneyto@abiquo.com>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import sys
-import time
-
-import json
-
-from ansible.module_utils.six.moves import configparser as ConfigParser
-from ansible.module_utils.urls import open_url
-
-
-def api_get(link, config):
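-    ''' Performs a single GET against the Abiquo API. With link=None it
-    calls the login endpoint. Returns the decoded JSON, or None on error. '''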
- try:
- if link is None:
- url = config.get('api', 'uri') + config.get('api', 'login_path')
- headers = {"Accept": config.get('api', 'login_type')}
- else:
- url = link['href'] + '?limit=0'
- headers = {"Accept": link['type']}
- result = open_url(url, headers=headers, url_username=config.get('auth', 'apiuser').replace('\n', ''),
- url_password=config.get('auth', 'apipass').replace('\n', ''))
- return json.loads(result.read())
- except Exception:
- return None
-
-
-def save_cache(data, config):
- ''' saves item to cache '''
- dpath = config.get('cache', 'cache_dir')
-    try:
-        with open('/'.join([dpath, 'inventory']), 'w') as cache:
-            cache.write(json.dumps(data))
-    except IOError:
-        pass  # not really sure what to do here
-
-
-def get_cache(cache_item, config):
- ''' returns cached item '''
- dpath = config.get('cache', 'cache_dir')
- inv = {}
-    try:
-        with open('/'.join([dpath, 'inventory']), 'r') as cache:
-            inv = cache.read()
-    except IOError:
-        pass  # not really sure what to do here
-
- return inv
-
-
-def cache_available(config):
- ''' checks if we have a 'fresh' cache available for item requested '''
-
- if config.has_option('cache', 'cache_dir'):
- dpath = config.get('cache', 'cache_dir')
-
- try:
- existing = os.stat('/'.join([dpath, 'inventory']))
- except Exception:
- # cache doesn't exist or isn't accessible
- return False
-
- if config.has_option('cache', 'cache_max_age'):
- maxage = config.get('cache', 'cache_max_age')
- if (int(time.time()) - int(existing.st_mtime)) <= int(maxage):
- return True
-
- return False
-
-
-def generate_inv_from_api(enterprise_entity, config):
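-    ''' Walks the enterprise's virtual machines and builds the inventory
-    dict: one group per virtualappliance, virtualdatacenter and template,
-    plus the 'all' group and the '_meta' hostvars block. '''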
- try:
- inventory['all'] = {}
- inventory['all']['children'] = []
- inventory['all']['hosts'] = []
- inventory['_meta'] = {}
- inventory['_meta']['hostvars'] = {}
-
- enterprise = api_get(enterprise_entity, config)
- vms_entity = next(link for link in enterprise['links'] if link['rel'] == 'virtualmachines')
- vms = api_get(vms_entity, config)
- for vmcollection in vms['collection']:
- for link in vmcollection['links']:
- if link['rel'] == 'virtualappliance':
- vm_vapp = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
- elif link['rel'] == 'virtualdatacenter':
- vm_vdc = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
- elif link['rel'] == 'virtualmachinetemplate':
- vm_template = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
-
-            # From abiquo.ini: Only adding VMs with a public IP to the inventory
- if config.getboolean('defaults', 'public_ip_only') is True:
- for link in vmcollection['links']:
- if link['type'] == 'application/vnd.abiquo.publicip+json' and link['rel'] == 'ip':
- vm_nic = link['title']
- break
- else:
- vm_nic = None
-            # Otherwise, assign the IP address of the configured default network interface
- else:
- for link in vmcollection['links']:
- if link['rel'] == config.get('defaults', 'default_net_interface'):
- vm_nic = link['title']
- break
- else:
- vm_nic = None
-
- vm_state = True
-            # From abiquo.ini: Only adding deployed VMs to the inventory
- if config.getboolean('defaults', 'deployed_only') is True and vmcollection['state'] == 'NOT_ALLOCATED':
- vm_state = False
-
- if vm_nic is not None and vm_state:
- if vm_vapp not in inventory:
- inventory[vm_vapp] = {}
- inventory[vm_vapp]['children'] = []
- inventory[vm_vapp]['hosts'] = []
- if vm_vdc not in inventory:
- inventory[vm_vdc] = {}
- inventory[vm_vdc]['hosts'] = []
- inventory[vm_vdc]['children'] = []
- if vm_template not in inventory:
- inventory[vm_template] = {}
- inventory[vm_template]['children'] = []
- inventory[vm_template]['hosts'] = []
- if config.getboolean('defaults', 'get_metadata') is True:
- meta_entity = next(link for link in vmcollection['links'] if link['rel'] == 'metadata')
- try:
- metadata = api_get(meta_entity, config)
- if (config.getfloat("api", "version") >= 3.0):
- vm_metadata = metadata['metadata']
- else:
- vm_metadata = metadata['metadata']['metadata']
- inventory['_meta']['hostvars'][vm_nic] = vm_metadata
- except Exception as e:
- pass
-
- inventory[vm_vapp]['children'].append(vmcollection['name'])
- inventory[vm_vdc]['children'].append(vmcollection['name'])
- inventory[vm_template]['children'].append(vmcollection['name'])
- inventory['all']['children'].append(vmcollection['name'])
- inventory[vmcollection['name']] = []
- inventory[vmcollection['name']].append(vm_nic)
-
- return inventory
- except Exception as e:
- # Return empty hosts output
- return {'all': {'hosts': []}, '_meta': {'hostvars': {}}}
-
-
-def get_inventory(enterprise, config):
-    ''' Reads the inventory from the cache or the Abiquo API '''
-
- if cache_available(config):
- inv = get_cache('inventory', config)
- else:
-        default_group = os.path.splitext(os.path.basename(sys.argv[0]))[0]
- # MAKE ABIQUO API CALLS #
- inv = generate_inv_from_api(enterprise, config)
-
- save_cache(inv, config)
- return json.dumps(inv)
-
-
-if __name__ == '__main__':
- inventory = {}
- enterprise = {}
-
- # Read config
- config = ConfigParser.SafeConfigParser()
-    for configfilename in [os.path.splitext(os.path.abspath(sys.argv[0]))[0] + '.ini', 'abiquo.ini']:
- if os.path.exists(configfilename):
- config.read(configfilename)
- break
-
- try:
- login = api_get(None, config)
- enterprise = next(link for link in login['links'] if link['rel'] == 'enterprise')
- except Exception as e:
- enterprise = None
-
- if cache_available(config):
- inventory = get_cache('inventory', config)
- else:
- inventory = get_inventory(enterprise, config)
-
- # return to ansible
- sys.stdout.write(str(inventory))
- sys.stdout.flush()
diff --git a/contrib/inventory/apache-libcloud.py b/contrib/inventory/apache-libcloud.py
deleted file mode 100755
index 3857d2f934..0000000000
--- a/contrib/inventory/apache-libcloud.py
+++ /dev/null
@@ -1,346 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2013, Sebastien Goasguen <runseb@gmail.com>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-######################################################################
-
-'''
-Apache Libcloud generic external inventory script
-=================================================
-
-Generates inventory that Ansible can understand by making API requests to
-cloud providers using the Apache Libcloud library.
-
-This script also assumes there is a libcloud.ini file alongside it.
-
-'''
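-# A minimal libcloud.ini sketch matching what read_settings() below expects
-# (provider/key/secret and the [cache] section are required, the remaining
-# driver options are optional; all values here are placeholders):
-#
-# [driver]
-# provider = EC2
-# key = your-access-key-id
-# secret = your-secret-key
-#
-# [cache]
-# cache_path = /tmp
-# cache_max_age = 300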
-
-import sys
-import os
-import argparse
-import re
-from time import time
-
-from ansible.module_utils.six import iteritems, string_types
-from ansible.module_utils.six.moves import configparser as ConfigParser
-from libcloud.compute.types import Provider
-from libcloud.compute.providers import get_driver
-import libcloud.security as sec
-
-import json
-
-
-class LibcloudInventory(object):
- def __init__(self):
- ''' Main execution path '''
-
- # Inventory grouped by instance IDs, tags, security groups, regions,
- # and availability zones
- self.inventory = {}
-
- # Index of hostname (address) to instance ID
- self.index = {}
-
- # Read settings and parse CLI arguments
- self.read_settings()
- self.parse_cli_args()
-
- # Cache
- if self.args.refresh_cache:
- self.do_api_calls_update_cache()
- elif not self.is_cache_valid():
- self.do_api_calls_update_cache()
-
- # Data to print
- if self.args.host:
- data_to_print = self.get_host_info()
-
- elif self.args.list:
- # Display list of instances for inventory
- if len(self.inventory) == 0:
- data_to_print = self.get_inventory_from_cache()
- else:
- data_to_print = self.json_format_dict(self.inventory, True)
-
- print(data_to_print)
-
- def is_cache_valid(self):
-        ''' Determines whether the cache file has expired or is still valid '''
-
- if os.path.isfile(self.cache_path_cache):
- mod_time = os.path.getmtime(self.cache_path_cache)
- current_time = time()
- if (mod_time + self.cache_max_age) > current_time:
- if os.path.isfile(self.cache_path_index):
- return True
-
- return False
-
- def read_settings(self):
- ''' Reads the settings from the libcloud.ini file '''
-
- config = ConfigParser.SafeConfigParser()
- libcloud_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libcloud.ini')
- libcloud_ini_path = os.environ.get('LIBCLOUD_INI_PATH', libcloud_default_ini_path)
- config.read(libcloud_ini_path)
-
- if not config.has_section('driver'):
- raise ValueError('libcloud.ini file must contain a [driver] section')
-
- if config.has_option('driver', 'provider'):
- self.provider = config.get('driver', 'provider')
- else:
- raise ValueError('libcloud.ini does not have a provider defined')
-
- if config.has_option('driver', 'key'):
- self.key = config.get('driver', 'key')
- else:
- raise ValueError('libcloud.ini does not have a key defined')
-
- if config.has_option('driver', 'secret'):
- self.secret = config.get('driver', 'secret')
- else:
- raise ValueError('libcloud.ini does not have a secret defined')
-
-        # Defaults for the optional driver settings, so the Driver() call
-        # below does not fail when they are absent from libcloud.ini
-        self.host = None
-        self.secure = True
-        self.verify_ssl_cert = True
-        self.port = None
-        self.path = None
-        self.api_version = None
-
-        if config.has_option('driver', 'host'):
- self.host = config.get('driver', 'host')
- if config.has_option('driver', 'secure'):
- self.secure = config.get('driver', 'secure')
- if config.has_option('driver', 'verify_ssl_cert'):
- self.verify_ssl_cert = config.get('driver', 'verify_ssl_cert')
- if config.has_option('driver', 'port'):
- self.port = config.get('driver', 'port')
- if config.has_option('driver', 'path'):
- self.path = config.get('driver', 'path')
- if config.has_option('driver', 'api_version'):
- self.api_version = config.get('driver', 'api_version')
-
- Driver = get_driver(getattr(Provider, self.provider))
-
- self.conn = Driver(key=self.key, secret=self.secret, secure=self.secure,
- host=self.host, path=self.path)
-
- # Cache related
- cache_path = config.get('cache', 'cache_path')
- self.cache_path_cache = cache_path + "/ansible-libcloud.cache"
- self.cache_path_index = cache_path + "/ansible-libcloud.index"
- self.cache_max_age = config.getint('cache', 'cache_max_age')
-
- def parse_cli_args(self):
- '''
- Command line argument processing
- '''
-
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on libcloud supported providers')
- parser.add_argument('--list', action='store_true', default=True,
- help='List instances (default: True)')
- parser.add_argument('--host', action='store',
- help='Get all the variables about a specific instance')
- parser.add_argument('--refresh-cache', action='store_true', default=False,
- help='Force refresh of cache by making API requests to libcloud supported providers (default: False - use cache files)')
- self.args = parser.parse_args()
-
- def do_api_calls_update_cache(self):
- '''
- Do API calls to a location, and save data in cache files
- '''
-
- self.get_nodes()
-
- self.write_to_cache(self.inventory, self.cache_path_cache)
- self.write_to_cache(self.index, self.cache_path_index)
-
- def get_nodes(self):
- '''
- Gets the list of all nodes
- '''
-
- for node in self.conn.list_nodes():
- self.add_node(node)
-
- def get_node(self, node_id):
- '''
- Gets details about a specific node
- '''
-
- return [node for node in self.conn.list_nodes() if node.id == node_id][0]
-
- def add_node(self, node):
- '''
- Adds a node to the inventory and index, as long as it is
- addressable
- '''
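-        # e.g. a running node named 'web1' with key pair 'deploy' and tag
-        # env=prod (illustrative values) lands in the groups 'web1',
-        # 'key_deploy' and 'tag_env_prod'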
-
- # Only want running instances
- if node.state != 0:
- return
-
- # Select the best destination address
-        dest = node.public_ips[0] if node.public_ips else None
- if not dest:
- # Skip instances we cannot address (e.g. private VPC subnet)
- return
-
- # Add to index
- self.index[dest] = node.name
-
- # Inventory: Group by instance ID (always a group of 1)
- self.inventory[node.name] = [dest]
- '''
- # Inventory: Group by region
- self.push(self.inventory, region, dest)
-
- # Inventory: Group by availability zone
- self.push(self.inventory, node.placement, dest)
-
- # Inventory: Group by instance type
- self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest)
- '''
- # Inventory: Group by key pair
- if node.extra['key_name']:
- self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest)
-
- # Inventory: Group by security group, quick thing to handle single sg
- if node.extra['security_group']:
- self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest)
-
- # Inventory: Group by tag
- if node.extra['tags']:
- for tagkey in node.extra['tags'].keys():
- self.push(self.inventory, self.to_safe('tag_' + tagkey + '_' + node.extra['tags'][tagkey]), dest)
-
- def get_host_info(self):
- '''
- Get variables about a specific host
- '''
-
- if len(self.index) == 0:
- # Need to load index from cache
- self.load_index_from_cache()
-
- if self.args.host not in self.index:
- # try updating the cache
- self.do_api_calls_update_cache()
- if self.args.host not in self.index:
-                # host might not exist anymore
- return self.json_format_dict({}, True)
-
- node_id = self.index[self.args.host]
-
- node = self.get_node(node_id)
- instance_vars = {}
- for key, value in vars(node).items():
- key = self.to_safe('ec2_' + key)
-
- # Handle complex types
- if isinstance(value, (int, bool)):
- instance_vars[key] = value
- elif isinstance(value, string_types):
- instance_vars[key] = value.strip()
- elif value is None:
- instance_vars[key] = ''
- elif key == 'ec2_region':
- instance_vars[key] = value.name
- elif key == 'ec2_tags':
- for k, v in iteritems(value):
- key = self.to_safe('ec2_tag_' + k)
- instance_vars[key] = v
- elif key == 'ec2_groups':
- group_ids = []
- group_names = []
- for group in value:
- group_ids.append(group.id)
- group_names.append(group.name)
- instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
- instance_vars["ec2_security_group_names"] = ','.join(group_names)
- else:
- pass
- # TODO Product codes if someone finds them useful
- # print(key)
- # print(type(value))
- # print(value)
-
- return self.json_format_dict(instance_vars, True)
-
- def push(self, my_dict, key, element):
- '''
-        Pushes an element onto a list that may not have been defined in
-        the dict
- '''
-
- if key in my_dict:
- my_dict[key].append(element)
- else:
- my_dict[key] = [element]
-
- def get_inventory_from_cache(self):
- '''
- Reads the inventory from the cache file and returns it as a JSON
- object
- '''
-
-        with open(self.cache_path_cache, 'r') as cache:
-            json_inventory = cache.read()
- return json_inventory
-
- def load_index_from_cache(self):
- '''
-        Reads the index from the cache file and sets self.index
- '''
-
-        with open(self.cache_path_index, 'r') as cache:
-            json_index = cache.read()
-        self.index = json.loads(json_index)
-
- def write_to_cache(self, data, filename):
- '''
- Writes data in JSON format to a file
- '''
-
- json_data = self.json_format_dict(data, True)
-        with open(filename, 'w') as cache:
-            cache.write(json_data)
-
- def to_safe(self, word):
- '''
- Converts 'bad' characters in a string to underscores so they can be
- used as Ansible groups
- '''
-
- return re.sub(r"[^A-Za-z0-9\-]", "_", word)
-
- def json_format_dict(self, data, pretty=False):
- '''
- Converts a dict to a JSON object and dumps it as a formatted
- string
- '''
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
-
-def main():
- LibcloudInventory()
-
-
-if __name__ == '__main__':
- main()
diff --git a/contrib/inventory/apstra_aos.ini b/contrib/inventory/apstra_aos.ini
deleted file mode 100644
index 1ec1255c9c..0000000000
--- a/contrib/inventory/apstra_aos.ini
+++ /dev/null
@@ -1,20 +0,0 @@
-# Ansible Apstra AOS external inventory script settings
-# Dynamic inventory script parameters can be provided using this file
-# Or by using Environment Variables:
-# - AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT
-#
-# This file takes precedence over the Environment Variables
-#
-
-[aos]
-
-# aos_server = 172.20.62.3
-# port = 8888
-# username = admin
-# password = admin
-
-## Blueprint Mode
-# To use the inventory in Blueprint mode, define the name of the blueprint you want to use
-
-# blueprint = my-blueprint-l2
-# blueprint_interface = true
diff --git a/contrib/inventory/apstra_aos.py b/contrib/inventory/apstra_aos.py
deleted file mode 100755
index 7b9af7db5c..0000000000
--- a/contrib/inventory/apstra_aos.py
+++ /dev/null
@@ -1,589 +0,0 @@
-#!/usr/bin/env python
-#
-# (c) 2017 Apstra Inc, <community@apstra.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-"""
-Apstra AOS external inventory script
-====================================
-
-Ansible has a feature where instead of reading from /etc/ansible/hosts
-as a text file, it can query external programs to obtain the list
-of hosts, groups the hosts are in, and even variables to assign to each host.
-
-To use this, either:
- - copy this file over /etc/ansible/hosts and chmod +x the file, or
- - copy both files (.py and .ini) into your preferred directory
-
-More information about Ansible dynamic inventories is available here:
-http://unix.stackexchange.com/questions/205479/in-ansible-dynamic-inventory-json-can-i-render-hostvars-based-on-the-hostname
-
-Two modes are currently supported: **device based** or **blueprint based**:
-  - For **device based**, the list of devices is taken from the global device list;
-    the serial ID will be used as the inventory_hostname
-  - For **blueprint based**, the list of devices is taken from the given blueprint;
-    the node name will be used as the inventory_hostname
-
-Input parameters can be provided either with the ini file or through environment variables.
-The following environment variables are supported: AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT
-The config file takes precedence over the environment variables
-
-Tested with Apstra AOS 1.1
-
-This script was inspired by the cobbler.py inventory script; thanks.
-
-Author: Damien Garros (@dgarros)
-Version: 0.2.0
-"""
-import json
-import os
-import re
-import sys
-
-try:
- import argparse
- HAS_ARGPARSE = True
-except ImportError:
- HAS_ARGPARSE = False
-
-try:
- from apstra.aosom.session import Session
- HAS_AOS_PYEZ = True
-except ImportError:
- HAS_AOS_PYEZ = False
-
-from ansible.module_utils.six.moves import configparser
-
-
-"""
-##
-Expected output format in Device mode
-{
- "Cumulus": {
- "hosts": [
- "52540073956E",
- "52540022211A"
- ],
- "vars": {}
- },
- "EOS": {
- "hosts": [
- "5254001CAFD8",
- "525400DDDF72"
- ],
- "vars": {}
- },
- "Generic Model": {
- "hosts": [
- "525400E5486D"
- ],
- "vars": {}
- },
- "Ubuntu GNU/Linux": {
- "hosts": [
- "525400E5486D"
- ],
- "vars": {}
- },
- "VX": {
- "hosts": [
- "52540073956E",
- "52540022211A"
- ],
- "vars": {}
- },
- "_meta": {
- "hostvars": {
- "5254001CAFD8": {
- "agent_start_time": "2017-02-03T00:49:16.000000Z",
- "ansible_ssh_host": "172.20.52.6",
- "aos_hcl_model": "Arista_vEOS",
- "aos_server": "",
- "aos_version": "AOS_1.1.1_OB.5",
- "comm_state": "on",
- "device_start_time": "2017-02-03T00:47:58.454480Z",
- "domain_name": "",
- "error_message": "",
- "fqdn": "localhost",
- "hostname": "localhost",
- "hw_model": "vEOS",
- "hw_version": "",
- "is_acknowledged": false,
- "mgmt_ifname": "Management1",
- "mgmt_ipaddr": "172.20.52.6",
- "mgmt_macaddr": "52:54:00:1C:AF:D8",
- "os_arch": "x86_64",
- "os_family": "EOS",
- "os_version": "4.16.6M",
- "os_version_info": {
- "build": "6M",
- "major": "4",
- "minor": "16"
- },
- "serial_number": "5254001CAFD8",
- "state": "OOS-QUARANTINED",
- "vendor": "Arista"
- },
- "52540022211A": {
- "agent_start_time": "2017-02-03T00:45:22.000000Z",
- "ansible_ssh_host": "172.20.52.7",
- "aos_hcl_model": "Cumulus_VX",
- "aos_server": "172.20.52.3",
- "aos_version": "AOS_1.1.1_OB.5",
- "comm_state": "on",
- "device_start_time": "2017-02-03T00:45:11.019189Z",
- "domain_name": "",
- "error_message": "",
- "fqdn": "cumulus",
- "hostname": "cumulus",
- "hw_model": "VX",
- "hw_version": "",
- "is_acknowledged": false,
- "mgmt_ifname": "eth0",
- "mgmt_ipaddr": "172.20.52.7",
- "mgmt_macaddr": "52:54:00:22:21:1a",
- "os_arch": "x86_64",
- "os_family": "Cumulus",
- "os_version": "3.1.1",
- "os_version_info": {
- "build": "1",
- "major": "3",
- "minor": "1"
- },
- "serial_number": "52540022211A",
- "state": "OOS-QUARANTINED",
- "vendor": "Cumulus"
- },
- "52540073956E": {
- "agent_start_time": "2017-02-03T00:45:19.000000Z",
- "ansible_ssh_host": "172.20.52.8",
- "aos_hcl_model": "Cumulus_VX",
- "aos_server": "172.20.52.3",
- "aos_version": "AOS_1.1.1_OB.5",
- "comm_state": "on",
- "device_start_time": "2017-02-03T00:45:11.030113Z",
- "domain_name": "",
- "error_message": "",
- "fqdn": "cumulus",
- "hostname": "cumulus",
- "hw_model": "VX",
- "hw_version": "",
- "is_acknowledged": false,
- "mgmt_ifname": "eth0",
- "mgmt_ipaddr": "172.20.52.8",
- "mgmt_macaddr": "52:54:00:73:95:6e",
- "os_arch": "x86_64",
- "os_family": "Cumulus",
- "os_version": "3.1.1",
- "os_version_info": {
- "build": "1",
- "major": "3",
- "minor": "1"
- },
- "serial_number": "52540073956E",
- "state": "OOS-QUARANTINED",
- "vendor": "Cumulus"
- },
- "525400DDDF72": {
- "agent_start_time": "2017-02-03T00:49:07.000000Z",
- "ansible_ssh_host": "172.20.52.5",
- "aos_hcl_model": "Arista_vEOS",
- "aos_server": "",
- "aos_version": "AOS_1.1.1_OB.5",
- "comm_state": "on",
- "device_start_time": "2017-02-03T00:47:46.929921Z",
- "domain_name": "",
- "error_message": "",
- "fqdn": "localhost",
- "hostname": "localhost",
- "hw_model": "vEOS",
- "hw_version": "",
- "is_acknowledged": false,
- "mgmt_ifname": "Management1",
- "mgmt_ipaddr": "172.20.52.5",
- "mgmt_macaddr": "52:54:00:DD:DF:72",
- "os_arch": "x86_64",
- "os_family": "EOS",
- "os_version": "4.16.6M",
- "os_version_info": {
- "build": "6M",
- "major": "4",
- "minor": "16"
- },
- "serial_number": "525400DDDF72",
- "state": "OOS-QUARANTINED",
- "vendor": "Arista"
- },
- "525400E5486D": {
- "agent_start_time": "2017-02-02T18:44:42.000000Z",
- "ansible_ssh_host": "172.20.52.4",
- "aos_hcl_model": "Generic_Server_1RU_1x10G",
- "aos_server": "172.20.52.3",
- "aos_version": "AOS_1.1.1_OB.5",
- "comm_state": "on",
- "device_start_time": "2017-02-02T21:11:25.188734Z",
- "domain_name": "",
- "error_message": "",
- "fqdn": "localhost",
- "hostname": "localhost",
- "hw_model": "Generic Model",
- "hw_version": "pc-i440fx-trusty",
- "is_acknowledged": false,
- "mgmt_ifname": "eth0",
- "mgmt_ipaddr": "172.20.52.4",
- "mgmt_macaddr": "52:54:00:e5:48:6d",
- "os_arch": "x86_64",
- "os_family": "Ubuntu GNU/Linux",
- "os_version": "14.04 LTS",
- "os_version_info": {
- "build": "",
- "major": "14",
- "minor": "04"
- },
- "serial_number": "525400E5486D",
- "state": "OOS-QUARANTINED",
- "vendor": "Generic Manufacturer"
- }
- }
- },
- "all": {
- "hosts": [
- "5254001CAFD8",
- "52540073956E",
- "525400DDDF72",
- "525400E5486D",
- "52540022211A"
- ],
- "vars": {}
- },
- "vEOS": {
- "hosts": [
- "5254001CAFD8",
- "525400DDDF72"
- ],
- "vars": {}
- }
-}
-"""
-
-
-def fail(msg):
- sys.stderr.write("%s\n" % msg)
- sys.exit(1)
-
-
-class AosInventory(object):
-
- def __init__(self):
-
- """ Main execution path """
-
- if not HAS_AOS_PYEZ:
- raise Exception('aos-pyez is not installed. Please see details here: https://github.com/Apstra/aos-pyez')
- if not HAS_ARGPARSE:
- raise Exception('argparse is not installed. Please install the argparse library or upgrade to python-2.7')
-
- # Initialize inventory
-        self.inventory = dict()  # A dict mapping each group to the hosts in that group
- self.inventory['_meta'] = dict()
- self.inventory['_meta']['hostvars'] = dict()
-
- # Read settings and parse CLI arguments
- self.read_settings()
- self.parse_cli_args()
-
- # ----------------------------------------------------
- # Open session to AOS
- # ----------------------------------------------------
- aos = Session(server=self.aos_server,
- port=self.aos_server_port,
- user=self.aos_username,
- passwd=self.aos_password)
-
- aos.login()
-
- # Save session information in variables of group all
- self.add_var_to_group('all', 'aos_session', aos.session)
-
- # Add the AOS server itself in the inventory
- self.add_host_to_group("all", 'aos')
- self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server)
- self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password)
- self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username)
-
- # ----------------------------------------------------
- # Build the inventory
- # 2 modes are supported: device based or blueprint based
-        # - For device based, the list of devices is taken from the global device list;
-        #   the serial ID will be used as the inventory_hostname
-        # - For blueprint based, the list of devices is taken from the given blueprint;
-        #   the node name will be used as the inventory_hostname
- # ----------------------------------------------------
- if self.aos_blueprint:
-
- bp = aos.Blueprints[self.aos_blueprint]
- if bp.exists is False:
- fail("Unable to find the Blueprint: %s" % self.aos_blueprint)
-
- for dev_name, dev_id in bp.params['devices'].value.items():
-
- self.add_host_to_group('all', dev_name)
- device = aos.Devices.find(uid=dev_id)
-
- if 'facts' in device.value.keys():
- self.add_device_facts_to_var(dev_name, device)
-
- # Define admin State and Status
- if 'user_config' in device.value.keys():
- if 'admin_state' in device.value['user_config'].keys():
- self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state'])
-
- self.add_device_status_to_var(dev_name, device)
-
- # Go over the contents data structure
- for node in bp.contents['system']['nodes']:
- if node['display_name'] == dev_name:
- self.add_host_to_group(node['role'], dev_name)
-
-                        # Check for additional attributes to import
- attributes_to_import = [
- 'loopback_ip',
- 'asn',
- 'role',
- 'position',
- ]
- for attr in attributes_to_import:
- if attr in node.keys():
- self.add_var_to_host(dev_name, attr, node[attr])
-
- # if blueprint_interface is enabled in the configuration
- # Collect links information
- if self.aos_blueprint_int:
- interfaces = dict()
-
- for link in bp.contents['system']['links']:
-                # each link has 2 sides [0,1], and it's unknown which one matches this device;
-                # at first we assume the first side (0) matches and the peer is (1)
- peer_id = 1
-
- for side in link['endpoints']:
- if side['display_name'] == dev_name:
-
- # import local information first
- int_name = side['interface']
-
- # init dict
- interfaces[int_name] = dict()
- if 'ip' in side.keys():
- interfaces[int_name]['ip'] = side['ip']
-
- if 'interface' in side.keys():
- interfaces[int_name]['name'] = side['interface']
-
- if 'display_name' in link['endpoints'][peer_id].keys():
- interfaces[int_name]['peer'] = link['endpoints'][peer_id]['display_name']
-
- if 'ip' in link['endpoints'][peer_id].keys():
- interfaces[int_name]['peer_ip'] = link['endpoints'][peer_id]['ip']
-
- if 'type' in link['endpoints'][peer_id].keys():
- interfaces[int_name]['peer_type'] = link['endpoints'][peer_id]['type']
-
- else:
-                            # if we didn't match the first time, prepare the peer_id
-                            # for the second loop iteration
- peer_id = 0
-
- self.add_var_to_host(dev_name, 'interfaces', interfaces)
-
- else:
- for device in aos.Devices:
-                # If not reachable, create by key;
-                # if reachable, create by hostname
-
- self.add_host_to_group('all', device.name)
-
- # populate information for this host
- self.add_device_status_to_var(device.name, device)
-
- if 'user_config' in device.value.keys():
- for key, value in device.value['user_config'].items():
- self.add_var_to_host(device.name, key, value)
-
- # Based on device status online|offline, collect facts as well
- if device.value['status']['comm_state'] == 'on':
-
- if 'facts' in device.value.keys():
- self.add_device_facts_to_var(device.name, device)
-
-                    # Check if the device is associated with a blueprint;
-                    # if it is, create a new group
- if 'blueprint_active' in device.value['status'].keys():
- if 'blueprint_id' in device.value['status'].keys():
- bp = aos.Blueprints.find(uid=device.value['status']['blueprint_id'])
-
- if bp:
- self.add_host_to_group(bp.name, device.name)
-
- # ----------------------------------------------------
- # Convert the inventory and return a JSON String
- # ----------------------------------------------------
- data_to_print = ""
- data_to_print += self.json_format_dict(self.inventory, True)
-
- print(data_to_print)
-
- def read_settings(self):
- """ Reads the settings from the apstra_aos.ini file """
-
- config = configparser.ConfigParser()
- config.read(os.path.dirname(os.path.realpath(__file__)) + '/apstra_aos.ini')
-
- # Default Values
- self.aos_blueprint = False
- self.aos_blueprint_int = True
- self.aos_username = 'admin'
- self.aos_password = 'admin'
- self.aos_server_port = 8888
-
-        # Try to read all parameters from the file; if not available, try the environment
- try:
- self.aos_server = config.get('aos', 'aos_server')
- except Exception:
- if 'AOS_SERVER' in os.environ.keys():
- self.aos_server = os.environ['AOS_SERVER']
-
- try:
- self.aos_server_port = config.get('aos', 'port')
- except Exception:
- if 'AOS_PORT' in os.environ.keys():
- self.aos_server_port = os.environ['AOS_PORT']
-
- try:
- self.aos_username = config.get('aos', 'username')
- except Exception:
- if 'AOS_USERNAME' in os.environ.keys():
- self.aos_username = os.environ['AOS_USERNAME']
-
- try:
- self.aos_password = config.get('aos', 'password')
- except Exception:
- if 'AOS_PASSWORD' in os.environ.keys():
- self.aos_password = os.environ['AOS_PASSWORD']
-
- try:
- self.aos_blueprint = config.get('aos', 'blueprint')
- except Exception:
- if 'AOS_BLUEPRINT' in os.environ.keys():
- self.aos_blueprint = os.environ['AOS_BLUEPRINT']
-
- try:
- if config.get('aos', 'blueprint_interface') in ['false', 'no']:
- self.aos_blueprint_int = False
- except Exception:
- pass
-
- def parse_cli_args(self):
- """ Command line argument processing """
-
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Apstra AOS')
- parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
- parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
- self.args = parser.parse_args()
-
- def json_format_dict(self, data, pretty=False):
- """ Converts a dict to a JSON object and dumps it as a formatted string """
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
- def add_host_to_group(self, group, host):
-
- # Cleanup group name first
- clean_group = self.cleanup_group_name(group)
-
-        # Check if the group exists; if not, initialize it
- if clean_group not in self.inventory.keys():
- self.inventory[clean_group] = {}
- self.inventory[clean_group]['hosts'] = []
- self.inventory[clean_group]['vars'] = {}
-
- self.inventory[clean_group]['hosts'].append(host)
-
- def add_var_to_host(self, host, var, value):
-
-        # Check if the host exists; if not, initialize it
- if host not in self.inventory['_meta']['hostvars'].keys():
- self.inventory['_meta']['hostvars'][host] = {}
-
- self.inventory['_meta']['hostvars'][host][var] = value
-
- def add_var_to_group(self, group, var, value):
-
- # Cleanup group name first
- clean_group = self.cleanup_group_name(group)
-
-        # Check if the group exists; if not, initialize it
- if clean_group not in self.inventory.keys():
- self.inventory[clean_group] = {}
- self.inventory[clean_group]['hosts'] = []
- self.inventory[clean_group]['vars'] = {}
-
- self.inventory[clean_group]['vars'][var] = value
-
- def add_device_facts_to_var(self, device_name, device):
-
- # Populate variables for this host
- self.add_var_to_host(device_name,
- 'ansible_ssh_host',
- device.value['facts']['mgmt_ipaddr'])
-
- self.add_var_to_host(device_name, 'id', device.id)
-
- # self.add_host_to_group('all', device.name)
- for key, value in device.value['facts'].items():
- self.add_var_to_host(device_name, key, value)
-
- if key == 'os_family':
- self.add_host_to_group(value, device_name)
- elif key == 'hw_model':
- self.add_host_to_group(value, device_name)
-
- def cleanup_group_name(self, group_name):
- """
-        Clean up a group name by:
-        - Replacing all non-alphanumeric characters with underscores
-        - Converting to lowercase
- """
-
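-        # e.g. cleanup_group_name('Ubuntu GNU/Linux') -> 'ubuntu_gnu_linux'
-        # (illustrative input; runs of non-word characters collapse to '_')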
- rx = re.compile(r'\W+')
- clean_group = rx.sub('_', group_name).lower()
-
- return clean_group
-
- def add_device_status_to_var(self, device_name, device):
-
- if 'status' in device.value.keys():
- for key, value in device.value['status'].items():
-                self.add_var_to_host(device_name, key, value)
-
-
-# Run the script
-if __name__ == '__main__':
- AosInventory()
diff --git a/contrib/inventory/azure_rm.ini b/contrib/inventory/azure_rm.ini
deleted file mode 100644
index 6edd9b981b..0000000000
--- a/contrib/inventory/azure_rm.ini
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Configuration file for azure_rm.py
-#
-[azure]
-# Control which resource groups are included. By default all resources groups are included.
-# Set resource_groups to a comma separated list of resource groups names.
-#resource_groups=
-
-# Control which tags are included. Set tags to a comma separated list of keys or key:value pairs
-#tags=
-
-# Control which locations are included. Set locations to a comma separated list (e.g. eastus,eastus2,westus)
-#locations=
-
-# Include powerstate. If you don't need powerstate information, turning it off improves runtime performance.
-include_powerstate=yes
-
-# Control grouping with the following boolean flags. Valid values: yes, no, true, false, True, False, 0, 1.
-group_by_resource_group=yes
-group_by_location=yes
-group_by_security_group=yes
-group_by_os_family=yes
-group_by_tag=yes
diff --git a/contrib/inventory/azure_rm.py b/contrib/inventory/azure_rm.py
deleted file mode 100755
index 7dc438f6a2..0000000000
--- a/contrib/inventory/azure_rm.py
+++ /dev/null
@@ -1,973 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-'''
-Important note (2018/10)
-========================
-This inventory script is in maintenance mode: it receives critical bug fixes only, no new features.
-There is a newer Azure external inventory plugin at https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/inventory/azure_rm.py,
-with better performance and the latest features. Please use the plugin at that link for new deployments.
-
-Azure External Inventory Script
-===============================
-Generates dynamic inventory by making API requests to the Azure Resource
-Manager using the Azure Python SDK. For instructions on installing the
-Azure Python SDK, see https://azure-sdk-for-python.readthedocs.io/
-
-Authentication
---------------
-The order of precedence is command line arguments, environment variables,
-and finally the [default] profile found in ~/.azure/credentials.
-
-If using a credentials file, it should be an ini formatted file with one or
-more sections, which we refer to as profiles. The script looks for a
-[default] section if a profile is not specified either on the command line
-or with an environment variable. The keys in a profile will match the
-list of command line arguments below.
-
-Command line arguments and environment variables can specify either a profile
-found in your ~/.azure/credentials file, or a service principal or Active
-Directory user directly.
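-
-A minimal ~/.azure/credentials sketch (the keys match the command line
-arguments listed below; all values here are placeholders):
-
-    [default]
-    subscription_id=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-    client_id=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-    secret=my-client-secret
-    tenant=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx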
-
-Command line arguments:
- - profile
- - client_id
- - secret
- - subscription_id
- - tenant
- - ad_user
- - password
- - cloud_environment
- - adfs_authority_url
-
-Environment variables:
- - AZURE_PROFILE
- - AZURE_CLIENT_ID
- - AZURE_SECRET
- - AZURE_SUBSCRIPTION_ID
- - AZURE_TENANT
- - AZURE_AD_USER
- - AZURE_PASSWORD
- - AZURE_CLOUD_ENVIRONMENT
- - AZURE_ADFS_AUTHORITY_URL
-
-Run for Specific Host
------------------------
-When run for a specific host using the --host option, a resource group is
-required. For a specific host, this script returns the following variables:
-
-{
- "ansible_host": "XXX.XXX.XXX.XXX",
- "computer_name": "computer_name2",
- "fqdn": null,
- "id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name",
- "image": {
- "offer": "CentOS",
- "publisher": "OpenLogic",
- "sku": "7.1",
- "version": "latest"
- },
- "location": "westus",
- "mac_address": "00-00-5E-00-53-FE",
- "name": "object-name",
- "network_interface": "interface-name",
- "network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",
- "network_security_group": null,
- "network_security_group_id": null,
- "os_disk": {
- "name": "object-name",
- "operating_system_type": "Linux"
- },
- "plan": null,
- "powerstate": "running",
- "private_ip": "172.26.3.6",
- "private_ip_alloc_method": "Static",
- "provisioning_state": "Succeeded",
- "public_ip": "XXX.XXX.XXX.XXX",
- "public_ip_alloc_method": "Static",
- "public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name",
- "public_ip_name": "object-name",
- "resource_group": "galaxy-production",
- "security_group": "object-name",
- "security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name",
- "tags": {
- "db": "database"
- },
- "type": "Microsoft.Compute/virtualMachines",
- "virtual_machine_size": "Standard_DS4"
-}
-
-Groups
-------
-When run in --list mode, instances are grouped by the following categories:
- - azure
- - location
- - resource_group
- - security_group
- - tag key
- - tag key_value
-
-Control groups using azure_rm.ini or set environment variables:
-
-AZURE_GROUP_BY_RESOURCE_GROUP=yes
-AZURE_GROUP_BY_LOCATION=yes
-AZURE_GROUP_BY_SECURITY_GROUP=yes
-AZURE_GROUP_BY_TAG=yes
-
-Select hosts within specific resource groups by assigning a comma separated list to:
-
-AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b
-
-Select hosts for specific tag key by assigning a comma separated list of tag keys to:
-
-AZURE_TAGS=key1,key2,key3
-
-Select hosts for specific locations:
-
-AZURE_LOCATIONS=eastus,westus,eastus2
-
-Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:
-
-AZURE_TAGS=key1:value1,key2:value2
-
-If you don't need the powerstate, you can improve performance by turning off powerstate fetching:
-AZURE_INCLUDE_POWERSTATE=no
-
-azure_rm.ini
-------------
-As mentioned above, you can control execution using environment variables or a .ini file. A sample
-azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case
-'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify
-a different path for the .ini file, define the AZURE_INI_PATH environment variable:
-
- export AZURE_INI_PATH=/path/to/custom.ini
-
-Powerstate:
------------
-The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is
-up. If the value is anything other than 'running', the machine is down, and will be unreachable.
-
-Examples:
----------
- Execute /bin/uname on all instances in the galaxy-qa resource group
- $ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a"
-
- Use the inventory script to print instance specific information
- $ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty
-
- Use with a playbook
- $ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa
-
-
-Insecure Platform Warning
--------------------------
-If you receive InsecurePlatformWarning from urllib3, install the
-requests security packages:
-
- pip install requests[security]
-
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
-
-Company: Ansible by Red Hat
-
-Version: 1.0.0
-'''
-
-import argparse
-import json
-import os
-import re
-import sys
-import inspect
-
-from os.path import expanduser
-from ansible.module_utils.six.moves import configparser as cp
-import ansible.module_utils.six.moves.urllib.parse as urlparse
-
-HAS_AZURE = True
-HAS_AZURE_EXC = None
-HAS_AZURE_CLI_CORE = True
-CLIError = None
-
-try:
- from msrestazure.azure_active_directory import AADTokenCredentials
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.azure_active_directory import MSIAuthentication
- from msrestazure import azure_cloud
- from azure.mgmt.compute import __version__ as azure_compute_version
- from azure.common import AzureMissingResourceHttpError, AzureHttpError
- from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
- from azure.mgmt.network import NetworkManagementClient
- from azure.mgmt.resource.resources import ResourceManagementClient
- from azure.mgmt.resource.subscriptions import SubscriptionClient
- from azure.mgmt.compute import ComputeManagementClient
- from adal.authentication_context import AuthenticationContext
-except ImportError as exc:
- HAS_AZURE_EXC = exc
- HAS_AZURE = False
-
-try:
- from azure.cli.core.util import CLIError
- from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
- from azure.common.cloud import get_cli_active_cloud
-except ImportError:
- HAS_AZURE_CLI_CORE = False
- CLIError = Exception
-
-try:
- from ansible.release import __version__ as ansible_version
-except ImportError:
- ansible_version = 'unknown'
-
-AZURE_CREDENTIAL_ENV_MAPPING = dict(
- profile='AZURE_PROFILE',
- subscription_id='AZURE_SUBSCRIPTION_ID',
- client_id='AZURE_CLIENT_ID',
- secret='AZURE_SECRET',
- tenant='AZURE_TENANT',
- ad_user='AZURE_AD_USER',
- password='AZURE_PASSWORD',
- cloud_environment='AZURE_CLOUD_ENVIRONMENT',
- adfs_authority_url='AZURE_ADFS_AUTHORITY_URL'
-)
-
-AZURE_CONFIG_SETTINGS = dict(
- resource_groups='AZURE_RESOURCE_GROUPS',
- tags='AZURE_TAGS',
- locations='AZURE_LOCATIONS',
- include_powerstate='AZURE_INCLUDE_POWERSTATE',
- group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
- group_by_location='AZURE_GROUP_BY_LOCATION',
- group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP',
- group_by_tag='AZURE_GROUP_BY_TAG',
- group_by_os_family='AZURE_GROUP_BY_OS_FAMILY',
- use_private_ip='AZURE_USE_PRIVATE_IP'
-)
-
-AZURE_MIN_VERSION = "2.0.0"
-ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ansible_version)
-
-
-def azure_id_to_dict(resource_id):
-    pieces = re.sub(r'^\/', '', resource_id).split('/')
- result = {}
- index = 0
- while index < len(pieces) - 1:
- result[pieces[index]] = pieces[index + 1]
- index += 1
- return result
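-# Note: consecutive pieces are paired with a step of 1, so intermediate values
-# also appear as keys, e.g. '/subscriptions/abc/resourceGroups/rg1' yields
-# {'subscriptions': 'abc', 'abc': 'resourceGroups', 'resourceGroups': 'rg1'}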
-
-
-class AzureRM(object):
-
- def __init__(self, args):
- self._args = args
- self._cloud_environment = None
- self._compute_client = None
- self._resource_client = None
- self._network_client = None
- self._adfs_authority_url = None
- self._resource = None
-
- self.debug = False
- if args.debug:
- self.debug = True
-
- self.credentials = self._get_credentials(args)
- if not self.credentials:
- self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
- "or define a profile in ~/.azure/credentials.")
-
- # if cloud_environment specified, look up/build Cloud object
- raw_cloud_env = self.credentials.get('cloud_environment')
- if not raw_cloud_env:
- self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default
- else:
- # try to look up "well-known" values via the name attribute on azure_cloud members
- all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
- matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
- if len(matched_clouds) == 1:
- self._cloud_environment = matched_clouds[0]
- elif len(matched_clouds) > 1:
- self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
- else:
- if not urlparse.urlparse(raw_cloud_env).scheme:
- self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
- try:
- self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
- except Exception as e:
-                    self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e))
-
- if self.credentials.get('subscription_id', None) is None:
- self.fail("Credentials did not include a subscription_id value.")
- self.log("setting subscription_id")
- self.subscription_id = self.credentials['subscription_id']
-
- # get authentication authority
- # for adfs, user could pass in authority or not.
- # for others, use default authority from cloud environment
- if self.credentials.get('adfs_authority_url'):
- self._adfs_authority_url = self.credentials.get('adfs_authority_url')
- else:
- self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
-
- # get resource from cloud environment
- self._resource = self._cloud_environment.endpoints.active_directory_resource_id
-
- if self.credentials.get('credentials'):
- self.azure_credentials = self.credentials.get('credentials')
- elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'):
- self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
- secret=self.credentials['secret'],
- tenant=self.credentials['tenant'],
- cloud_environment=self._cloud_environment)
-
- elif self.credentials.get('ad_user') is not None and \
- self.credentials.get('password') is not None and \
- self.credentials.get('client_id') is not None and \
- self.credentials.get('tenant') is not None:
-
- self.azure_credentials = self.acquire_token_with_username_password(
- self._adfs_authority_url,
- self._resource,
- self.credentials['ad_user'],
- self.credentials['password'],
- self.credentials['client_id'],
- self.credentials['tenant'])
-
- elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
- tenant = self.credentials.get('tenant')
- if not tenant:
- tenant = 'common'
- self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
- self.credentials['password'],
- tenant=tenant,
- cloud_environment=self._cloud_environment)
-
- else:
- self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
- "Credentials must include client_id, secret and tenant or ad_user and password, or "
- "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or "
- "be logged in using AzureCLI.")
-
- def log(self, msg):
- if self.debug:
- print(msg + u'\n')
-
- def fail(self, msg):
- raise Exception(msg)
-
- def _get_profile(self, profile="default"):
- path = expanduser("~")
- path += "/.azure/credentials"
- try:
- config = cp.ConfigParser()
- config.read(path)
- except Exception as exc:
- self.fail("Failed to access {0}. Check that the file exists and you have read "
- "access. {1}".format(path, str(exc)))
- credentials = dict()
- for key in AZURE_CREDENTIAL_ENV_MAPPING:
- try:
- credentials[key] = config.get(profile, key, raw=True)
- except Exception:
- pass
-
- if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
- return credentials
-
- return None
-
- def _get_env_credentials(self):
- env_credentials = dict()
- for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
- env_credentials[attribute] = os.environ.get(env_variable, None)
-
- if env_credentials['profile'] is not None:
- credentials = self._get_profile(env_credentials['profile'])
- return credentials
-
- if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None:
- return env_credentials
-
- return None
-
- def _get_azure_cli_credentials(self):
- credentials, subscription_id = get_azure_cli_credentials()
- cloud_environment = get_cli_active_cloud()
-
- cli_credentials = {
- 'credentials': credentials,
- 'subscription_id': subscription_id,
- 'cloud_environment': cloud_environment
- }
- return cli_credentials
-
- def _get_msi_credentials(self, subscription_id_param=None):
- credentials = MSIAuthentication()
- subscription_id_param = subscription_id_param or os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING['subscription_id'], None)
- try:
- # try to get the subscription in MSI to test whether MSI is enabled
- subscription_client = SubscriptionClient(credentials)
- subscription = next(subscription_client.subscriptions.list())
- subscription_id = str(subscription.subscription_id)
- return {
- 'credentials': credentials,
- 'subscription_id': subscription_id_param or subscription_id
- }
- except Exception:
- return None
-
- def _get_credentials(self, params):
- # Get authentication credentials.
- # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials.
-
- self.log('Getting credentials')
-
- arg_credentials = dict()
- for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
- arg_credentials[attribute] = getattr(params, attribute)
-
- # try module params
- if arg_credentials['profile'] is not None:
- self.log('Retrieving credentials with profile parameter.')
- credentials = self._get_profile(arg_credentials['profile'])
- return credentials
-
- if arg_credentials['client_id'] is not None:
- self.log('Received credentials from parameters.')
- return arg_credentials
-
- if arg_credentials['ad_user'] is not None:
- self.log('Received credentials from parameters.')
- return arg_credentials
-
- # try environment
- env_credentials = self._get_env_credentials()
- if env_credentials:
- self.log('Received credentials from env.')
- return env_credentials
-
- # try default profile from ~./azure/credentials
- default_credentials = self._get_profile()
- if default_credentials:
- self.log('Retrieved default profile credentials from ~/.azure/credentials.')
- return default_credentials
-
- msi_credentials = self._get_msi_credentials(arg_credentials.get('subscription_id'))
- if msi_credentials:
- self.log('Retrieved credentials from MSI.')
- return msi_credentials
-
- try:
- if HAS_AZURE_CLI_CORE:
- self.log('Retrieving credentials from AzureCLI profile')
- cli_credentials = self._get_azure_cli_credentials()
- return cli_credentials
- except CLIError as ce:
- self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
-
- return None
-
- def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):
- authority_uri = authority
-
- if tenant is not None:
- authority_uri = authority + '/' + tenant
-
- context = AuthenticationContext(authority_uri)
- token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
- return AADTokenCredentials(token_response)
-
- def _register(self, key):
- try:
- # We have to perform the one-time registration here. Otherwise, we receive an error the first
- # time we attempt to use the requested client.
- resource_client = self.rm_client
- resource_client.providers.register(key)
- except Exception as exc:
- self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
- self.log("You might need to register {0} using an admin account".format(key))
- self.log(("To register a provider using the Python CLI: "
- "https://docs.microsoft.com/azure/azure-resource-manager/"
- "resource-manager-common-deployment-errors#noregisteredproviderfound"))
-
- def get_mgmt_svc_client(self, client_type, base_url, api_version):
- client = client_type(self.azure_credentials,
- self.subscription_id,
- base_url=base_url,
- api_version=api_version)
- client.config.add_user_agent(ANSIBLE_USER_AGENT)
- return client
-
- @property
- def network_client(self):
- self.log('Getting network client')
- if not self._network_client:
- self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
- self._cloud_environment.endpoints.resource_manager,
- '2017-06-01')
- self._register('Microsoft.Network')
- return self._network_client
-
- @property
- def rm_client(self):
- self.log('Getting resource manager client')
- if not self._resource_client:
- self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
- self._cloud_environment.endpoints.resource_manager,
- '2017-05-10')
- return self._resource_client
-
- @property
- def compute_client(self):
- self.log('Getting compute client')
- if not self._compute_client:
- self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
- self._cloud_environment.endpoints.resource_manager,
- '2017-03-30')
- self._register('Microsoft.Compute')
- return self._compute_client
-
-
-class AzureInventory(object):
-
- def __init__(self):
-
- self._args = self._parse_cli_args()
-
- try:
- rm = AzureRM(self._args)
- except Exception as e:
- sys.exit("{0}".format(str(e)))
-
- self._compute_client = rm.compute_client
- self._network_client = rm.network_client
- self._resource_client = rm.rm_client
- self._security_groups = None
-
- self.resource_groups = []
- self.tags = None
- self.locations = None
- self.replace_dash_in_groups = False
- self.group_by_resource_group = True
- self.group_by_location = True
- self.group_by_os_family = True
- self.group_by_security_group = True
- self.group_by_tag = True
- self.include_powerstate = True
- self.use_private_ip = False
-
- self._inventory = dict(
- _meta=dict(
- hostvars=dict()
- ),
- azure=[]
- )
-
- self._get_settings()
-
- if self._args.resource_groups:
- self.resource_groups = self._args.resource_groups.split(',')
-
- if self._args.tags:
- self.tags = self._args.tags.split(',')
-
- if self._args.locations:
- self.locations = self._args.locations.split(',')
-
- if self._args.no_powerstate:
- self.include_powerstate = False
-
- self.get_inventory()
- print(self._json_format_dict(pretty=self._args.pretty))
- sys.exit(0)
-
- def _parse_cli_args(self):
- # Parse command line arguments
- parser = argparse.ArgumentParser(
- description='Produce an Ansible Inventory file for an Azure subscription')
- parser.add_argument('--list', action='store_true', default=True,
- help='List instances (default: True)')
- parser.add_argument('--debug', action='store_true', default=False,
- help='Send debug messages to STDOUT')
- parser.add_argument('--host', action='store',
- help='Get all information about an instance')
- parser.add_argument('--pretty', action='store_true', default=False,
- help='Pretty print JSON output (default: False)')
- parser.add_argument('--profile', action='store',
- help='Azure profile contained in ~/.azure/credentials')
- parser.add_argument('--subscription_id', action='store',
- help='Azure Subscription Id')
- parser.add_argument('--client_id', action='store',
- help='Azure Client Id')
- parser.add_argument('--secret', action='store',
- help='Azure Client Secret')
- parser.add_argument('--tenant', action='store',
- help='Azure Tenant Id')
- parser.add_argument('--ad_user', action='store',
- help='Active Directory User')
- parser.add_argument('--password', action='store',
- help='password')
- parser.add_argument('--adfs_authority_url', action='store',
- help='Azure ADFS authority url')
- parser.add_argument('--cloud_environment', action='store',
- help='Azure Cloud Environment name or metadata discovery URL')
- parser.add_argument('--resource-groups', action='store',
- help='Return inventory for comma separated list of resource group names')
- parser.add_argument('--tags', action='store',
- help='Return inventory for comma separated list of tag key:value pairs')
- parser.add_argument('--locations', action='store',
- help='Return inventory for comma separated list of locations')
- parser.add_argument('--no-powerstate', action='store_true', default=False,
- help='Do not include the power state of each virtual host')
- return parser.parse_args()
-
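- # Typical invocations of this script (resource group, location and host
- # names below are hypothetical):
- #
- #   ./azure_rm.py --list --pretty
- #   ./azure_rm.py --resource-groups my_rg1,my_rg2 --locations eastus
- #   ./azure_rm.py --host my_vm --pretty
-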
- def get_inventory(self):
- if len(self.resource_groups) > 0:
- # get VMs for requested resource groups
- for resource_group in self.resource_groups:
- try:
- virtual_machines = self._compute_client.virtual_machines.list(resource_group.lower())
- except Exception as exc:
- sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group, str(exc)))
- if self._args.host or self.tags:
- selected_machines = self._selected_machines(virtual_machines)
- self._load_machines(selected_machines)
- else:
- self._load_machines(virtual_machines)
- else:
- # get all VMs within the subscription
- try:
- virtual_machines = self._compute_client.virtual_machines.list_all()
- except Exception as exc:
- sys.exit("Error: fetching virtual machines - {0}".format(str(exc)))
-
- if self._args.host or self.tags or self.locations:
- selected_machines = self._selected_machines(virtual_machines)
- self._load_machines(selected_machines)
- else:
- self._load_machines(virtual_machines)
-
- def _load_machines(self, machines):
- for machine in machines:
- id_dict = azure_id_to_dict(machine.id)
-
- # TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
- # fixed, we should remove the .lower(). Opened Issue
- # #574: https://github.com/Azure/azure-sdk-for-python/issues/574
- resource_group = id_dict['resourceGroups'].lower()
-
- if self.group_by_security_group:
- self._get_security_groups(resource_group)
-
- host_vars = dict(
- ansible_host=None,
- private_ip=None,
- private_ip_alloc_method=None,
- public_ip=None,
- public_ip_name=None,
- public_ip_id=None,
- public_ip_alloc_method=None,
- fqdn=None,
- location=machine.location,
- name=machine.name,
- type=machine.type,
- id=machine.id,
- tags=machine.tags,
- network_interface_id=None,
- network_interface=None,
- resource_group=resource_group,
- mac_address=None,
- plan=(machine.plan.name if machine.plan else None),
- virtual_machine_size=machine.hardware_profile.vm_size,
- computer_name=(machine.os_profile.computer_name if machine.os_profile else None),
- provisioning_state=machine.provisioning_state,
- )
-
- host_vars['os_disk'] = dict(
- name=machine.storage_profile.os_disk.name,
- operating_system_type=machine.storage_profile.os_disk.os_type.value.lower()
- )
-
- if self.include_powerstate:
- host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name)
-
- if machine.storage_profile.image_reference:
- host_vars['image'] = dict(
- offer=machine.storage_profile.image_reference.offer,
- publisher=machine.storage_profile.image_reference.publisher,
- sku=machine.storage_profile.image_reference.sku,
- version=machine.storage_profile.image_reference.version
- )
-
- # Add windows details
- if machine.os_profile is not None and machine.os_profile.windows_configuration is not None:
- host_vars['ansible_connection'] = 'winrm'
- host_vars['windows_auto_updates_enabled'] = \
- machine.os_profile.windows_configuration.enable_automatic_updates
- host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone
- host_vars['windows_rm'] = None
- if machine.os_profile.windows_configuration.win_rm is not None:
- host_vars['windows_rm'] = dict(listeners=None)
- if machine.os_profile.windows_configuration.win_rm.listeners is not None:
- host_vars['windows_rm']['listeners'] = []
- for listener in machine.os_profile.windows_configuration.win_rm.listeners:
- host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol.name,
- certificate_url=listener.certificate_url))
-
- for interface in machine.network_profile.network_interfaces:
- interface_reference = self._parse_ref_id(interface.id)
- network_interface = self._network_client.network_interfaces.get(
- interface_reference['resourceGroups'],
- interface_reference['networkInterfaces'])
- if network_interface.primary:
- if self.group_by_security_group and \
- self._security_groups[resource_group].get(network_interface.id, None):
- host_vars['security_group'] = \
- self._security_groups[resource_group][network_interface.id]['name']
- host_vars['security_group_id'] = \
- self._security_groups[resource_group][network_interface.id]['id']
- host_vars['network_interface'] = network_interface.name
- host_vars['network_interface_id'] = network_interface.id
- host_vars['mac_address'] = network_interface.mac_address
- for ip_config in network_interface.ip_configurations:
- host_vars['private_ip'] = ip_config.private_ip_address
- host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method
- if self.use_private_ip:
- host_vars['ansible_host'] = ip_config.private_ip_address
- if ip_config.public_ip_address:
- public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
- public_ip_address = self._network_client.public_ip_addresses.get(
- public_ip_reference['resourceGroups'],
- public_ip_reference['publicIPAddresses'])
- if not self.use_private_ip:
- host_vars['ansible_host'] = public_ip_address.ip_address
- host_vars['public_ip'] = public_ip_address.ip_address
- host_vars['public_ip_name'] = public_ip_address.name
- host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method
- host_vars['public_ip_id'] = public_ip_address.id
- if public_ip_address.dns_settings:
- host_vars['fqdn'] = public_ip_address.dns_settings.fqdn
-
- self._add_host(host_vars)
-
- def _selected_machines(self, virtual_machines):
- selected_machines = []
- for machine in virtual_machines:
- # use elif so a machine matching several filters is only added once
- if self._args.host and self._args.host == machine.name:
- selected_machines.append(machine)
- elif self.tags and self._tags_match(machine.tags, self.tags):
- selected_machines.append(machine)
- elif self.locations and machine.location in self.locations:
- selected_machines.append(machine)
- return selected_machines
-
- def _get_security_groups(self, resource_group):
- ''' For a given resource_group build a mapping of network_interface.id to security_group name '''
- if not self._security_groups:
- self._security_groups = dict()
- if not self._security_groups.get(resource_group):
- self._security_groups[resource_group] = dict()
- for group in self._network_client.network_security_groups.list(resource_group):
- if group.network_interfaces:
- for interface in group.network_interfaces:
- self._security_groups[resource_group][interface.id] = dict(
- name=group.name,
- id=group.id
- )
-
- def _get_powerstate(self, resource_group, name):
- try:
- vm = self._compute_client.virtual_machines.get(resource_group,
- name,
- expand='instanceview')
- except Exception as exc:
- sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc)))
-
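- # Azure reports power state via status codes such as
- # 'PowerState/running'; strip the prefix so hosts get e.g. 'running'.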
- return next((s.code.replace('PowerState/', '')
- for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)
-
- def _add_host(self, vars):
-
- host_name = self._to_safe(vars['name'])
- resource_group = self._to_safe(vars['resource_group'])
- operating_system_type = self._to_safe(vars['os_disk']['operating_system_type'].lower())
- security_group = None
- if vars.get('security_group'):
- security_group = self._to_safe(vars['security_group'])
-
- if self.group_by_os_family:
- if not self._inventory.get(operating_system_type):
- self._inventory[operating_system_type] = []
- self._inventory[operating_system_type].append(host_name)
-
- if self.group_by_resource_group:
- if not self._inventory.get(resource_group):
- self._inventory[resource_group] = []
- self._inventory[resource_group].append(host_name)
-
- if self.group_by_location:
- if not self._inventory.get(vars['location']):
- self._inventory[vars['location']] = []
- self._inventory[vars['location']].append(host_name)
-
- if self.group_by_security_group and security_group:
- if not self._inventory.get(security_group):
- self._inventory[security_group] = []
- self._inventory[security_group].append(host_name)
-
- self._inventory['_meta']['hostvars'][host_name] = vars
- self._inventory['azure'].append(host_name)
-
- if self.group_by_tag and vars.get('tags'):
- for key, value in vars['tags'].items():
- safe_key = self._to_safe(key)
- safe_value = safe_key + '_' + self._to_safe(value)
- if not self._inventory.get(safe_key):
- self._inventory[safe_key] = []
- if not self._inventory.get(safe_value):
- self._inventory[safe_value] = []
- self._inventory[safe_key].append(host_name)
- self._inventory[safe_value].append(host_name)
-
- def _json_format_dict(self, pretty=False):
- # convert inventory to json
- if pretty:
- return json.dumps(self._inventory, sort_keys=True, indent=2)
- else:
- return json.dumps(self._inventory)
-
- def _get_settings(self):
- # Load settings from the .ini, if it exists. Otherwise,
- # look for environment values.
- file_settings = self._load_settings()
- if file_settings:
- for key in AZURE_CONFIG_SETTINGS:
- if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key):
- values = file_settings.get(key).split(',')
- if len(values) > 0:
- setattr(self, key, values)
- elif file_settings.get(key):
- val = self._to_boolean(file_settings[key])
- setattr(self, key, val)
- else:
- env_settings = self._get_env_settings()
- for key in AZURE_CONFIG_SETTINGS:
- if key in ('resource_groups', 'tags', 'locations') and env_settings.get(key):
- values = env_settings.get(key).split(',')
- if len(values) > 0:
- setattr(self, key, values)
- elif env_settings.get(key, None) is not None:
- val = self._to_boolean(env_settings[key])
- setattr(self, key, val)
-
- def _parse_ref_id(self, reference):
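- # Split an Azure resource id of the form '/key1/val1/key2/val2/...'
- # into a dict {'key1': 'val1', 'key2': 'val2', ...}; e.g. a
- # (hypothetical) id '/subscriptions/xxx/resourceGroups/my_rg' parses
- # to {'subscriptions': 'xxx', 'resourceGroups': 'my_rg'}.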
- response = {}
- keys = reference.strip('/').split('/')
- for index in range(len(keys)):
- if index < len(keys) - 1 and index % 2 == 0:
- response[keys[index]] = keys[index + 1]
- return response
-
- def _to_boolean(self, value):
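- # Note: values not recognized as truthy or falsey deliberately fall
- # back to True (see the final else branch).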
- if value in ['Yes', 'yes', 1, 'True', 'true', True]:
- result = True
- elif value in ['No', 'no', 0, 'False', 'false', False]:
- result = False
- else:
- result = True
- return result
-
- def _get_env_settings(self):
- env_settings = dict()
- for attribute, env_variable in AZURE_CONFIG_SETTINGS.items():
- env_settings[attribute] = os.environ.get(env_variable, None)
- return env_settings
-
- def _load_settings(self):
- basename = os.path.splitext(os.path.basename(__file__))[0]
- default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini'))
- path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path)))
- config = None
- settings = None
- try:
- config = cp.ConfigParser()
- config.read(path)
- except Exception:
- pass
-
- if config is not None:
- settings = dict()
- for key in AZURE_CONFIG_SETTINGS:
- try:
- settings[key] = config.get('azure', key, raw=True)
- except Exception:
- pass
-
- return settings
-
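- # For reference, a minimal ini file that _load_settings() could read
- # might look like this (the section must be named 'azure'; the keys are
- # assumed to match AZURE_CONFIG_SETTINGS and the values are hypothetical):
- #
- #   [azure]
- #   locations=eastus,westus
- #   include_powerstate=yes
- #   group_by_location=yes
-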
- def _tags_match(self, tag_obj, tag_args):
- '''
- Return True if the tags object from a VM contains the requested tag values.
-
- :param tag_obj: Dictionary of string:string pairs
- :param tag_args: List of strings in the form key:value
- :return: boolean
- '''
-
- if not tag_obj:
- return False
-
- matches = 0
- for arg in tag_args:
- arg_key = arg
- arg_value = None
- if re.search(r':', arg):
- arg_key, arg_value = arg.split(':')
- if arg_value and tag_obj.get(arg_key, None) == arg_value:
- matches += 1
- elif not arg_value and tag_obj.get(arg_key, None) is not None:
- matches += 1
- if matches == len(tag_args):
- return True
- return False
-
- def _to_safe(self, word):
- ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
- regex = r"[^A-Za-z0-9\_"
- if not self.replace_dash_in_groups:
- regex += r"\-"
- return re.sub(regex + "]", "_", word)
-
-
-def main():
- if not HAS_AZURE:
- sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format(AZURE_MIN_VERSION, HAS_AZURE_EXC))
-
- AzureInventory()
-
-
-if __name__ == '__main__':
- main()
diff --git a/contrib/inventory/brook.ini b/contrib/inventory/brook.ini
deleted file mode 100644
index e88c363150..0000000000
--- a/contrib/inventory/brook.ini
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2016 Doalitic.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# The Brook.io inventory script has the following dependencies:
-# 1. A working Brook.io account
-# See https://brook.io
-# 2. A valid token generated through the 'API token' panel of Brook.io
-# 3. The libbrook Python library.
-# See https://github.com/doalitic/libbrook
-#
-# Author: Francisco Ros <fjros@doalitic.com>
-
-[brook]
-# Valid API token (required).
-# E.g. 'Aed342a12A60433697281FeEe1a4037C'
-#
-api_token =
-
-# Project id within Brook.io, as obtained from the project settings (optional). If provided, the
-# generated inventory will only include the hosts that belong to that project. Otherwise, it will
-# include all hosts in projects the requesting user has access to. The response includes groups
-# named 'project_x', where 'x' is the project name.
-# E.g. '2e8e099e1bc34cc0979d97ac34e9577b'
-#
-project_id =
diff --git a/contrib/inventory/brook.py b/contrib/inventory/brook.py
deleted file mode 100755
index 236571315b..0000000000
--- a/contrib/inventory/brook.py
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2016 Doalitic.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-Brook.io external inventory script
-==================================
-
-Generates inventory that Ansible can understand by making API requests to Brook.io via the libbrook
-library. Hence, such dependency must be installed in the system to run this script.
-
-The default configuration file is named 'brook.ini' and is located alongside this script. You can
-choose any other file by setting the BROOK_INI_PATH environment variable.
-
-If param 'project_id' is left blank in 'brook.ini', the inventory includes all the instances in
-projects where the requesting user belongs. Otherwise, only instances from the given project are
-included, provided the requesting user belongs to it.
-
-The following variables are established for every host. They can be retrieved from the hostvars
-dictionary.
- - brook_pid: str
- - brook_name: str
- - brook_description: str
- - brook_project: str
- - brook_template: str
- - brook_region: str
- - brook_zone: str
- - brook_status: str
- - brook_tags: list(str)
- - brook_internal_ips: list(str)
- - brook_external_ips: list(str)
- - brook_created_at
- - brook_updated_at
- - ansible_ssh_host
-
-Instances are grouped by the following categories:
- - tag:
- A group is created for each tag. E.g. groups 'tag_foo' and 'tag_bar' are created if there exist
- instances with tags 'foo' and/or 'bar'.
- - project:
- A group is created for each project. E.g. group 'project_test' is created if a project named
- 'test' exists.
- - status:
- A group is created for each instance state. E.g. groups 'status_RUNNING' and 'status_PENDING'
- are created if there are instances in running and pending state.
-
-Examples:
- Execute uname on all instances in project 'test'
- $ ansible -i brook.py project_test -m shell -a "/bin/uname -a"
-
- Install nginx on all debian web servers tagged with 'www'
- $ ansible -i brook.py tag_www -m apt -a "name=nginx state=present"
-
- Run site.yml playbook on web servers
- $ ansible-playbook -i brook.py site.yml -l tag_www
-
-Support:
- This script is tested on Python 2.7 and 3.4. It may work on other versions though.
-
-Author: Francisco Ros <fjros@doalitic.com>
-Version: 0.2
-"""
-
-
-import sys
-import os
-
-from ansible.module_utils.six.moves.configparser import SafeConfigParser as ConfigParser
-
-import json
-
-try:
- import libbrook
-except Exception:
- sys.exit('Brook.io inventory script requires libbrook. See https://github.com/doalitic/libbrook')
-
-
-class BrookInventory:
-
- _API_ENDPOINT = 'https://api.brook.io'
-
- def __init__(self):
- self._configure_from_file()
- self.client = self.get_api_client()
- self.inventory = self.get_inventory()
-
- def _configure_from_file(self):
- """Initialize from .ini file.
-
- Configuration file is assumed to be named 'brook.ini' and to be located in the same
- directory as this file, unless the environment variable BROOK_INI_PATH says otherwise.
- """
-
- brook_ini_default_path = \
- os.path.join(os.path.dirname(os.path.realpath(__file__)), 'brook.ini')
- brook_ini_path = os.environ.get('BROOK_INI_PATH', brook_ini_default_path)
-
- config = ConfigParser(defaults={
- 'api_token': '',
- 'project_id': ''
- })
- config.read(brook_ini_path)
- self.api_token = config.get('brook', 'api_token')
- self.project_id = config.get('brook', 'project_id')
-
- if not self.api_token:
- sys.exit('You must provide (at least) your Brook.io API token to generate the dynamic '
- 'inventory.')
-
- def get_api_client(self):
- """Authenticate user via the provided credentials and return the corresponding API client.
- """
-
- # Get JWT token from API token
- #
- unauthenticated_client = libbrook.ApiClient(host=self._API_ENDPOINT)
- auth_api = libbrook.AuthApi(unauthenticated_client)
- api_token = libbrook.AuthTokenRequest()
- api_token.token = self.api_token
- jwt = auth_api.auth_token(token=api_token)
-
- # Create authenticated API client
- #
- return libbrook.ApiClient(host=self._API_ENDPOINT,
- header_name='Authorization',
- header_value='Bearer %s' % jwt.token)
-
- def get_inventory(self):
- """Generate Ansible inventory.
- """
-
- groups = dict()
- meta = dict()
- meta['hostvars'] = dict()
-
- instances_api = libbrook.InstancesApi(self.client)
- projects_api = libbrook.ProjectsApi(self.client)
- templates_api = libbrook.TemplatesApi(self.client)
-
- # If no project is given, get all projects the requesting user has access to
- #
- if not self.project_id:
- projects = [project.id for project in projects_api.index_projects()]
- else:
- projects = [self.project_id]
-
- # Build inventory from instances in all projects
- #
- for project_id in projects:
- project = projects_api.show_project(project_id=project_id)
- for instance in instances_api.index_instances(project_id=project_id):
- # Get template used for this instance if known
- template = templates_api.show_template(template_id=instance.template) if instance.template else None
-
- # Update hostvars
- try:
- meta['hostvars'][instance.name] = \
- self.hostvars(project, instance, template, instances_api)
- except libbrook.rest.ApiException:
- continue
-
- # Group by project
- project_group = 'project_%s' % project.name
- groups.setdefault(project_group, []).append(instance.name)
-
- # Group by status
- status_group = 'status_%s' % meta['hostvars'][instance.name]['brook_status']
- groups.setdefault(status_group, []).append(instance.name)
-
- # Group by tags
- tags = meta['hostvars'][instance.name]['brook_tags']
- for tag in tags:
- tag_group = 'tag_%s' % tag
- groups.setdefault(tag_group, []).append(instance.name)
-
- groups['_meta'] = meta
- return groups
-
- def hostvars(self, project, instance, template, api):
- """Return the hostvars dictionary for the given instance.
-
- Raise libbrook.rest.ApiException if it cannot retrieve all required information from the
- Brook.io API.
- """
-
- hostvars = instance.to_dict()
- hostvars['brook_pid'] = hostvars.pop('pid')
- hostvars['brook_name'] = hostvars.pop('name')
- hostvars['brook_description'] = hostvars.pop('description')
- hostvars['brook_project'] = hostvars.pop('project')
- hostvars['brook_template'] = hostvars.pop('template')
- hostvars['brook_region'] = hostvars.pop('region')
- hostvars['brook_zone'] = hostvars.pop('zone')
- hostvars['brook_created_at'] = hostvars.pop('created_at')
- hostvars['brook_updated_at'] = hostvars.pop('updated_at')
- del hostvars['id']
- del hostvars['key']
- del hostvars['provider']
- del hostvars['image']
-
- # Substitute identifiers for names
- #
- hostvars['brook_project'] = project.name
- hostvars['brook_template'] = template.name if template else None
-
- # Retrieve instance state
- #
- status = api.status_instance(project_id=project.id, instance_id=instance.id)
- hostvars.update({'brook_status': status.state})
-
- # Retrieve instance tags
- #
- tags = api.instance_tags(project_id=project.id, instance_id=instance.id)
- hostvars.update({'brook_tags': tags})
-
- # Retrieve instance addresses
- #
- addresses = api.instance_addresses(project_id=project.id, instance_id=instance.id)
- internal_ips = [address.address for address in addresses if address.scope == 'internal']
- external_ips = [address.address for address in addresses
- if address.address and address.scope == 'external']
- hostvars.update({'brook_internal_ips': internal_ips})
- hostvars.update({'brook_external_ips': external_ips})
- try:
- hostvars.update({'ansible_ssh_host': external_ips[0]})
- except IndexError:
- raise libbrook.rest.ApiException(status='502', reason='Instance without public IP')
-
- return hostvars
-
-
-# Run the script
-#
-brook = BrookInventory()
-print(json.dumps(brook.inventory))
diff --git a/contrib/inventory/cloudforms.ini b/contrib/inventory/cloudforms.ini
deleted file mode 100644
index 30b9aa609e..0000000000
--- a/contrib/inventory/cloudforms.ini
+++ /dev/null
@@ -1,40 +0,0 @@
-[cloudforms]
-
-# The version of CloudForms; currently unused, but the script was tested against this version
-version = 4.1
-
-# This should be the hostname of the CloudForms server
-url = https://cfme.example.com
-
-# This will more than likely need to be a local CloudForms username
-username = <set your username here>
-
-# The password for said username
-password = <set your password here>
-
-# True = verify SSL certificate / False = trust anything
-ssl_verify = True
-
-# limit the number of vms returned per request
-limit = 100
-
-# purge the CloudForms actions from hosts
-purge_actions = True
-
-# Clean up group names (from tags and other groupings) so Ansible doesn't complain
-clean_group_keys = True
-
-# Explode tags into nested groups / subgroups
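-# e.g. with nesting enabled, a (hypothetical) tag '/environment/production'
-# becomes the nested group chain: tags -> environment -> production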
-nest_tags = False
-
-# If set, ensure host name are suffixed with this value
-# Note: This suffix *must* include the leading '.' as it is appended to the hostname as is
-# suffix = .example.org
-
-# If true, will try to use an IPv4 address for the ansible_ssh_host rather than just the first IP address in the list
-prefer_ipv4 = False
-
-[cache]
-
-# Maximum time to trust the cache in seconds
-max_age = 600
diff --git a/contrib/inventory/cloudforms.py b/contrib/inventory/cloudforms.py
deleted file mode 100755
index 0057940930..0000000000
--- a/contrib/inventory/cloudforms.py
+++ /dev/null
@@ -1,483 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-#
-# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>
-#
-# This script is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with it. If not, see <http://www.gnu.org/licenses/>.
-#
-# This is loosely based on the foreman inventory script
-# -- Josh Preston <jpreston@redhat.com>
-#
-
-from __future__ import print_function
-import argparse
-import ast
-from ansible.module_utils.six.moves import configparser as ConfigParser
-import os
-import re
-from time import time
-import requests
-from requests.auth import HTTPBasicAuth
-import warnings
-from ansible.errors import AnsibleError
-
-import json
-
-
-class CloudFormsInventory(object):
- def __init__(self):
- """
- Main execution path
- """
- self.inventory = dict() # A dict mapping group names to the hosts in each group
- self.hosts = dict() # Details about hosts in the inventory
-
- # Parse CLI arguments
- self.parse_cli_args()
-
- # Read settings
- self.read_settings()
-
- # Cache
- if self.args.refresh_cache or not self.is_cache_valid():
- self.update_cache()
- else:
- self.load_inventory_from_cache()
- self.load_hosts_from_cache()
-
- data_to_print = ""
-
- # Data to print
- if self.args.host:
- if self.args.debug:
- print("Fetching host [%s]" % self.args.host)
- data_to_print += self.get_host_info(self.args.host)
- else:
- self.inventory['_meta'] = {'hostvars': {}}
- for hostname in self.hosts:
- self.inventory['_meta']['hostvars'][hostname] = {
- 'cloudforms': self.hosts[hostname],
- }
- # include the ansible_ssh_host in the top level
- if 'ansible_ssh_host' in self.hosts[hostname]:
- self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.hosts[hostname]['ansible_ssh_host']
-
- data_to_print += self.json_format_dict(self.inventory, self.args.pretty)
-
- print(data_to_print)
-
- def is_cache_valid(self):
- """
- Determines if the cache files have expired, or if it is still valid
- """
- if self.args.debug:
- print("Determining if cache [%s] is still valid (< %s seconds old)" % (self.cache_path_hosts, self.cache_max_age))
-
- if os.path.isfile(self.cache_path_hosts):
- mod_time = os.path.getmtime(self.cache_path_hosts)
- current_time = time()
- if (mod_time + self.cache_max_age) > current_time:
- if os.path.isfile(self.cache_path_inventory):
- if self.args.debug:
- print("Cache is still valid!")
- return True
-
- if self.args.debug:
- print("Cache is stale or does not exist.")
-
- return False
-
- def read_settings(self):
- """
- Reads the settings from the cloudforms.ini file
- """
- config = ConfigParser.SafeConfigParser()
- config_paths = [
- os.path.dirname(os.path.realpath(__file__)) + '/cloudforms.ini',
- "/etc/ansible/cloudforms.ini",
- ]
-
- env_value = os.environ.get('CLOUDFORMS_INI_PATH')
- if env_value is not None:
- config_paths.append(os.path.expanduser(os.path.expandvars(env_value)))
-
- if self.args.debug:
- for config_path in config_paths:
- print("Reading from configuration file [%s]" % config_path)
-
- config.read(config_paths)
-
- # CloudForms API related
- if config.has_option('cloudforms', 'url'):
- self.cloudforms_url = config.get('cloudforms', 'url')
- else:
- self.cloudforms_url = None
-
- if not self.cloudforms_url:
- warnings.warn("No url specified, expected something like 'https://cfme.example.com'")
-
- if config.has_option('cloudforms', 'username'):
- self.cloudforms_username = config.get('cloudforms', 'username')
- else:
- self.cloudforms_username = None
-
- if not self.cloudforms_username:
- warnings.warn("No username specified, you need to specify a CloudForms username.")
-
- if config.has_option('cloudforms', 'password'):
- self.cloudforms_pw = config.get('cloudforms', 'password', raw=True)
- else:
- self.cloudforms_pw = None
-
- if not self.cloudforms_pw:
- warnings.warn("No password specified, you need to specify a password for the CloudForms user.")
-
- if config.has_option('cloudforms', 'ssl_verify'):
- self.cloudforms_ssl_verify = config.getboolean('cloudforms', 'ssl_verify')
- else:
- self.cloudforms_ssl_verify = True
-
- if config.has_option('cloudforms', 'version'):
- self.cloudforms_version = config.get('cloudforms', 'version')
- else:
- self.cloudforms_version = None
-
- if config.has_option('cloudforms', 'limit'):
- self.cloudforms_limit = config.getint('cloudforms', 'limit')
- else:
- self.cloudforms_limit = 100
-
- if config.has_option('cloudforms', 'purge_actions'):
- self.cloudforms_purge_actions = config.getboolean('cloudforms', 'purge_actions')
- else:
- self.cloudforms_purge_actions = True
-
- if config.has_option('cloudforms', 'clean_group_keys'):
- self.cloudforms_clean_group_keys = config.getboolean('cloudforms', 'clean_group_keys')
- else:
- self.cloudforms_clean_group_keys = True
-
- if config.has_option('cloudforms', 'nest_tags'):
- self.cloudforms_nest_tags = config.getboolean('cloudforms', 'nest_tags')
- else:
- self.cloudforms_nest_tags = False
-
- if config.has_option('cloudforms', 'suffix'):
- self.cloudforms_suffix = config.get('cloudforms', 'suffix')
- if self.cloudforms_suffix[0] != '.':
- raise AnsibleError('Leading fullstop is required for Cloudforms suffix')
- else:
- self.cloudforms_suffix = None
-
- if config.has_option('cloudforms', 'prefer_ipv4'):
- self.cloudforms_prefer_ipv4 = config.getboolean('cloudforms', 'prefer_ipv4')
- else:
- self.cloudforms_prefer_ipv4 = False
-
- # Ansible related
- try:
- group_patterns = config.get('ansible', 'group_patterns')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- group_patterns = "[]"
-
- # literal_eval parses the configured Python list literal without executing arbitrary code
- self.group_patterns = ast.literal_eval(group_patterns)
-
- # Cache related
- try:
- cache_path = os.path.expanduser(config.get('cache', 'path'))
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- cache_path = '.'
- (script, ext) = os.path.splitext(os.path.basename(__file__))
- self.cache_path_hosts = cache_path + "/%s.hosts" % script
- self.cache_path_inventory = cache_path + "/%s.inventory" % script
- self.cache_max_age = config.getint('cache', 'max_age')
-
- if self.args.debug:
- print("CloudForms settings:")
- print("cloudforms_url = %s" % self.cloudforms_url)
- print("cloudforms_username = %s" % self.cloudforms_username)
- print("cloudforms_pw = %s" % self.cloudforms_pw)
- print("cloudforms_ssl_verify = %s" % self.cloudforms_ssl_verify)
- print("cloudforms_version = %s" % self.cloudforms_version)
- print("cloudforms_limit = %s" % self.cloudforms_limit)
- print("cloudforms_purge_actions = %s" % self.cloudforms_purge_actions)
- print("Cache settings:")
- print("cache_max_age = %s" % self.cache_max_age)
- print("cache_path_hosts = %s" % self.cache_path_hosts)
- print("cache_path_inventory = %s" % self.cache_path_inventory)
-
- def parse_cli_args(self):
- """
- Command line argument processing
- """
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on CloudForms managed VMs')
- parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
- parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
- parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print JSON output (default: False)')
- parser.add_argument('--refresh-cache', action='store_true', default=False,
- help='Force refresh of cache by making API requests to CloudForms (default: False - use cache files)')
- parser.add_argument('--debug', action='store_true', default=False, help='Show debug output while running (default: False)')
- self.args = parser.parse_args()
-
- def _get_json(self, url):
- """
- Make a request and return the JSON
- """
- results = []
-
- ret = requests.get(url,
- auth=HTTPBasicAuth(self.cloudforms_username, self.cloudforms_pw),
- verify=self.cloudforms_ssl_verify)
-
- ret.raise_for_status()
-
- try:
- results = json.loads(ret.text)
- except ValueError:
- warnings.warn("Unexpected response from {0} ({1}): {2}".format(self.cloudforms_url, ret.status_code, ret.reason))
- results = {}
-
- if self.args.debug:
- print("=======================================================================")
- print("=======================================================================")
- print("=======================================================================")
- print(ret.text)
- print("=======================================================================")
- print("=======================================================================")
- print("=======================================================================")
-
- return results
-
- def _get_hosts(self):
- """
- Get all hosts by paging through the results
- """
- limit = self.cloudforms_limit
-
- page = 0
- last_page = False
-
- results = []
-
- while not last_page:
- offset = page * limit
- ret = self._get_json("%s/api/vms?offset=%s&limit=%s&expand=resources,tags,hosts&attributes=ipaddresses" % (self.cloudforms_url, offset, limit))
- results += ret['resources']
- if ret['subcount'] < limit:
- last_page = True
- page += 1
-
- return results
-
- def update_cache(self):
- """
- Make calls to cloudforms and save the output in a cache
- """
- self.groups = dict()
- self.hosts = dict()
-
- if self.args.debug:
- print("Updating cache...")
-
- for host in self._get_hosts():
- if self.cloudforms_suffix is not None and not host['name'].endswith(self.cloudforms_suffix):
- host['name'] = host['name'] + self.cloudforms_suffix
-
- # Ignore VMs that are not powered on
- if host['power_state'] != 'on':
- if self.args.debug:
- print("Skipping %s because power_state = %s" % (host['name'], host['power_state']))
- continue
-
- # purge actions
- if self.cloudforms_purge_actions and 'actions' in host:
- del host['actions']
-
- # Create ansible groups for tags
- if 'tags' in host:
-
- # Create top-level group
- if 'tags' not in self.inventory:
- self.inventory['tags'] = dict(children=[], vars={}, hosts=[])
-
- if not self.cloudforms_nest_tags:
- # don't expand tags, just use them in a safe way
- for group in host['tags']:
- # Add sub-group, as a child of top-level
- safe_key = self.to_safe(group['name'])
- if safe_key:
- if self.args.debug:
- print("Adding sub-group '%s' to parent 'tags'" % safe_key)
-
- if safe_key not in self.inventory['tags']['children']:
- self.push(self.inventory['tags'], 'children', safe_key)
-
- self.push(self.inventory, safe_key, host['name'])
-
- if self.args.debug:
- print("Found tag [%s] for host which will be mapped to [%s]" % (group['name'], safe_key))
- else:
- # expand the tags into nested groups / sub-groups
- # Create nested groups for tags
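- # e.g. a (hypothetical) tag named '/environment/production'
- # yields the chain: tags -> environment -> production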
- safe_parent_tag_name = 'tags'
- for tag in host['tags']:
- tag_hierarchy = tag['name'][1:].split('/')
-
- if self.args.debug:
- print("Working on list %s" % tag_hierarchy)
-
- for tag_name in tag_hierarchy:
- if self.args.debug:
- print("Working on tag_name = %s" % tag_name)
-
- safe_tag_name = self.to_safe(tag_name)
- if self.args.debug:
- print("Using sanitized name %s" % safe_tag_name)
-
- # Create sub-group
- if safe_tag_name not in self.inventory:
- self.inventory[safe_tag_name] = dict(children=[], vars={}, hosts=[])
-
- # Add sub-group, as a child of top-level
- if safe_parent_tag_name:
- if self.args.debug:
- print("Adding sub-group '%s' to parent '%s'" % (safe_tag_name, safe_parent_tag_name))
-
- if safe_tag_name not in self.inventory[safe_parent_tag_name]['children']:
- self.push(self.inventory[safe_parent_tag_name], 'children', safe_tag_name)
-
- # Make sure the next one uses this one as its parent
- safe_parent_tag_name = safe_tag_name
-
- # Add the host to the last tag
- self.push(self.inventory[safe_parent_tag_name], 'hosts', host['name'])
-
- # Set ansible_ssh_host to the first available ip address
- if 'ipaddresses' in host and host['ipaddresses'] and isinstance(host['ipaddresses'], list):
- # If no preference for IPv4, just use the first entry
- if not self.cloudforms_prefer_ipv4:
- host['ansible_ssh_host'] = host['ipaddresses'][0]
- else:
- # Before we search for an IPv4 address, set using the first entry in case we don't find any
- host['ansible_ssh_host'] = host['ipaddresses'][0]
- for currenthost in host['ipaddresses']:
- if '.' in currenthost:
- host['ansible_ssh_host'] = currenthost
-
- # Create additional groups, skipping keys missing from this host
- for key in ('location', 'type', 'vendor'):
- if key not in host:
- continue
-
- safe_key = self.to_safe(host[key])
-
- # Create top-level group
- if key not in self.inventory:
- self.inventory[key] = dict(children=[], vars={}, hosts=[])
-
- # Create sub-group
- if safe_key not in self.inventory:
- self.inventory[safe_key] = dict(children=[], vars={}, hosts=[])
-
- # Add sub-group, as a child of top-level
- if safe_key not in self.inventory[key]['children']:
- self.push(self.inventory[key], 'children', safe_key)
-
- # Add host to sub-group
- self.push(self.inventory[safe_key], 'hosts', host['name'])
-
- self.hosts[host['name']] = host
- self.push(self.inventory, 'all', host['name'])
-
- if self.args.debug:
- print("Saving cached data")
-
- self.write_to_cache(self.hosts, self.cache_path_hosts)
- self.write_to_cache(self.inventory, self.cache_path_inventory)
-
- def get_host_info(self, host):
- """
- Get variables about a specific host
- """
- if not self.hosts:
- # Need to load the host details from the cache file
- self.load_hosts_from_cache()
-
- if host not in self.hosts:
- if self.args.debug:
- print("[%s] not found in cache." % host)
-
- # try updating the cache
- self.update_cache()
-
- if host not in self.hosts:
- if self.args.debug:
- print("[%s] does not exist after cache update." % host)
- # host might not exist anymore
- return self.json_format_dict({}, self.args.pretty)
-
- return self.json_format_dict(self.hosts[host], self.args.pretty)
-
- def push(self, d, k, v):
- """
- Safely puts a new entry onto an array.
- """
- if k in d:
- d[k].append(v)
- else:
- d[k] = [v]
-
- def load_inventory_from_cache(self):
- """
- Reads the inventory from the cache file and sets self.inventory
- """
- with open(self.cache_path_inventory, 'r') as cache:
- self.inventory = json.loads(cache.read())
-
- def load_hosts_from_cache(self):
- """
- Reads the hosts from the cache file and sets self.hosts
- """
- with open(self.cache_path_hosts, 'r') as cache:
- self.hosts = json.loads(cache.read())
-
- def write_to_cache(self, data, filename):
- """
- Writes data in JSON format to a file
- """
- json_data = self.json_format_dict(data, True)
- with open(filename, 'w') as cache:
- cache.write(json_data)
-
- def to_safe(self, word):
- """
- Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
- """
- if self.cloudforms_clean_group_keys:
- regex = r"[^A-Za-z0-9\_]"
- return re.sub(regex, "_", word.replace(" ", ""))
- else:
- return word
-
- def json_format_dict(self, data, pretty=False):
- """
- Converts a dict to a JSON object and dumps it as a formatted string
- """
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
-
-CloudFormsInventory()
diff --git a/contrib/inventory/cloudstack.ini b/contrib/inventory/cloudstack.ini
deleted file mode 100644
index 43777b593f..0000000000
--- a/contrib/inventory/cloudstack.ini
+++ /dev/null
@@ -1,5 +0,0 @@
-[cloudstack]
-#endpoint = https://api.exoscale.ch/compute
-endpoint = https://cloud.example.com/client/api
-key = cloudstack api key
-secret = cloudstack api secret
diff --git a/contrib/inventory/cloudstack.py b/contrib/inventory/cloudstack.py
deleted file mode 100755
index db0322cfd6..0000000000
--- a/contrib/inventory/cloudstack.py
+++ /dev/null
@@ -1,277 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# (c) 2015, René Moser <mail@renemoser.net>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-######################################################################
-
-"""
-Ansible CloudStack external inventory script.
-=============================================
-
-Generates Ansible inventory from CloudStack. Configuration is read from
-'cloudstack.ini'. If you need to pass the project, write a simple wrapper
-script, e.g. project_cloudstack.sh:
-
- #!/bin/bash
- cloudstack.py --project <your_project> $@
-
-
-When run against a specific host, this script returns the following attributes
-based on the data obtained from CloudStack API:
-
- "web01": {
- "cpu_number": 2,
- "nic": [
- {
- "ip": "10.102.76.98",
- "mac": "02:00:50:99:00:01",
- "type": "Isolated",
- "netmask": "255.255.255.0",
- "gateway": "10.102.76.1"
- },
- {
- "ip": "10.102.138.63",
- "mac": "06:b7:5a:00:14:84",
- "type": "Shared",
- "netmask": "255.255.255.0",
- "gateway": "10.102.138.1"
- }
- ],
- "default_ip": "10.102.76.98",
- "zone": "ZUERICH",
- "created": "2014-07-02T07:53:50+0200",
- "hypervisor": "VMware",
- "memory": 2048,
- "state": "Running",
- "tags": [],
- "cpu_speed": 1800,
- "affinity_group": [],
- "service_offering": "Small",
- "cpu_used": "62%"
- }
-
-
-usage: cloudstack.py [--list] [--tag TAG] [--host HOST] [--project PROJECT] [--domain DOMAIN]
-"""
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import sys
-import argparse
-import json
-
-try:
- from cs import CloudStack, CloudStackException, read_config
-except ImportError:
- print("Error: CloudStack library must be installed: pip install cs.",
- file=sys.stderr)
- sys.exit(1)
-
-
-class CloudStackInventory(object):
- def __init__(self):
-
- parser = argparse.ArgumentParser()
- parser.add_argument('--host')
- parser.add_argument('--list', action='store_true')
- parser.add_argument('--tag', help="Filter machines by a tag. Should be in the form key=value.")
- parser.add_argument('--project')
- parser.add_argument('--domain')
-
- options = parser.parse_args()
- try:
- self.cs = CloudStack(**read_config())
- except CloudStackException:
- print("Error: Could not connect to CloudStack API", file=sys.stderr)
- sys.exit(1)
-
- domain_id = None
- if options.domain:
- domain_id = self.get_domain_id(options.domain)
-
- project_id = None
- if options.project:
- project_id = self.get_project_id(options.project, domain_id)
-
- if options.host:
- data = self.get_host(options.host, project_id, domain_id)
- print(json.dumps(data, indent=2))
-
- elif options.list:
- tags = dict()
- if options.tag:
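- # The CloudStack API filters on tags via indexed parameters;
- # e.g. a (hypothetical) '--tag env=prod' becomes
- # tags[0].key=env and tags[0].value=prod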
- tags['tags[0].key'], tags['tags[0].value'] = options.tag.split('=')
- data = self.get_list(project_id, domain_id, **tags)
- print(json.dumps(data, indent=2))
- else:
- print("usage: --list [--tag <tag>] | --host <hostname> [--project <project>] [--domain <domain_path>]",
- file=sys.stderr)
- sys.exit(1)
-
- def get_domain_id(self, domain):
- domains = self.cs.listDomains(listall=True)
- if domains:
- for d in domains['domain']:
- if d['path'].lower() == domain.lower():
- return d['id']
- print("Error: Domain %s not found." % domain, file=sys.stderr)
- sys.exit(1)
-
- def get_project_id(self, project, domain_id=None):
- projects = self.cs.listProjects(domainid=domain_id)
- if projects:
- for p in projects['project']:
- if p['name'] == project or p['id'] == project:
- return p['id']
- print("Error: Project %s not found." % project, file=sys.stderr)
- sys.exit(1)
-
- def get_host(self, name, project_id=None, domain_id=None, **kwargs):
- hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id, fetch_list=True, **kwargs)
- data = {}
- if not hosts:
- return data
- for host in hosts:
- host_name = host['displayname']
- if name == host_name:
- data['zone'] = host['zonename']
- if 'group' in host:
- data['group'] = host['group']
- data['state'] = host['state']
- data['service_offering'] = host['serviceofferingname']
- data['affinity_group'] = host['affinitygroup']
- data['security_group'] = host['securitygroup']
- data['cpu_number'] = host['cpunumber']
- if 'cpu_speed' in host:
- data['cpu_speed'] = host['cpuspeed']
- if 'cpuused' in host:
- data['cpu_used'] = host['cpuused']
- data['memory'] = host['memory']
- data['tags'] = host['tags']
- if 'hypervisor' in host:
- data['hypervisor'] = host['hypervisor']
- data['created'] = host['created']
- data['nic'] = []
- for nic in host['nic']:
- # gateway and netmask are not present on every NIC, so add them
- # conditionally instead of indexing into nic unconditionally
- nicdata = {
- 'ip': nic['ipaddress'],
- 'mac': nic['macaddress'],
- 'type': nic['type'],
- }
- if 'ip6address' in nic:
- nicdata['ip6'] = nic['ip6address']
- if 'gateway' in nic:
- nicdata['gateway'] = nic['gateway']
- if 'netmask' in nic:
- nicdata['netmask'] = nic['netmask']
- data['nic'].append(nicdata)
- if nic['isdefault']:
- data['default_ip'] = nic['ipaddress']
- if 'ip6address' in nic:
- data['default_ip6'] = nic['ip6address']
- break
- return data
-
- def get_list(self, project_id=None, domain_id=None, **kwargs):
- data = {
- 'all': {
- 'hosts': [],
- },
- '_meta': {
- 'hostvars': {},
- },
- }
-
- groups = self.cs.listInstanceGroups(projectid=project_id, domainid=domain_id)
- if groups:
- for group in groups['instancegroup']:
- group_name = group['name']
- if group_name and group_name not in data:
- data[group_name] = {
- 'hosts': []
- }
-
- hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id, fetch_list=True, **kwargs)
- if not hosts:
- return data
- for host in hosts:
- host_name = host['displayname']
- data['all']['hosts'].append(host_name)
- data['_meta']['hostvars'][host_name] = {}
-
- # Make a group per zone
- data['_meta']['hostvars'][host_name]['zone'] = host['zonename']
- group_name = host['zonename']
- if group_name not in data:
- data[group_name] = {
- 'hosts': []
- }
- data[group_name]['hosts'].append(host_name)
-
- if 'group' in host:
- data['_meta']['hostvars'][host_name]['group'] = host['group']
- data['_meta']['hostvars'][host_name]['state'] = host['state']
- data['_meta']['hostvars'][host_name]['service_offering'] = host['serviceofferingname']
- data['_meta']['hostvars'][host_name]['affinity_group'] = host['affinitygroup']
- data['_meta']['hostvars'][host_name]['security_group'] = host['securitygroup']
- data['_meta']['hostvars'][host_name]['cpu_number'] = host['cpunumber']
- if 'cpuspeed' in host:
- data['_meta']['hostvars'][host_name]['cpu_speed'] = host['cpuspeed']
- if 'cpuused' in host:
- data['_meta']['hostvars'][host_name]['cpu_used'] = host['cpuused']
- data['_meta']['hostvars'][host_name]['created'] = host['created']
- data['_meta']['hostvars'][host_name]['memory'] = host['memory']
- data['_meta']['hostvars'][host_name]['tags'] = host['tags']
- if 'hypervisor' in host:
- data['_meta']['hostvars'][host_name]['hypervisor'] = host['hypervisor']
- data['_meta']['hostvars'][host_name]['nic'] = []
- for nic in host['nic']:
- # as above, gateway and netmask are optional per NIC
- nicdata = {
- 'ip': nic['ipaddress'],
- 'mac': nic['macaddress'],
- 'type': nic['type'],
- }
- if 'ip6address' in nic:
- nicdata['ip6'] = nic['ip6address']
- if 'gateway' in nic:
- nicdata['gateway'] = nic['gateway']
- if 'netmask' in nic:
- nicdata['netmask'] = nic['netmask']
- data['_meta']['hostvars'][host_name]['nic'].append(nicdata)
- if nic['isdefault']:
- data['_meta']['hostvars'][host_name]['default_ip'] = nic['ipaddress']
- if 'ip6address' in nic:
- data['_meta']['hostvars'][host_name]['default_ip6'] = nic['ip6address']
-
- group_name = ''
- if 'group' in host:
- group_name = host['group']
-
- if group_name and group_name in data:
- data[group_name]['hosts'].append(host_name)
- return data
-
-
-if __name__ == '__main__':
- CloudStackInventory()
diff --git a/contrib/inventory/cobbler.ini b/contrib/inventory/cobbler.ini
deleted file mode 100644
index 2dc8cd3379..0000000000
--- a/contrib/inventory/cobbler.ini
+++ /dev/null
@@ -1,24 +0,0 @@
-# Ansible Cobbler external inventory script settings
-#
-
-[cobbler]
-
-host = http://PATH_TO_COBBLER_SERVER/cobbler_api
-
-# If API needs authentication add 'username' and 'password' options here.
-#username = foo
-#password = bar
-
-# API calls to Cobbler can be slow. For this reason, we cache the results of an API
-# call. Set this to the path you want cache files to be written to. Two files
-# will be written to this directory:
-# - ansible-cobbler.cache
-# - ansible-cobbler.index
-cache_path = /tmp
-
-# The number of seconds a cache file is considered valid. After this many
-# seconds, a new API call will be made, and the cache file will be updated.
-cache_max_age = 900
-
-
-
diff --git a/contrib/inventory/cobbler.py b/contrib/inventory/cobbler.py
deleted file mode 100755
index 60195ac197..0000000000
--- a/contrib/inventory/cobbler.py
+++ /dev/null
@@ -1,312 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Cobbler external inventory script
-=================================
-
-Ansible has a feature where instead of reading from /etc/ansible/hosts
-as a text file, it can query external programs to obtain the list
-of hosts, groups the hosts are in, and even variables to assign to each host.
-
-To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
-This, more or less, allows you to keep one central database containing
-info about all of your managed instances.
-
-This script is an example of sourcing that data from Cobbler
-(https://cobbler.github.io). With cobbler each --mgmt-class in cobbler
-will correspond to a group in Ansible, and --ks-meta variables will be
-passed down for use in templates or even in argument lines.
-
-NOTE: The cobbler system names will not be used. Make sure a
-cobbler --dns-name is set for each cobbler system. If a system
-appears with two DNS names, it is only added once (the first name
-found) so that ansible does not talk to it twice. If no --dns-name
-is set, the system will NOT be visible to ansible. Cobbler system
-names are not used because cobbler does not require them to
-correspond to addresses.
-
-Tested with Cobbler 2.0.11.
-
-Changelog:
- - 2015-06-21 dmccue: Modified to support run-once _meta retrieval, results in
- higher performance at ansible startup. Groups are determined by owner rather than
- default mgmt_classes. DNS name determined from hostname. cobbler values are written
- to a 'cobbler' fact namespace
-
- - 2013-09-01 pgehres: Refactored implementation to make use of caching and to
- limit the number of connections to external cobbler server for performance.
- Added use of cobbler.ini file to configure settings. Tested with Cobbler 2.4.0
-
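-Example (values illustrative): a system registered with --dns-name=web01.example.com,
-owner 'webteam' and profile 'rhel7-x86_64' will appear as host 'web01.example.com'
-in the 'webteam', 'rhel7-x86_64' and status groups, with all cobbler fields
-(including ks_meta values) exposed under the 'cobbler' hostvar namespace.
-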
-"""
-
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
-
-######################################################################
-
-import argparse
-import os
-import re
-from time import time
-from ansible.module_utils.six.moves import xmlrpc_client as xmlrpclib  # works on Python 2 and 3
-
-import json
-
-from ansible.module_utils.six import iteritems
-from ansible.module_utils.six.moves import configparser as ConfigParser
-
-# NOTE -- if no username/password are configured in cobbler.ini (or the
-# corresponding environment variables), this script assumes it is being run
-# FROM the cobbler server and connects without authentication.
-
-orderby_keyname = 'owners' # alternatively 'mgmt_classes'
-
-
-class CobblerInventory(object):
-
- def __init__(self):
-
- """ Main execution path """
- self.conn = None
-
- self.inventory = dict() # A list of groups and the hosts in that group
- self.cache = dict() # Details about hosts in the inventory
- self.ignore_settings = False # used to only look at env vars for settings.
-
- # Read env vars, read settings, and parse CLI arguments
- self.parse_env_vars()
- self.read_settings()
- self.parse_cli_args()
-
- # Cache
- if self.args.refresh_cache:
- self.update_cache()
- elif not self.is_cache_valid():
- self.update_cache()
- else:
- self.load_inventory_from_cache()
- self.load_cache_from_cache()
-
- data_to_print = ""
-
- # Data to print
- if self.args.host:
- data_to_print += self.get_host_info()
- else:
- self.inventory['_meta'] = {'hostvars': {}}
- for hostname in self.cache:
- self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname]}
- data_to_print += self.json_format_dict(self.inventory, True)
-
- print(data_to_print)
-
- def _connect(self):
- if not self.conn:
- self.conn = xmlrpclib.Server(self.cobbler_host, allow_none=True)
- self.token = None
- if self.cobbler_username is not None:
- self.token = self.conn.login(self.cobbler_username, self.cobbler_password)
-
- def is_cache_valid(self):
- """ Determines if the cache files have expired, or if it is still valid """
-
- if os.path.isfile(self.cache_path_cache):
- mod_time = os.path.getmtime(self.cache_path_cache)
- current_time = time()
- if (mod_time + self.cache_max_age) > current_time:
- if os.path.isfile(self.cache_path_inventory):
- return True
-
- return False
-
- def read_settings(self):
- """ Reads the settings from the cobbler.ini file """
-
- if self.ignore_settings:
- return
-
- config = ConfigParser.SafeConfigParser()
- config.read(os.path.dirname(os.path.realpath(__file__)) + '/cobbler.ini')
-
- self.cobbler_host = config.get('cobbler', 'host')
- self.cobbler_username = None
- self.cobbler_password = None
- if config.has_option('cobbler', 'username'):
- self.cobbler_username = config.get('cobbler', 'username')
- if config.has_option('cobbler', 'password'):
- self.cobbler_password = config.get('cobbler', 'password')
-
- # Cache related
- cache_path = config.get('cobbler', 'cache_path')
- self.cache_path_cache = cache_path + "/ansible-cobbler.cache"
- self.cache_path_inventory = cache_path + "/ansible-cobbler.index"
- self.cache_max_age = config.getint('cobbler', 'cache_max_age')
-
- def parse_env_vars(self):
- """ Reads the settings from the environment """
-
- # Env. Vars:
- # COBBLER_host
- # COBBLER_username
- # COBBLER_password
- # COBBLER_cache_path
- # COBBLER_cache_max_age
- # COBBLER_ignore_settings
-
- self.cobbler_host = os.getenv('COBBLER_host', None)
- self.cobbler_username = os.getenv('COBBLER_username', None)
- self.cobbler_password = os.getenv('COBBLER_password', None)
-
- # Cache related
- cache_path = os.getenv('COBBLER_cache_path', None)
- if cache_path is not None:
- self.cache_path_cache = cache_path + "/ansible-cobbler.cache"
- self.cache_path_inventory = cache_path + "/ansible-cobbler.index"
-
- self.cache_max_age = int(os.getenv('COBBLER_cache_max_age', "30"))
-
- # ignore_settings skips reading the settings file entirely, so that
- # Ansible Tower (or AWX) inventory scripts do not raise python exceptions
- # when no cobbler.ini is present.
- if os.getenv('COBBLER_ignore_settings', False) == "True":
- self.ignore_settings = True
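- # Example invocation using env vars only (values illustrative):
- #   COBBLER_ignore_settings=True COBBLER_host=http://cobbler.example.com/cobbler_api ./cobbler.py --list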
-
- def parse_cli_args(self):
- """ Command line argument processing """
-
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Cobbler')
- parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
- parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
- parser.add_argument('--refresh-cache', action='store_true', default=False,
- help='Force refresh of cache by making API requests to cobbler (default: False - use cache files)')
- self.args = parser.parse_args()
-
- def update_cache(self):
- """ Make calls to cobbler and save the output in a cache """
-
- self._connect()
- self.groups = dict()
- self.hosts = dict()
- if self.token is not None:
- data = self.conn.get_systems(self.token)
- else:
- data = self.conn.get_systems()
-
- for host in data:
- # Get the FQDN for the host and add it to the right groups
- dns_name = host['hostname'] # None
- ksmeta = None
- interfaces = host['interfaces']
- # hostname is often empty for non-static IP hosts
- if dns_name == '':
- for (iname, ivalue) in iteritems(interfaces):
- if ivalue['management'] or not ivalue['static']:
- this_dns_name = ivalue.get('dns_name', None)
- # use the first non-empty dns_name, as documented above
- if this_dns_name:
- dns_name = this_dns_name
- break
-
- if dns_name == '' or dns_name is None:
- continue
-
- status = host['status']
- profile = host['profile']
- classes = host[orderby_keyname]
-
- if status not in self.inventory:
- self.inventory[status] = []
- self.inventory[status].append(dns_name)
-
- if profile not in self.inventory:
- self.inventory[profile] = []
- self.inventory[profile].append(dns_name)
-
- for cls in classes:
- if cls not in self.inventory:
- self.inventory[cls] = []
- self.inventory[cls].append(dns_name)
-
- # Since we already have all of the data for the host, update the host details as well
-
- # The old way was ksmeta only -- provide backwards compatibility
-
- self.cache[dns_name] = host
- if "ks_meta" in host:
- for key, value in iteritems(host["ks_meta"]):
- self.cache[dns_name][key] = value
-
- self.write_to_cache(self.cache, self.cache_path_cache)
- self.write_to_cache(self.inventory, self.cache_path_inventory)
-
- def get_host_info(self):
- """ Get variables about a specific host """
-
- if not self.cache:
- # Need to load index from cache
- self.load_cache_from_cache()
-
- if self.args.host not in self.cache:
- # try updating the cache
- self.update_cache()
-
- if self.args.host not in self.cache:
- # host might not exist anymore
- return self.json_format_dict({}, True)
-
- return self.json_format_dict(self.cache[self.args.host], True)
-
- def push(self, my_dict, key, element):
- """ Pushed an element onto an array that may not have been defined in the dict """
-
- if key in my_dict:
- my_dict[key].append(element)
- else:
- my_dict[key] = [element]
-
- def load_inventory_from_cache(self):
- """ Reads the index from the cache file sets self.index """
-
- cache = open(self.cache_path_inventory, 'r')
- json_inventory = cache.read()
- self.inventory = json.loads(json_inventory)
-
- def load_cache_from_cache(self):
- """ Reads the cache from the cache file sets self.cache """
-
- cache = open(self.cache_path_cache, 'r')
- json_cache = cache.read()
- self.cache = json.loads(json_cache)
-
- def write_to_cache(self, data, filename):
- """ Writes data in JSON format to a file """
- json_data = self.json_format_dict(data, True)
- cache = open(filename, 'w')
- cache.write(json_data)
- cache.close()
-
- def to_safe(self, word):
- """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """
-
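- # e.g. to_safe('mgmt-class/web.prod') -> 'mgmt-class_web_prod'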
- return re.sub(r"[^A-Za-z0-9\-]", "_", word)
-
- def json_format_dict(self, data, pretty=False):
- """ Converts a dict to a JSON object and dumps it as a formatted string """
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
-
-CobblerInventory()
diff --git a/contrib/inventory/collins.ini b/contrib/inventory/collins.ini
deleted file mode 100644
index 0ce0c2acbd..0000000000
--- a/contrib/inventory/collins.ini
+++ /dev/null
@@ -1,57 +0,0 @@
-# Ansible Collins external inventory script settings
-#
-
-[collins]
-
-# You should not have a trailing slash or collins
-# will not properly match the URI
-host = http://localhost:9000
-
-username = blake
-password = admin:first
-
-# Specifies a timeout for all HTTP requests to Collins.
-timeout_secs = 120
-
-# Specifies a maximum number of retries per Collins request.
-max_retries = 5
-
-# Specifies the number of results to return per paginated query as specified in
-# the Pagination section of the Collins API docs:
-# http://tumblr.github.io/collins/api.html
-results_per_query = 100
-
-# Specifies the Collins asset type which will be queried for; most typically
-# you'll want to leave this at the default of SERVER_NODE.
-asset_type = SERVER_NODE
-
-# Collins assets can optionally be assigned hostnames; this option will preference
-# the selection of an asset's hostname over an IP address as the primary identifier
-# in the Ansible inventory. Typically, this value should be set to true if assets
-# are assigned hostnames.
-prefer_hostnames = true
-
-# Within Collins, assets can be granted multiple IP addresses; this configuration
-# value specifies the index within the 'ADDRESSES' array as returned by the
-# following API endpoint:
-# http://tumblr.github.io/collins/api.html#api-ipam-asset-addresses-section
-ip_address_index = 0
-
-# Sets whether Collins instances in multiple datacenters will be queried.
-query_remote_dcs = false
-
-# API calls to Collins can involve large, substantial queries. For this reason,
-# we cache the results of an API call. Set this to the path you want cache files
-# to be written to. Two files will be written to this directory:
-# - ansible-collins.cache
-# - ansible-collins.index
-cache_path = /tmp
-
-# If errors occur while querying inventory, logging messages will be written
-# to a logfile in the specified directory:
-# - ansible-collins.log
-log_path = /tmp
-
-# The number of seconds that a cache file is considered valid. After this many
-# seconds, a new API call will be made, and the cache file will be updated.
-cache_max_age = 600
diff --git a/contrib/inventory/collins.py b/contrib/inventory/collins.py
deleted file mode 100755
index 39f6c4b447..0000000000
--- a/contrib/inventory/collins.py
+++ /dev/null
@@ -1,439 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Collins external inventory script
-=================================
-
-Ansible has a feature where instead of reading from /etc/ansible/hosts
-as a text file, it can query external programs to obtain the list
-of hosts, groups the hosts are in, and even variables to assign to each host.
-
-Collins is a hardware asset management system originally developed by
-Tumblr for tracking new hardware as it built out its own datacenters. It
-exposes a rich API for manipulating and querying one's hardware inventory,
-which makes it an ideal 'single point of truth' for driving systems
-automation like Ansible. Extensive documentation on Collins, including a quickstart,
-API docs, and a full reference manual, can be found here:
-
-http://tumblr.github.io/collins
-
-This script adds support to Ansible for obtaining a dynamic inventory of
-assets in your infrastructure, grouping them in Ansible by their useful attributes,
-and binding all facts provided by Collins to each host so that they can be used to
-drive automation. Some parts of this script were cribbed shamelessly from mdehaan's
-Cobbler inventory script.
-
-To use it, copy it to your repo and pass -i <collins script> to the ansible or
-ansible-playbook command; if you'd like to use it by default, simply copy collins.ini
-to /etc/ansible and this script to /etc/ansible/hosts.
-
-Alongside the options set in collins.ini, there are several environment variables
-that will be used instead of the configured values if they are set:
-
- - COLLINS_USERNAME - specifies a username to use for Collins authentication
- - COLLINS_PASSWORD - specifies a password to use for Collins authentication
- - COLLINS_ASSET_TYPE - specifies a Collins asset type to use during querying;
- this can be used to run Ansible automation against different asset classes than
- server nodes, such as network switches and PDUs
- - COLLINS_CONFIG - specifies an alternative location for collins.ini, defaults to
- <location of collins.py>/collins.ini
-
-If errors are encountered during operation, this script will return an exit code of
-255; otherwise, it will return an exit code of 0.
-
-Collins attributes are accessible as variables in ansible via COLLINS['attribute_name'].
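-For example (attribute name illustrative), a template can reference {{ COLLINS['NODECLASS'] }}.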
-
-Tested against Ansible 1.8.2 and Collins 1.3.0.
-"""
-
-# (c) 2014, Steve Salevan <steve.salevan@gmail.com>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-######################################################################
-
-
-import argparse
-import logging
-import os
-import re
-import sys
-from time import time
-import traceback
-
-import json
-
-from ansible.module_utils.six import iteritems
-from ansible.module_utils.six.moves import configparser as ConfigParser
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-
-from ansible.module_utils.urls import open_url
-
-
-class CollinsDefaults(object):
- ASSETS_API_ENDPOINT = '%s/api/assets'
- SPECIAL_ATTRIBUTES = set([
- 'CREATED',
- 'DELETED',
- 'UPDATED',
- 'STATE',
- ])
- LOG_FORMAT = '%(asctime)-15s %(message)s'
-
-
-class Error(Exception):
- pass
-
-
-class MaxRetriesError(Error):
- pass
-
-
-class CollinsInventory(object):
-
- def __init__(self):
- """ Constructs CollinsInventory object and reads all configuration. """
-
- self.inventory = dict() # A list of groups and the hosts in that group
- self.cache = dict() # Details about hosts in the inventory
-
- # Read settings and parse CLI arguments
- self.read_settings()
- self.parse_cli_args()
-
- logging.basicConfig(format=CollinsDefaults.LOG_FORMAT,
- filename=self.log_location)
- self.log = logging.getLogger('CollinsInventory')
-
- def _asset_get_attribute(self, asset, attrib):
- """ Returns a user-defined attribute from an asset if it exists; otherwise,
- returns None. """
-
- if 'ATTRIBS' in asset:
- for attrib_block in asset['ATTRIBS'].keys():
- if attrib in asset['ATTRIBS'][attrib_block]:
- return asset['ATTRIBS'][attrib_block][attrib]
- return None
-
- def _asset_has_attribute(self, asset, attrib):
- """ Returns whether a user-defined attribute is present on an asset. """
-
- if 'ATTRIBS' in asset:
- for attrib_block in asset['ATTRIBS'].keys():
- if attrib in asset['ATTRIBS'][attrib_block]:
- return True
- return False
-
- def run(self):
- """ Main execution path """
-
- # Updates cache if cache is not present or has expired.
- successful = True
- if self.args.refresh_cache:
- successful = self.update_cache()
- elif not self.is_cache_valid():
- successful = self.update_cache()
- else:
- successful = self.load_inventory_from_cache()
- successful &= self.load_cache_from_cache()
-
- data_to_print = ""
-
- # Data to print
- if self.args.host:
- data_to_print = self.get_host_info()
-
- elif self.args.list:
- # Display list of instances for inventory
- data_to_print = self.json_format_dict(self.inventory, self.args.pretty)
-
- else: # default action with no options
- data_to_print = self.json_format_dict(self.inventory, self.args.pretty)
-
- print(data_to_print)
- return successful
-
- def find_assets(self, attributes=None, operation='AND'):
- """ Obtains Collins assets matching the provided attributes. """
- attributes = {} if attributes is None else attributes
-
- # Formats asset search query to locate assets matching attributes, using
- # the CQL search feature as described here:
- # http://tumblr.github.io/collins/recipes.html
- attributes_query = ['='.join(attr_pair) for attr_pair in iteritems(attributes)]
- query_parameters = {
- 'details': ['True'],
- 'operation': [operation],
- 'query': attributes_query,
- 'remoteLookup': [str(self.query_remote_dcs)],
- 'size': [self.results_per_query],
- 'type': [self.collins_asset_type],
- }
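- # With the defaults above, the first page URL resembles (host illustrative,
- # parameter order may vary):
- #   http://localhost:9000/api/assets?details=True&operation=AND&remoteLookup=False&size=100&type=SERVER_NODE&page=0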
- assets = []
- cur_page = 0
- num_retries = 0
- # Locates all assets matching the provided query, exhausting pagination.
- while True:
- if num_retries == self.collins_max_retries:
- raise MaxRetriesError("Maximum of %s retries reached; giving up" % self.collins_max_retries)
- query_parameters['page'] = cur_page
- query_url = "%s?%s" % (
- (CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host),
- urlencode(query_parameters, doseq=True)
- )
- try:
- response = open_url(query_url,
- timeout=self.collins_timeout_secs,
- url_username=self.collins_username,
- url_password=self.collins_password,
- force_basic_auth=True)
- json_response = json.loads(response.read())
- # Adds any assets found to the array of assets.
- assets += json_response['data']['Data']
- # If we've retrieved all of our assets, breaks out of the loop.
- if len(json_response['data']['Data']) == 0:
- break
- cur_page += 1
- num_retries = 0
- except Exception:
- self.log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc())
- num_retries += 1
- return assets
-
- def is_cache_valid(self):
- """ Determines if the cache files have expired, or if it is still valid """
-
- if os.path.isfile(self.cache_path_cache):
- mod_time = os.path.getmtime(self.cache_path_cache)
- current_time = time()
- if (mod_time + self.cache_max_age) > current_time:
- if os.path.isfile(self.cache_path_inventory):
- return True
-
- return False
-
- def read_settings(self):
- """ Reads the settings from the collins.ini file """
-
- config_loc = os.getenv('COLLINS_CONFIG', os.path.dirname(os.path.realpath(__file__)) + '/collins.ini')
-
- config = ConfigParser.SafeConfigParser()
- config.read(config_loc)
-
- self.collins_host = config.get('collins', 'host')
- self.collins_username = os.getenv('COLLINS_USERNAME', config.get('collins', 'username'))
- self.collins_password = os.getenv('COLLINS_PASSWORD', config.get('collins', 'password'))
- self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE', config.get('collins', 'asset_type'))
- self.collins_timeout_secs = config.getint('collins', 'timeout_secs')
- self.collins_max_retries = config.getint('collins', 'max_retries')
-
- self.results_per_query = config.getint('collins', 'results_per_query')
- self.ip_address_index = config.getint('collins', 'ip_address_index')
- self.query_remote_dcs = config.getboolean('collins', 'query_remote_dcs')
- self.prefer_hostnames = config.getboolean('collins', 'prefer_hostnames')
-
- cache_path = config.get('collins', 'cache_path')
- self.cache_path_cache = cache_path + \
- '/ansible-collins-%s.cache' % self.collins_asset_type
- self.cache_path_inventory = cache_path + \
- '/ansible-collins-%s.index' % self.collins_asset_type
- self.cache_max_age = config.getint('collins', 'cache_max_age')
-
- log_path = config.get('collins', 'log_path')
- self.log_location = log_path + '/ansible-collins.log'
-
- def parse_cli_args(self):
- """ Command line argument processing """
-
- parser = argparse.ArgumentParser(
- description='Produces an Ansible Inventory file based on Collins')
- parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
- parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
- parser.add_argument('--refresh-cache', action='store_true', default=False,
- help='Force refresh of cache by making API requests to Collins '
- '(default: False - use cache files)')
- parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print all JSON output')
- self.args = parser.parse_args()
-
- def update_cache(self):
- """ Make calls to Collins and saves the output in a cache """
-
- self.cache = dict()
- self.inventory = dict()
-
- # Locates all server assets from Collins.
- try:
- server_assets = self.find_assets()
- except Exception:
- self.log.error("Error while locating assets from Collins:\n%s", traceback.format_exc())
- return False
-
- for asset in server_assets:
- # Determines the index to retrieve the asset's IP address either by an
- # attribute set on the Collins asset or the pre-configured value.
- if self._asset_has_attribute(asset, 'ANSIBLE_IP_INDEX'):
- ip_index = self._asset_get_attribute(asset, 'ANSIBLE_IP_INDEX')
- try:
- ip_index = int(ip_index)
- except Exception:
- self.log.error(
- "ANSIBLE_IP_INDEX attribute on asset %s not an integer: %s", asset,
- ip_index)
- else:
- ip_index = self.ip_address_index
-
- asset['COLLINS'] = {}
-
- # Attempts to locate the asset's primary identifier (hostname or IP address),
- # which will be used to index the asset throughout the Ansible inventory.
- if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'):
- asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME')
- elif 'ADDRESSES' not in asset:
- self.log.warning("No IP addresses found for asset '%s', skipping", asset)
- continue
- elif len(asset['ADDRESSES']) < ip_index + 1:
- self.log.warning(
- "No IP address found at index %s for asset '%s', skipping",
- ip_index, asset)
- continue
- else:
- asset_identifier = asset['ADDRESSES'][ip_index]['ADDRESS']
-
- # Adds an asset index to the Ansible inventory based upon unpacking
- # the name of the asset's current STATE from its dictionary.
- if 'STATE' in asset['ASSET'] and asset['ASSET']['STATE']:
- state_inventory_key = self.to_safe(
- 'STATE-%s' % asset['ASSET']['STATE']['NAME'])
- self.push(self.inventory, state_inventory_key, asset_identifier)
-
- # Indexes asset by all user-defined Collins attributes.
- if 'ATTRIBS' in asset:
- for attrib_block in asset['ATTRIBS'].keys():
- for attrib in asset['ATTRIBS'][attrib_block].keys():
- asset['COLLINS'][attrib] = asset['ATTRIBS'][attrib_block][attrib]
- attrib_key = self.to_safe('%s-%s' % (attrib, asset['ATTRIBS'][attrib_block][attrib]))
- self.push(self.inventory, attrib_key, asset_identifier)
-
- # Indexes asset by all built-in Collins attributes.
- for attribute in asset['ASSET'].keys():
- if attribute not in CollinsDefaults.SPECIAL_ATTRIBUTES:
- attribute_val = asset['ASSET'][attribute]
- if attribute_val is not None:
- attrib_key = self.to_safe('%s-%s' % (attribute, attribute_val))
- self.push(self.inventory, attrib_key, asset_identifier)
-
- # Indexes asset by hardware product information.
- if 'HARDWARE' in asset:
- if 'PRODUCT' in asset['HARDWARE']['BASE']:
- product = asset['HARDWARE']['BASE']['PRODUCT']
- if product:
- product_key = self.to_safe(
- 'HARDWARE-PRODUCT-%s' % asset['HARDWARE']['BASE']['PRODUCT'])
- self.push(self.inventory, product_key, asset_identifier)
-
- # Indexing now complete, adds the host details to the asset cache.
- self.cache[asset_identifier] = asset
-
- try:
- self.write_to_cache(self.cache, self.cache_path_cache)
- self.write_to_cache(self.inventory, self.cache_path_inventory)
- except Exception:
- self.log.error("Error while writing to cache:\n%s", traceback.format_exc())
- return False
- return True
-
- def push(self, dictionary, key, value):
- """ Adds a value to a list at a dictionary key, creating the list if it doesn't
- exist. """
-
- if key not in dictionary:
- dictionary[key] = []
- dictionary[key].append(value)
-
- def get_host_info(self):
- """ Get variables about a specific host. """
-
- if not self.cache:
- # Need to load index from cache
- self.load_cache_from_cache()
-
- if self.args.host not in self.cache:
- # try updating the cache
- self.update_cache()
-
- if self.args.host not in self.cache:
- # host might not exist anymore
- return self.json_format_dict({}, self.args.pretty)
-
- return self.json_format_dict(self.cache[self.args.host], self.args.pretty)
-
- def load_inventory_from_cache(self):
- """ Reads the index from the cache file sets self.index """
-
- try:
- cache = open(self.cache_path_inventory, 'r')
- json_inventory = cache.read()
- self.inventory = json.loads(json_inventory)
- return True
- except Exception:
- self.log.error("Error while loading inventory:\n%s",
- traceback.format_exc())
- self.inventory = {}
- return False
-
- def load_cache_from_cache(self):
- """ Reads the cache from the cache file sets self.cache """
-
- try:
- cache = open(self.cache_path_cache, 'r')
- json_cache = cache.read()
- self.cache = json.loads(json_cache)
- return True
- except Exception:
- self.log.error("Error while loading host cache:\n%s",
- traceback.format_exc())
- self.cache = {}
- return False
-
- def write_to_cache(self, data, filename):
- """ Writes data in JSON format to a specified file. """
-
- json_data = self.json_format_dict(data, self.args.pretty)
- cache = open(filename, 'w')
- cache.write(json_data)
- cache.close()
-
- def to_safe(self, word):
- """ Converts 'bad' characters in a string to underscores so they
- can be used as Ansible groups """
-
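- # e.g. to_safe('STATE-Allocated (web)') -> 'STATE-Allocated__web_'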
- return re.sub(r"[^A-Za-z0-9\-]", "_", word)
-
- def json_format_dict(self, data, pretty=False):
- """ Converts a dict to a JSON object and dumps it as a formatted string """
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
-
-if __name__ == '__main__':
- inventory = CollinsInventory()
- if inventory.run():
- sys.exit(0)
- else:
- sys.exit(-1)
diff --git a/contrib/inventory/consul_io.ini b/contrib/inventory/consul_io.ini
deleted file mode 100644
index d18a1494dd..0000000000
--- a/contrib/inventory/consul_io.ini
+++ /dev/null
@@ -1,54 +0,0 @@
-# Ansible Consul external inventory script settings.
-
-[consul]
-
-#
-# Bulk load. Load all possible data before building the inventory JSON.
-# If true, the script works from in-memory data and JSON generation time drops drastically.
-#
-bulk_load = false
-
-# restrict included nodes to those from this datacenter
-#datacenter = nyc1
-
-# url of the consul cluster to query
-#url = http://demo.consul.io
-url = http://localhost:8500
-
-# suffix added to each service to create a group name e.g Service of 'redis' and
-# a suffix of '_servers' will add each address to the group name 'redis_servers'
-servers_suffix = _servers
-
-#
-# By default, the final JSON is built from all available info in consul.
-# If suffixes is true, service groups are added on top of the basic
-# information (see servers_suffix). When speed matters more than service
-# groups, a false value reduces script execution time drastically.
-#
-suffixes = true
-
-# if specified then the inventory will generate domain names that will resolve
-# via Consul's inbuilt DNS.
-#domain=consul
-
-# make groups from service tags. the name of the group is derived from the
-# service name and the tag name e.g. a service named nginx with tags ['master', 'v1']
-# will create groups nginx_master and nginx_v1
-tags = true
-
-# looks up the node name at the given path for a list of groups to which the
-# node should be added.
-kv_groups=ansible/groups
-
-# looks up the node name at the given path for a json dictionary of metadata that
-# should be attached as metadata for the node
-kv_metadata=ansible/metadata
-
-# looks up the health of each service and adds the node to 'up' and 'down' groups
-# based on the service availability
-#
-# !!!! if availability is true, suffixes also must be true. !!!!
-#
-availability = true
-available_suffix = _up
-unavailable_suffix = _down
diff --git a/contrib/inventory/consul_io.py b/contrib/inventory/consul_io.py
deleted file mode 100755
index 4dad3eeec1..0000000000
--- a/contrib/inventory/consul_io.py
+++ /dev/null
@@ -1,537 +0,0 @@
-#!/usr/bin/env python
-
-#
-# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-######################################################################
-
-'''
-Consul.io inventory script (http://consul.io)
-=============================================
-
-Generates Ansible inventory from nodes in a Consul cluster. This script will
-group nodes by:
- - datacenter,
- - registered service
- - service tags
- - service status
- - values from the k/v store
-
-This script can be run with the following switches:
---list, as expected, groups all the nodes in all datacenters
---datacenter, to restrict the nodes to a single datacenter
---host, to restrict the inventory to a single named node (requires datacenter config)
-
-The configuration for this plugin is read from a consul_io.ini file located in the
-same directory as this inventory script. All config options in the config file
-are optional except the host and port, which must point to a valid agent or
-server running the http api. For more information on enabling the endpoint, see:
-
-http://www.consul.io/docs/agent/options.html
-
-Other options include:
-
-'datacenter':
-
-which restricts the included nodes to those from the given datacenter
-This can also be set with the environmental variable CONSUL_DATACENTER
-
-'url':
-
-the URL of the Consul cluster. host, port and scheme are derived from the
-URL. If not specified, connection configuration defaults to http requests
-to localhost on port 8500.
-This can also be set with the environmental variable CONSUL_URL
-
-'domain':
-
-if specified then the inventory will generate domain names that will resolve
-via Consul's inbuilt DNS. The name is derived from the node name, datacenter
-and domain <node_name>.node.<datacenter>.<domain>. Note that you will need to
-have consul hooked into your DNS server for these to resolve. See the consul
-DNS docs for more info.
-
-'servers_suffix':
-
-defines a suffix to add to the service name when creating the service
-group. e.g. a service name of 'redis' and a suffix of '_servers' will add
-each node's address to the group 'redis_servers'. No suffix is added
-if this is not set.
-
-'tags':
-
-boolean flag defining if service tags should be used to create Inventory
-groups e.g. an nginx service with the tags ['master', 'v1'] will create
-groups nginx_master and nginx_v1 to which the node running the service
-will be added. No tag groups are created if this is missing.
-
-'token':
-
-ACL token to use to authorize access to the key value store. May be required
-to retrieve the kv_groups and kv_metadata based on your consul configuration.
-
-'kv_groups':
-
-This is used to lookup groups for a node in the key value store. It specifies a
-path to which each discovered node's name will be added to create a key to query
-the key/value store. There it expects to find a comma separated list of group
-names to which the node should be added e.g. if the inventory contains node
-'nyc-web-1' in datacenter 'nyc-dc1' and kv_groups = 'ansible/groups' then the key
-'ansible/groups/nyc-dc1/nyc-web-1' will be queried for a group list. If this query
- returned 'test,honeypot' then the node is added to both groups.
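-
-For example, such a key can be seeded through Consul's HTTP API (values illustrative):
-
-  curl -X PUT -d 'test,honeypot' http://localhost:8500/v1/kv/ansible/groups/nyc-dc1/nyc-web-1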
-
-'kv_metadata':
-
-kv_metadata is used to lookup metadata for each discovered node. Like kv_groups
-above it is used to build a path to lookup in the kv store where it expects to
-find a json dictionary of metadata entries. If found, each key/value pair in the
-dictionary is added to the metadata for the node. eg node 'nyc-web-1' in datacenter
-'nyc-dc1' and kv_metadata = 'ansible/metadata', then the key
-'ansible/metadata/nyc-dc1/nyc-web-1' should contain '{"database": "postgres"}'
-
-'availability':
-
-if true then availability groups will be created for each service. The node will
-be added to one of the groups based on the health status of the service. The
-group name is derived from the service name and the configurable availability
-suffixes
-
-'available_suffix':
-
-suffix that should be appended to the service availability groups for available
-services e.g. if the suffix is '_up' and the service is nginx, then nodes with
-healthy nginx services will be added to the nginx_up group. Defaults to
-'_available'
-
-'unavailable_suffix':
-
-as above but for unhealthy services, defaults to '_unavailable'
-
-Note that if the inventory discovers an 'ssh' service running on a node it will
-register the port as ansible_ssh_port in the node's metadata and this port will
-be used to access the machine.
-
-'''
-
-import os
-import re
-import argparse
-import sys
-
-from ansible.module_utils.six.moves import configparser
-
-
-def get_log_filename():
- tty_filename = '/dev/tty'
- stdout_filename = '/dev/stdout'
-
- if not os.path.exists(tty_filename):
- return stdout_filename
- if not os.access(tty_filename, os.W_OK):
- return stdout_filename
- if os.getenv('TEAMCITY_VERSION'):
- return stdout_filename
-
- return tty_filename
-
-
-def setup_logging():
- filename = get_log_filename()
-
- import logging.config
- logging.config.dictConfig({
- 'version': 1,
- 'formatters': {
- 'simple': {
- 'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
- },
- },
- 'root': {
- 'level': os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_LEVEL', 'WARN'),
- 'handlers': ['console'],
- },
- 'handlers': {
- 'console': {
- 'class': 'logging.FileHandler',
- 'filename': filename,
- 'formatter': 'simple',
- },
- },
- 'loggers': {
- 'iso8601': {
- 'qualname': 'iso8601',
- 'level': 'INFO',
- },
- },
- })
- logger = logging.getLogger('consul_io.py')
- logger.debug('Invoked with %r', sys.argv)
-
-
-if os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_ENABLED'):
- setup_logging()
-
-
-import json
-
-try:
- import consul
-except ImportError as e:
- sys.exit("""failed=True msg='python-consul required for this module.
-See https://python-consul.readthedocs.io/en/latest/#installation'""")
-
-from ansible.module_utils.six import iteritems
-
-
-class ConsulInventory(object):
-
- def __init__(self):
- ''' Create an inventory based on the catalog of nodes and services
- registered in a consul cluster'''
- self.node_metadata = {}
- self.nodes = {}
- self.nodes_by_service = {}
- self.nodes_by_tag = {}
- self.nodes_by_datacenter = {}
- self.nodes_by_kv = {}
- self.nodes_by_availability = {}
- self.current_dc = None
- self.inmemory_kv = []
- self.inmemory_nodes = []
-
- config = ConsulConfig()
- self.config = config
-
- self.consul_api = config.get_consul_api()
-
- if config.has_config('datacenter'):
- if config.has_config('host'):
- self.load_data_for_node(config.host, config.datacenter)
- else:
- self.load_data_for_datacenter(config.datacenter)
- else:
- self.load_all_data_consul()
-
- self.combine_all_results()
- print(json.dumps(self.inventory, sort_keys=True, indent=2))
-
- def bulk_load(self, datacenter):
- index, groups_list = self.consul_api.kv.get(self.config.kv_groups, recurse=True, dc=datacenter)
- index, metadata_list = self.consul_api.kv.get(self.config.kv_metadata, recurse=True, dc=datacenter)
- index, nodes = self.consul_api.catalog.nodes(dc=datacenter)
- # kv.get() returns None when the key does not exist, so guard the appends
- self.inmemory_kv += groups_list or []
- self.inmemory_kv += metadata_list or []
- self.inmemory_nodes += nodes
-
- def load_all_data_consul(self):
- ''' cycle through each of the datacenters in the consul catalog and process
- the nodes in each '''
- self.datacenters = self.consul_api.catalog.datacenters()
- for datacenter in self.datacenters:
- self.current_dc = datacenter
- self.bulk_load(datacenter)
- self.load_data_for_datacenter(datacenter)
-
- def load_availability_groups(self, node, datacenter):
- '''check the health of each service on a node and add the node to either
- an 'available' or 'unavailable' grouping. The suffix for each group can be
- controlled from the config'''
- if self.config.has_config('availability'):
- for service_name, service in iteritems(node['Services']):
- # 'entry' avoids shadowing the 'node' argument of this method
- for entry in self.consul_api.health.service(service_name)[1]:
- if self.is_service_available(entry, service_name):
- suffix = self.config.get_availability_suffix(
- 'available_suffix', '_available')
- else:
- suffix = self.config.get_availability_suffix(
- 'unavailable_suffix', '_unavailable')
- self.add_node_to_map(self.nodes_by_availability,
- service_name + suffix, entry['Node'])
-
- def is_service_available(self, node, service_name):
- '''check the availability of the service on the node, as well as the
- availability of the node itself (via its serfHealth check)'''
- consul_ok = service_ok = False
- for check in node['Checks']:
- if check['CheckID'] == 'serfHealth':
- consul_ok = check['Status'] == 'passing'
- elif check['ServiceName'] == service_name:
- service_ok = check['Status'] == 'passing'
- return consul_ok and service_ok
-
- def consul_get_kv_inmemory(self, key):
- # wrap filter() in list() so this also works on Python 3, where
- # filter() returns an iterator without pop() or a useful truth value
- result = list(filter(lambda x: x['Key'] == key, self.inmemory_kv))
- return result.pop() if result else None
-
- def consul_get_node_inmemory(self, node):
- result = list(filter(lambda x: x['Node'] == node, self.inmemory_nodes))
- return {"Node": result.pop(), "Services": {}} if result else None
-
- def load_data_for_datacenter(self, datacenter):
- '''processes all the nodes in a particular datacenter'''
- if self.config.bulk_load == 'true':
- nodes = self.inmemory_nodes
- else:
- index, nodes = self.consul_api.catalog.nodes(dc=datacenter)
- for node in nodes:
- self.add_node_to_map(self.nodes_by_datacenter, datacenter, node)
- self.load_data_for_node(node['Node'], datacenter)
-
- def load_data_for_node(self, node, datacenter):
- '''loads the data for a single node adding it to various groups based on
- metadata retrieved from the kv store and service availability'''
-
- if self.config.suffixes == 'true':
- index, node_data = self.consul_api.catalog.node(node, dc=datacenter)
- else:
- node_data = self.consul_get_node_inmemory(node)
- node = node_data['Node']
-
- self.add_node_to_map(self.nodes, 'all', node)
- self.add_metadata(node_data, "consul_datacenter", datacenter)
- self.add_metadata(node_data, "consul_nodename", node['Node'])
-
- self.load_groups_from_kv(node_data)
- self.load_node_metadata_from_kv(node_data)
- if self.config.suffixes == 'true':
- self.load_availability_groups(node_data, datacenter)
- for name, service in node_data['Services'].items():
- self.load_data_from_service(name, service, node_data)
-
- def load_node_metadata_from_kv(self, node_data):
- ''' load the json dict at the metadata path defined by the kv_metadata value
- and the node name, and add each entry in the dictionary to the node's
- metadata '''
- node = node_data['Node']
- if self.config.has_config('kv_metadata'):
- key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node'])
- if self.config.bulk_load == 'true':
- metadata = self.consul_get_kv_inmemory(key)
- else:
- index, metadata = self.consul_api.kv.get(key)
- if metadata and metadata['Value']:
- try:
- metadata = json.loads(metadata['Value'])
- for k, v in metadata.items():
- self.add_metadata(node_data, k, v)
- except Exception:
- pass
-
- def load_groups_from_kv(self, node_data):
- ''' load the comma separated list of groups at the path defined by the
- kv_groups config value and the node name, and add the node address to
- each group found '''
- node = node_data['Node']
- if self.config.has_config('kv_groups'):
- key = "%s/%s/%s" % (self.config.kv_groups, self.current_dc, node['Node'])
- if self.config.bulk_load == 'true':
- groups = self.consul_get_kv_inmemory(key)
- else:
- index, groups = self.consul_api.kv.get(key)
- if groups and groups['Value']:
- for group in groups['Value'].split(','):
- self.add_node_to_map(self.nodes_by_kv, group.strip(), node)
-
- def load_data_from_service(self, service_name, service, node_data):
- '''process a service registered on a node, adding the node to a group with
- the service name. Each service tag is extracted and the node is added to a
- tag grouping also'''
- self.add_metadata(node_data, "consul_services", service_name, True)
-
- if self.is_service("ssh", service_name):
- self.add_metadata(node_data, "ansible_ssh_port", service['Port'])
-
- if self.config.has_config('servers_suffix'):
- service_name = service_name + self.config.servers_suffix
-
- self.add_node_to_map(self.nodes_by_service, service_name, node_data['Node'])
- self.extract_groups_from_tags(service_name, service, node_data)
-
- def is_service(self, target, name):
- return name and (name.lower() == target.lower())
-
- def extract_groups_from_tags(self, service_name, service, node_data):
- '''iterates each service tag and adds the node to groups derived from the
- service and tag names e.g. nginx_master'''
- if self.config.has_config('tags') and service['Tags']:
- tags = service['Tags']
- self.add_metadata(node_data, "consul_%s_tags" % service_name, tags)
- for tag in service['Tags']:
- tagname = service_name + '_' + tag
- self.add_node_to_map(self.nodes_by_tag, tagname, node_data['Node'])
-
- def combine_all_results(self):
- '''prunes and sorts all groupings for combination into the final map'''
- self.inventory = {"_meta": {"hostvars": self.node_metadata}}
- groupings = [self.nodes, self.nodes_by_datacenter, self.nodes_by_service,
- self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability]
- for grouping in groupings:
- for name, addresses in grouping.items():
- self.inventory[name] = sorted(list(set(addresses)))
-
- def add_metadata(self, node_data, key, value, is_list=False):
- ''' Pushes a value into the metadata dict for the node, creating
- the dict if it doesn't exist '''
- key = self.to_safe(key)
- node = self.get_inventory_name(node_data['Node'])
-
- if node in self.node_metadata:
- metadata = self.node_metadata[node]
- else:
- metadata = {}
- self.node_metadata[node] = metadata
- if is_list:
- self.push(metadata, key, value)
- else:
- metadata[key] = value
-
- def get_inventory_name(self, node_data):
- '''return the ip or a node name that can be looked up in consul's dns'''
- domain = self.config.domain
- if domain:
- node_name = node_data['Node']
- if self.current_dc:
- return '%s.node.%s.%s' % (node_name, self.current_dc, domain)
- else:
- return '%s.node.%s' % (node_name, domain)
- else:
- return node_data['Address']
-
- def add_node_to_map(self, map, name, node):
- self.push(map, name, self.get_inventory_name(node))
-
- def push(self, my_dict, key, element):
- ''' Pushes an element onto a list in the dict, creating the list if it
- doesn't exist '''
- key = self.to_safe(key)
- if key in my_dict:
- my_dict[key].append(element)
- else:
- my_dict[key] = [element]
-
- def to_safe(self, word):
- ''' Converts 'bad' characters in a string to underscores so they can be used
- as Ansible groups '''
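- # note: unlike the cobbler/collins variants, '.' is kept so dotted DNS names survive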
- return re.sub(r'[^A-Za-z0-9\-\.]', '_', word)
-
- def sanitize_dict(self, d):
-
- new_dict = {}
- for k, v in d.items():
- if v is not None:
- new_dict[self.to_safe(str(k))] = self.to_safe(str(v))
- return new_dict
-
- def sanitize_list(self, seq):
- new_seq = []
- for d in seq:
- new_seq.append(self.sanitize_dict(d))
- return new_seq
-
-
-class ConsulConfig(dict):
-
- def __init__(self):
- self.read_settings()
- self.read_cli_args()
- self.read_env_vars()
-
- def has_config(self, name):
- if hasattr(self, name):
- return getattr(self, name)
- else:
- return False
-
- def read_settings(self):
- ''' Reads the settings from the consul_io.ini file (or consul.ini for backwards compatibility)'''
- config = configparser.SafeConfigParser()
- if os.path.isfile(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini'):
- config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini')
- else:
- config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul.ini')
-
- config_options = ['host', 'token', 'datacenter', 'servers_suffix',
- 'tags', 'kv_metadata', 'kv_groups', 'availability',
- 'unavailable_suffix', 'available_suffix', 'url',
- 'domain', 'suffixes', 'bulk_load']
- for option in config_options:
- value = None
- if config.has_option('consul', option):
- value = config.get('consul', option).lower()
- setattr(self, option, value)
-
- def read_cli_args(self):
- ''' Command line argument processing '''
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based nodes in a Consul cluster')
-
- parser.add_argument('--list', action='store_true',
- help='Get all inventory variables from all nodes in the consul cluster')
- parser.add_argument('--host', action='store',
- help='Get all inventory variables about a specific consul node,'
- 'requires datacenter set in consul.ini.')
- parser.add_argument('--datacenter', action='store',
- help='Get all inventory about a specific consul datacenter')
-
- args = parser.parse_args()
- arg_names = ['host', 'datacenter']
-
- for arg in arg_names:
- if getattr(args, arg):
- setattr(self, arg, getattr(args, arg))
-
- def read_env_vars(self):
- env_var_options = ['datacenter', 'url']
- for option in env_var_options:
- value = None
- env_var = 'CONSUL_' + option.upper()
- if os.environ.get(env_var):
- setattr(self, option, os.environ.get(env_var))
-
- def get_availability_suffix(self, suffix, default):
- if self.has_config(suffix):
- return self.has_config(suffix)
- return default
-
- def get_consul_api(self):
- '''get an instance of the api based on the supplied configuration'''
- host = 'localhost'
- port = 8500
- token = None
- scheme = 'http'
-
- if hasattr(self, 'url'):
- from ansible.module_utils.six.moves.urllib.parse import urlparse
- o = urlparse(self.url)
- if o.hostname:
- host = o.hostname
- if o.port:
- port = o.port
- if o.scheme:
- scheme = o.scheme
-
- if hasattr(self, 'token'):
- token = self.token
- if not token:
- token = 'anonymous'
- return consul.Consul(host=host, port=port, token=token, scheme=scheme)
-
-
-ConsulInventory()
diff --git a/contrib/inventory/digital_ocean.ini b/contrib/inventory/digital_ocean.ini
deleted file mode 100644
index b809554b20..0000000000
--- a/contrib/inventory/digital_ocean.ini
+++ /dev/null
@@ -1,34 +0,0 @@
-# Ansible DigitalOcean external inventory script settings
-#
-
-[digital_ocean]
-
-# The module needs your DigitalOcean API Token.
-# It may also be specified on the command line via --api-token
-# or via the environment variables DO_API_TOKEN or DO_API_KEY
-#
-#api_token = 123456abcdefg
-
-
-# API calls to DigitalOcean may be slow. For this reason, we cache the results
-# of an API call. Set this to the path you want cache files to be written to.
-# One file will be written to this directory:
-# - ansible-digital_ocean.cache
-#
-cache_path = /tmp
-
-
-# The number of seconds a cache file is considered valid. After this many
-# seconds, a new API call will be made, and the cache file will be updated.
-#
-cache_max_age = 300
-
-# Use the private network IP address instead of the public when available.
-#
-use_private_network = False
-
-# Pass variables to every group, e.g.:
-#
-# group_variables = { 'ansible_user': 'root' }
-#
-group_variables = {}
diff --git a/contrib/inventory/digital_ocean.py b/contrib/inventory/digital_ocean.py
deleted file mode 100755
index eecdb85c1c..0000000000
--- a/contrib/inventory/digital_ocean.py
+++ /dev/null
@@ -1,551 +0,0 @@
-#!/usr/bin/env python
-
-"""
-DigitalOcean external inventory script
-======================================
-
-Generates Ansible inventory of DigitalOcean Droplets.
-
-In addition to the --list and --host options used by Ansible, there are options
-for generating JSON of other DigitalOcean data. This is useful when creating
-droplets. For example, --regions will return all the DigitalOcean Regions.
-This information can also be easily found in the cache file, whose default
-location is /tmp/ansible-digital_ocean.cache.
-
-The --pretty (-p) option pretty-prints the output for better human readability.
-
-----
-Although the cache stores all the information received from DigitalOcean,
-the cache is not used for current droplet information (in --list, --host,
---all, and --droplets). This is so that accurate droplet information is always
-found. You can force this script to use the cache with --force-cache.
-
-----
-Configuration is read from `digital_ocean.ini`, then from environment variables,
-and then from command-line arguments.
-
-Most notably, the DigitalOcean API Token must be specified. It can be specified
-in the INI file or with the following environment variables:
- export DO_API_TOKEN='abc123' or
- export DO_API_KEY='abc123'
-
-Alternatively, it can be passed on the command-line with --api-token.
-
-If you specify DigitalOcean credentials in the INI file, a handy way to
-get them into your environment (e.g., to use the digital_ocean module)
-is to use the output of the --env option with export:
- export $(digital_ocean.py --env)
-
-----
-The following groups are generated from --list:
- - ID (droplet ID)
- - NAME (droplet NAME)
- - digital_ocean
- - image_ID
- - image_NAME
- - distro_NAME (distribution NAME from image)
- - region_NAME
- - size_NAME
- - status_STATUS
-
-For each host, the following variables are registered:
- - do_backup_ids
- - do_created_at
- - do_disk
- - do_features - list
- - do_id
- - do_image - object
- - do_ip_address
- - do_private_ip_address
- - do_kernel - object
- - do_locked
- - do_memory
- - do_name
- - do_networks - object
- - do_next_backup_window
- - do_region - object
- - do_size - object
- - do_size_slug
- - do_snapshot_ids - list
- - do_status
- - do_tags
- - do_vcpus
- - do_volume_ids
-
------
-```
-usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] [--droplets]
- [--regions] [--images] [--sizes] [--ssh-keys]
- [--domains] [--tags] [--pretty]
- [--cache-path CACHE_PATH]
- [--cache-max_age CACHE_MAX_AGE] [--force-cache]
- [--refresh-cache] [--env] [--api-token API_TOKEN]
-
-Produce an Ansible Inventory file based on DigitalOcean credentials
-
-optional arguments:
- -h, --help show this help message and exit
- --list List all active Droplets as Ansible inventory
- (default: True)
- --host HOST Get all Ansible inventory variables about a specific
- Droplet
- --all List all DigitalOcean information as JSON
- --droplets, -d List Droplets as JSON
- --regions List Regions as JSON
- --images List Images as JSON
- --sizes List Sizes as JSON
- --ssh-keys List SSH keys as JSON
- --domains List Domains as JSON
- --tags List Tags as JSON
- --pretty, -p Pretty-print results
- --cache-path CACHE_PATH
- Path to the cache files (default: .)
- --cache-max_age CACHE_MAX_AGE
- Maximum age of the cached items (default: 0)
- --force-cache Only use data from the cache
- --refresh-cache, -r Force refresh of cache by making API requests to
- DigitalOcean (default: False - use cache files)
- --env, -e Display DO_API_TOKEN
- --api-token API_TOKEN, -a API_TOKEN
- DigitalOcean API Token
-```
-
-"""
-
-# (c) 2013, Evan Wies <evan@neomantra.net>
-# (c) 2017, Ansible Project
-# (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
-#
-# Inspired by the EC2 inventory plugin:
-# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-######################################################################
-
-import argparse
-import ast
-import os
-import re
-import requests
-import sys
-from time import time
-
-try:
- import ConfigParser
-except ImportError:
- import configparser as ConfigParser
-
-import json
-
-
-class DoManager:
- def __init__(self, api_token):
- self.api_token = api_token
- self.api_endpoint = 'https://api.digitalocean.com/v2'
- self.headers = {'Authorization': 'Bearer {0}'.format(self.api_token),
- 'Content-type': 'application/json'}
- self.timeout = 60
-
- def _url_builder(self, path):
- if path[0] == '/':
- path = path[1:]
- return '%s/%s' % (self.api_endpoint, path)
-
- def send(self, url, method='GET', data=None):
- url = self._url_builder(url)
- data = json.dumps(data)
- try:
- if method == 'GET':
- resp_data = {}
- incomplete = True
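- # DigitalOcean v2 list responses are paginated: keep following
- # 'links.pages.next' and merge list-typed fields until no next page remains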
- while incomplete:
- resp = requests.get(url, data=data, headers=self.headers, timeout=self.timeout)
- json_resp = resp.json()
-
- for key, value in json_resp.items():
- if isinstance(value, list) and key in resp_data:
- resp_data[key] += value
- else:
- resp_data[key] = value
-
- try:
- url = json_resp['links']['pages']['next']
- except KeyError:
- incomplete = False
-
- except ValueError as e:
- sys.exit("Unable to parse result from %s: %s" % (url, e))
- return resp_data
-
- def all_active_droplets(self):
- resp = self.send('droplets/')
- return resp['droplets']
-
- def all_regions(self):
- resp = self.send('regions/')
- return resp['regions']
-
- def all_images(self, filter_name='global'):
- params = {'filter': filter_name}
- resp = self.send('images/', data=params)
- return resp['images']
-
- def sizes(self):
- resp = self.send('sizes/')
- return resp['sizes']
-
- def all_ssh_keys(self):
- resp = self.send('account/keys')
- return resp['ssh_keys']
-
- def all_domains(self):
- resp = self.send('domains/')
- return resp['domains']
-
- def show_droplet(self, droplet_id):
- resp = self.send('droplets/%s' % droplet_id)
- return resp['droplet']
-
- def all_tags(self):
- resp = self.send('tags')
- return resp['tags']
-
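-# A minimal usage sketch for DoManager (the token value below is an
-# illustrative placeholder, not a real credential):
-#
-#   manager = DoManager('0123456789abcdef')
-#   for droplet in manager.all_active_droplets():
-#       print(droplet['name'])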
-
-class DigitalOceanInventory(object):
-
- ###########################################################################
- # Main execution path
- ###########################################################################
-
- def __init__(self):
- """Main execution path """
-
- # DigitalOceanInventory data
- self.data = {} # All DigitalOcean data
- self.inventory = {} # Ansible Inventory
-
- # Define defaults
- self.cache_path = '.'
- self.cache_max_age = 0
- self.use_private_network = False
- self.group_variables = {}
-
- # Read settings, environment variables, and CLI arguments
- self.read_settings()
- self.read_environment()
- self.read_cli_args()
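-        # For the API token, later sources override earlier ones:
-        # ini file < environment variables < CLI arguments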
-
- # Verify credentials were set
- if not hasattr(self, 'api_token'):
- msg = 'Could not find values for DigitalOcean api_token. They must be specified via either ini file, ' \
- 'command line argument (--api-token), or environment variables (DO_API_TOKEN)\n'
- sys.stderr.write(msg)
- sys.exit(-1)
-
- # env command, show DigitalOcean credentials
- if self.args.env:
- print("DO_API_TOKEN=%s" % self.api_token)
- sys.exit(0)
-
- # Manage cache
- self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache"
- self.cache_refreshed = False
-
- if self.is_cache_valid():
- self.load_from_cache()
- if len(self.data) == 0:
- if self.args.force_cache:
- sys.stderr.write('Cache is empty and --force-cache was specified\n')
- sys.exit(-1)
-
- self.manager = DoManager(self.api_token)
-
- # Pick the json_data to print based on the CLI command
- if self.args.droplets:
- self.load_from_digital_ocean('droplets')
- json_data = {'droplets': self.data['droplets']}
- elif self.args.regions:
- self.load_from_digital_ocean('regions')
- json_data = {'regions': self.data['regions']}
- elif self.args.images:
- self.load_from_digital_ocean('images')
- json_data = {'images': self.data['images']}
- elif self.args.sizes:
- self.load_from_digital_ocean('sizes')
- json_data = {'sizes': self.data['sizes']}
- elif self.args.ssh_keys:
- self.load_from_digital_ocean('ssh_keys')
- json_data = {'ssh_keys': self.data['ssh_keys']}
- elif self.args.domains:
- self.load_from_digital_ocean('domains')
- json_data = {'domains': self.data['domains']}
- elif self.args.tags:
- self.load_from_digital_ocean('tags')
- json_data = {'tags': self.data['tags']}
- elif self.args.all:
- self.load_from_digital_ocean()
- json_data = self.data
- elif self.args.host:
- json_data = self.load_droplet_variables_for_host()
- else: # '--list' this is last to make it default
- self.load_from_digital_ocean('droplets')
- self.build_inventory()
- json_data = self.inventory
-
- if self.cache_refreshed:
- self.write_to_cache()
-
- if self.args.pretty:
- print(json.dumps(json_data, indent=2))
- else:
- print(json.dumps(json_data))
-
- ###########################################################################
- # Script configuration
- ###########################################################################
-
- def read_settings(self):
- """ Reads the settings from the digital_ocean.ini file """
- config = ConfigParser.ConfigParser()
- config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'digital_ocean.ini')
- config.read(config_path)
-
- # Credentials
- if config.has_option('digital_ocean', 'api_token'):
- self.api_token = config.get('digital_ocean', 'api_token')
-
- # Cache related
- if config.has_option('digital_ocean', 'cache_path'):
- self.cache_path = config.get('digital_ocean', 'cache_path')
- if config.has_option('digital_ocean', 'cache_max_age'):
- self.cache_max_age = config.getint('digital_ocean', 'cache_max_age')
-
- # Private IP Address
- if config.has_option('digital_ocean', 'use_private_network'):
- self.use_private_network = config.getboolean('digital_ocean', 'use_private_network')
-
- # Group variables
- if config.has_option('digital_ocean', 'group_variables'):
- self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables'))
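-
-        # A digital_ocean.ini sketch covering the options read above (all
-        # values are illustrative placeholders):
-        #
-        #   [digital_ocean]
-        #   api_token = 0123456789abcdef
-        #   cache_path = .
-        #   cache_max_age = 300
-        #   use_private_network = False
-        #   group_variables = { 'ansible_user': 'root' }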
-
- def read_environment(self):
- """ Reads the settings from environment variables """
- # Setup credentials
- if os.getenv("DO_API_TOKEN"):
- self.api_token = os.getenv("DO_API_TOKEN")
- if os.getenv("DO_API_KEY"):
- self.api_token = os.getenv("DO_API_KEY")
-
- def read_cli_args(self):
- """ Command line argument processing """
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials')
-
- parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)')
- parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet')
-
- parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON')
- parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON')
- parser.add_argument('--regions', action='store_true', help='List Regions as JSON')
- parser.add_argument('--images', action='store_true', help='List Images as JSON')
- parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON')
- parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON')
- parser.add_argument('--domains', action='store_true', help='List Domains as JSON')
- parser.add_argument('--tags', action='store_true', help='List Tags as JSON')
-
- parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results')
-
- parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)')
- parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)')
- parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache')
- parser.add_argument('--refresh-cache', '-r', action='store_true', default=False,
- help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)')
-
- parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN')
- parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token')
-
- self.args = parser.parse_args()
-
- if self.args.api_token:
- self.api_token = self.args.api_token
-
- # Make --list default if none of the other commands are specified
- if (not self.args.droplets and not self.args.regions and
- not self.args.images and not self.args.sizes and
- not self.args.ssh_keys and not self.args.domains and
- not self.args.tags and
- not self.args.all and not self.args.host):
- self.args.list = True
-
- ###########################################################################
- # Data Management
- ###########################################################################
-
- def load_from_digital_ocean(self, resource=None):
- """Get JSON from DigitalOcean API """
- if self.args.force_cache and os.path.isfile(self.cache_filename):
- return
- # We always get fresh droplets
- if self.is_cache_valid() and not (resource == 'droplets' or resource is None):
- return
- if self.args.refresh_cache:
- resource = None
-
- if resource == 'droplets' or resource is None:
- self.data['droplets'] = self.manager.all_active_droplets()
- self.cache_refreshed = True
- if resource == 'regions' or resource is None:
- self.data['regions'] = self.manager.all_regions()
- self.cache_refreshed = True
- if resource == 'images' or resource is None:
- self.data['images'] = self.manager.all_images()
- self.cache_refreshed = True
- if resource == 'sizes' or resource is None:
- self.data['sizes'] = self.manager.sizes()
- self.cache_refreshed = True
- if resource == 'ssh_keys' or resource is None:
- self.data['ssh_keys'] = self.manager.all_ssh_keys()
- self.cache_refreshed = True
- if resource == 'domains' or resource is None:
- self.data['domains'] = self.manager.all_domains()
- self.cache_refreshed = True
- if resource == 'tags' or resource is None:
- self.data['tags'] = self.manager.all_tags()
- self.cache_refreshed = True
-
- def add_inventory_group(self, key):
- """ Method to create group dict """
- host_dict = {'hosts': [], 'vars': {}}
- self.inventory[key] = host_dict
- return
-
- def add_host(self, group, host):
- """ Helper method to reduce host duplication """
- if group not in self.inventory:
- self.add_inventory_group(group)
-
- if host not in self.inventory[group]['hosts']:
- self.inventory[group]['hosts'].append(host)
- return
-
- def build_inventory(self):
- """ Build Ansible inventory of droplets """
- self.inventory = {
- 'all': {
- 'hosts': [],
- 'vars': self.group_variables
- },
- '_meta': {'hostvars': {}}
- }
-
- # add all droplets by id and name
- for droplet in self.data['droplets']:
- for net in droplet['networks']['v4']:
- if net['type'] == 'public':
- dest = net['ip_address']
- else:
- continue
-
- self.inventory['all']['hosts'].append(dest)
-
- self.add_host(droplet['id'], dest)
-
- self.add_host(droplet['name'], dest)
-
- # groups that are always present
- for group in ('digital_ocean',
- 'region_' + droplet['region']['slug'],
- 'image_' + str(droplet['image']['id']),
- 'size_' + droplet['size']['slug'],
- 'distro_' + DigitalOceanInventory.to_safe(droplet['image']['distribution']),
- 'status_' + droplet['status']):
- self.add_host(group, dest)
-
- # groups that are not always present
- for group in (droplet['image']['slug'],
- droplet['image']['name']):
- if group:
- image = 'image_' + DigitalOceanInventory.to_safe(group)
- self.add_host(image, dest)
-
- if droplet['tags']:
- for tag in droplet['tags']:
- self.add_host(tag, dest)
-
- # hostvars
- info = self.do_namespace(droplet)
- self.inventory['_meta']['hostvars'][dest] = info
-
- def load_droplet_variables_for_host(self):
- """ Generate a JSON response to a --host call """
- host = int(self.args.host)
- droplet = self.manager.show_droplet(host)
- info = self.do_namespace(droplet)
- return {'droplet': info}
-
- ###########################################################################
- # Cache Management
- ###########################################################################
-
- def is_cache_valid(self):
- """ Determines if the cache files have expired, or if it is still valid """
- if os.path.isfile(self.cache_filename):
- mod_time = os.path.getmtime(self.cache_filename)
- current_time = time()
- if (mod_time + self.cache_max_age) > current_time:
- return True
- return False
-
- def load_from_cache(self):
- """ Reads the data from the cache file and assigns it to member variables as Python Objects """
- try:
- with open(self.cache_filename, 'r') as cache:
- json_data = cache.read()
- data = json.loads(json_data)
- except IOError:
- data = {'data': {}, 'inventory': {}}
-
- self.data = data['data']
- self.inventory = data['inventory']
-
- def write_to_cache(self):
- """ Writes data in JSON format to a file """
- data = {'data': self.data, 'inventory': self.inventory}
- json_data = json.dumps(data, indent=2)
-
- with open(self.cache_filename, 'w') as cache:
- cache.write(json_data)
-
- ###########################################################################
- # Utilities
- ###########################################################################
- @staticmethod
- def to_safe(word):
- """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """
- return re.sub(r"[^A-Za-z0-9\-.]", "_", word)
-
- @staticmethod
- def do_namespace(data):
- """ Returns a copy of the dictionary with all the keys put in a 'do_' namespace """
- info = {}
- for k, v in data.items():
- info['do_' + k] = v
- return info
-
-
-###########################################################################
-# Run the script
-DigitalOceanInventory()
diff --git a/contrib/inventory/docker.py b/contrib/inventory/docker.py
deleted file mode 100755
index 489c820a23..0000000000
--- a/contrib/inventory/docker.py
+++ /dev/null
@@ -1,905 +0,0 @@
-#!/usr/bin/env python
-#
-# (c) 2016 Paul Durivage <paul.durivage@gmail.com>
-# Chris Houseknecht <house@redhat.com>
-# James Tanner <jtanner@redhat.com>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-
-Docker Inventory Script
-=======================
-The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic
-because the inventory is generated at run-time rather than being read from a static file. The script generates the
-inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the
-script contacts can be defined using environment variables or a configuration file.
-
-Requirements
-------------
-
-Using this inventory script requires having docker-py <https://docker-py.readthedocs.io/en/stable/>
-installed on the host running Ansible. To install docker-py:
-
- pip install docker-py
-
-
-Run for Specific Host
----------------------
-When run for a specific container using the --host option this script returns the following hostvars:
-
-{
- "ansible_ssh_host": "",
- "ansible_ssh_port": 0,
- "docker_apparmorprofile": "",
- "docker_args": [],
- "docker_config": {
- "AttachStderr": false,
- "AttachStdin": false,
- "AttachStdout": false,
- "Cmd": [
- "/hello"
- ],
- "Domainname": "",
- "Entrypoint": null,
- "Env": null,
- "Hostname": "9f2f80b0a702",
- "Image": "hello-world",
- "Labels": {},
- "OnBuild": null,
- "OpenStdin": false,
- "StdinOnce": false,
- "Tty": false,
- "User": "",
- "Volumes": null,
- "WorkingDir": ""
- },
- "docker_created": "2016-04-18T02:05:59.659599249Z",
- "docker_driver": "aufs",
- "docker_execdriver": "native-0.2",
- "docker_execids": null,
- "docker_graphdriver": {
- "Data": null,
- "Name": "aufs"
- },
- "docker_hostconfig": {
- "Binds": null,
- "BlkioWeight": 0,
- "CapAdd": null,
- "CapDrop": null,
- "CgroupParent": "",
- "ConsoleSize": [
- 0,
- 0
- ],
- "ContainerIDFile": "",
- "CpuPeriod": 0,
- "CpuQuota": 0,
- "CpuShares": 0,
- "CpusetCpus": "",
- "CpusetMems": "",
- "Devices": null,
- "Dns": null,
- "DnsOptions": null,
- "DnsSearch": null,
- "ExtraHosts": null,
- "GroupAdd": null,
- "IpcMode": "",
- "KernelMemory": 0,
- "Links": null,
- "LogConfig": {
- "Config": {},
- "Type": "json-file"
- },
- "LxcConf": null,
- "Memory": 0,
- "MemoryReservation": 0,
- "MemorySwap": 0,
- "MemorySwappiness": null,
- "NetworkMode": "default",
- "OomKillDisable": false,
- "PidMode": "host",
- "PortBindings": null,
- "Privileged": false,
- "PublishAllPorts": false,
- "ReadonlyRootfs": false,
- "RestartPolicy": {
- "MaximumRetryCount": 0,
- "Name": ""
- },
- "SecurityOpt": [
- "label:disable"
- ],
- "UTSMode": "",
- "Ulimits": null,
- "VolumeDriver": "",
- "VolumesFrom": null
- },
- "docker_hostnamepath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hostname",
- "docker_hostspath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hosts",
- "docker_id": "9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14",
- "docker_image": "0a6ba66e537a53a5ea94f7c6a99c534c6adb12e3ed09326d4bf3b38f7c3ba4e7",
- "docker_logpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/9f2f80b0a702361d1ac432e6a-json.log",
- "docker_mountlabel": "",
- "docker_mounts": [],
- "docker_name": "/hello-world",
- "docker_networksettings": {
- "Bridge": "",
- "EndpointID": "",
- "Gateway": "",
- "GlobalIPv6Address": "",
- "GlobalIPv6PrefixLen": 0,
- "HairpinMode": false,
- "IPAddress": "",
- "IPPrefixLen": 0,
- "IPv6Gateway": "",
- "LinkLocalIPv6Address": "",
- "LinkLocalIPv6PrefixLen": 0,
- "MacAddress": "",
- "Networks": {
- "bridge": {
- "EndpointID": "",
- "Gateway": "",
- "GlobalIPv6Address": "",
- "GlobalIPv6PrefixLen": 0,
- "IPAddress": "",
- "IPPrefixLen": 0,
- "IPv6Gateway": "",
- "MacAddress": ""
- }
- },
- "Ports": null,
- "SandboxID": "",
- "SandboxKey": "",
- "SecondaryIPAddresses": null,
- "SecondaryIPv6Addresses": null
- },
- "docker_path": "/hello",
- "docker_processlabel": "",
- "docker_resolvconfpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/resolv.conf",
- "docker_restartcount": 0,
- "docker_short_id": "9f2f80b0a7023",
- "docker_state": {
- "Dead": false,
- "Error": "",
- "ExitCode": 0,
- "FinishedAt": "2016-04-18T02:06:00.296619369Z",
- "OOMKilled": false,
- "Paused": false,
- "Pid": 0,
- "Restarting": false,
- "Running": false,
- "StartedAt": "2016-04-18T02:06:00.272065041Z",
- "Status": "exited"
- }
-}
-
-Groups
-------
-When run in --list mode (the default), container instances are grouped by:
-
- - container id
- - container name
- - container short id
- - image_name (image_<image name>)
- - stack_name (stack_<stack name>)
- - service_name (service_<service name>)
- - docker_host
- - running
- - stopped
-
-
-Configuration:
---------------
-You can control the behavior of the inventory script by passing arguments, defining environment variables, or
-creating a configuration file named docker.yml (sample provided in ansible/contrib/inventory). As implemented, the
-order of precedence is the docker.yml file, then command line args, and finally environment variables.
-
-Environment variables:
-......................
-
-To connect to a single Docker API the following variables can be defined in the environment to control the connection
-options. These are the same environment variables used by the Docker modules.
-
- DOCKER_HOST
- The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock.
-
- DOCKER_API_VERSION:
- The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
- by docker-py.
-
- DOCKER_TIMEOUT:
-    The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.
-
- DOCKER_TLS:
- Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server.
- Defaults to False.
-
- DOCKER_TLS_VERIFY:
- Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
-        Defaults to False.
-
- DOCKER_TLS_HOSTNAME:
- When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults
- to localhost.
-
- DOCKER_CERT_PATH:
- Path to the directory containing the client certificate, client key and CA certificate.
-
- DOCKER_SSL_VERSION:
- Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing
- was 1.0
-
-In addition to the connection variables there are a couple of variables used to control the execution and output of the
-script:
-
- DOCKER_CONFIG_FILE
- Path to the configuration file. Defaults to ./docker.yml.
-
- DOCKER_PRIVATE_SSH_PORT:
- The private port (container port) on which SSH is listening for connections. Defaults to 22.
-
- DOCKER_DEFAULT_IP:
- The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
-
-
-Configuration File
-..................
-
-Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory.
-
-The default name of the file is derived from the name of the inventory script: the script looks for a file named
-after its own basename (i.e. docker) with a '.yml' extension.
-
-You can also override the default name of the file by defining DOCKER_CONFIG_FILE in the environment.
-
-Here's what you can define in docker.yml:
-
- defaults
- Defines a default connection. Defaults will be taken from this and applied to any values not provided
- for a host defined in the hosts list.
-
- hosts
- If you wish to get inventory from more than one Docker host, define a hosts list.
-
-For the default host and each host in the hosts list define the following attributes:
-
- host:
- description: The URL or Unix socket path used to connect to the Docker API.
- required: yes
-
- tls:
- description: Connect using TLS without verifying the authenticity of the Docker host server.
- default: false
- required: false
-
- tls_verify:
-        description: Connect using TLS and verify the authenticity of the Docker host server.
- default: false
- required: false
-
- cert_path:
- description: Path to the client's TLS certificate file.
- default: null
- required: false
-
- cacert_path:
- description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
- default: null
- required: false
-
- key_path:
- description: Path to the client's TLS key file.
- default: null
- required: false
-
- version:
- description: The Docker API version.
- required: false
- default: will be supplied by the docker-py module.
-
- timeout:
- description: The amount of time in seconds to wait on an API response.
- required: false
- default: 60
-
- default_ip:
- description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
- '0.0.0.0'.
- required: false
- default: 127.0.0.1
-
- private_ssh_port:
- description: The port containers use for SSH
- required: false
- default: 22
-
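-A minimal docker.yml sketch using the attributes above (the host addresses
-are illustrative):
-
-    defaults:
-      host: unix:///var/run/docker.sock
-    hosts:
-      - host: tcp://10.45.5.16:4243
-        private_ssh_port: 2022
-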
-Examples
---------
-
-# Connect to the Docker API on localhost port 4243 and format the JSON output
-DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty
-
-# Any container's ssh port exposed on 0.0.0.0 will be mapped to
-# another IP address (where Ansible will attempt to connect via SSH)
-DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty
-
-# Run as input to a playbook:
-ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml
-
-# Simple playbook to invoke with the above example:
-
- - name: Test docker_inventory
- hosts: all
- connection: local
- gather_facts: no
- tasks:
- - debug: msg="Container - {{ inventory_hostname }}"
-
-'''
-
-import os
-import sys
-import json
-import argparse
-import re
-import yaml
-
-from collections import defaultdict
-# Manipulation of the path is needed because the docker-py
-# module is imported by the name docker, and because this file
-# is also named docker
-for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]:
- try:
- del sys.path[sys.path.index(path)]
- except Exception:
- pass
-
-HAS_DOCKER_PY = True
-HAS_DOCKER_ERROR = False
-
-try:
- from docker.errors import APIError, TLSParameterError
- from docker.tls import TLSConfig
- from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION
-except ImportError as exc:
- HAS_DOCKER_ERROR = str(exc)
- HAS_DOCKER_PY = False
-
-# Client has recently been split into DockerClient and APIClient
-try:
- from docker import Client
-except ImportError as dummy:
- try:
- from docker import APIClient as Client
- except ImportError as exc:
- HAS_DOCKER_ERROR = str(exc)
- HAS_DOCKER_PY = False
-
- class Client:
- pass
-
-DEFAULT_DOCKER_CONFIG_FILE = os.path.splitext(os.path.basename(__file__))[0] + '.yml'
-DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
-DEFAULT_TLS = False
-DEFAULT_TLS_VERIFY = False
-DEFAULT_TLS_HOSTNAME = "localhost"
-DEFAULT_IP = '127.0.0.1'
-DEFAULT_SSH_PORT = '22'
-
-BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True]
-BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False]
-
-
-DOCKER_ENV_ARGS = dict(
- config_file='DOCKER_CONFIG_FILE',
- docker_host='DOCKER_HOST',
- api_version='DOCKER_API_VERSION',
- cert_path='DOCKER_CERT_PATH',
- ssl_version='DOCKER_SSL_VERSION',
- tls='DOCKER_TLS',
- tls_verify='DOCKER_TLS_VERIFY',
- tls_hostname='DOCKER_TLS_HOSTNAME',
- timeout='DOCKER_TIMEOUT',
- private_ssh_port='DOCKER_DEFAULT_SSH_PORT',
- default_ip='DOCKER_DEFAULT_IP',
-)
-
-
-def fail(msg):
- sys.stderr.write("%s\n" % msg)
- sys.exit(1)
-
-
-def log(msg, pretty_print=False):
- if pretty_print:
- print(json.dumps(msg, sort_keys=True, indent=2))
- else:
- print(msg + u'\n')
-
-
-class AnsibleDockerClient(Client):
- def __init__(self, auth_params, debug):
-
- self.auth_params = auth_params
- self.debug = debug
- self._connect_params = self._get_connect_params()
-
- try:
- super(AnsibleDockerClient, self).__init__(**self._connect_params)
- except APIError as exc:
- self.fail("Docker API error: %s" % exc)
- except Exception as exc:
- self.fail("Error connecting: %s" % exc)
-
- def fail(self, msg):
- fail(msg)
-
- def log(self, msg, pretty_print=False):
- if self.debug:
- log(msg, pretty_print)
-
- def _get_tls_config(self, **kwargs):
- self.log("get_tls_config:")
- for key in kwargs:
- self.log(" %s: %s" % (key, kwargs[key]))
- try:
- tls_config = TLSConfig(**kwargs)
- return tls_config
- except TLSParameterError as exc:
- self.fail("TLS config error: %s" % exc)
-
- def _get_connect_params(self):
- auth = self.auth_params
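-        # Five TLS modes are handled below, in order:
-        #   1. tls + client cert/key     -> encrypted, host not verified
-        #   2. tls only                  -> encrypted, no certs, not verified
-        #   3. tls_verify + client certs -> encrypted and verified (CA optional)
-        #   4. tls_verify + CA cert only -> encrypted and verified via the CA
-        #   5. tls_verify only           -> encrypted and verified, no certs
-        # Anything else falls through to a plain, unencrypted connection.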
-
- self.log("auth params:")
- for key in auth:
- self.log(" %s: %s" % (key, auth[key]))
-
- if auth['tls'] or auth['tls_verify']:
- auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
-
- if auth['tls'] and auth['cert_path'] and auth['key_path']:
- # TLS with certs and no host verification
- tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
- verify=False,
- ssl_version=auth['ssl_version'])
- return dict(base_url=auth['docker_host'],
- tls=tls_config,
- version=auth['api_version'],
- timeout=auth['timeout'])
-
- if auth['tls']:
-            # TLS with no certs and no host verification
- tls_config = self._get_tls_config(verify=False,
- ssl_version=auth['ssl_version'])
- return dict(base_url=auth['docker_host'],
- tls=tls_config,
- version=auth['api_version'],
- timeout=auth['timeout'])
-
- if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
- # TLS with certs and host verification
- if auth['cacert_path']:
- tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
- ca_cert=auth['cacert_path'],
- verify=True,
- assert_hostname=auth['tls_hostname'],
- ssl_version=auth['ssl_version'])
- else:
- tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
- verify=True,
- assert_hostname=auth['tls_hostname'],
- ssl_version=auth['ssl_version'])
-
- return dict(base_url=auth['docker_host'],
- tls=tls_config,
- version=auth['api_version'],
- timeout=auth['timeout'])
-
- if auth['tls_verify'] and auth['cacert_path']:
- # TLS with cacert only
- tls_config = self._get_tls_config(ca_cert=auth['cacert_path'],
- assert_hostname=auth['tls_hostname'],
- verify=True,
- ssl_version=auth['ssl_version'])
- return dict(base_url=auth['docker_host'],
- tls=tls_config,
- version=auth['api_version'],
- timeout=auth['timeout'])
-
- if auth['tls_verify']:
- # TLS with verify and no certs
- tls_config = self._get_tls_config(verify=True,
- assert_hostname=auth['tls_hostname'],
- ssl_version=auth['ssl_version'])
- return dict(base_url=auth['docker_host'],
- tls=tls_config,
- version=auth['api_version'],
- timeout=auth['timeout'])
- # No TLS
- return dict(base_url=auth['docker_host'],
- version=auth['api_version'],
- timeout=auth['timeout'])
-
- def _handle_ssl_error(self, error):
- match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
- if match:
- msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \
- "Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \
- "You may also use TLS without verification by setting the tls parameter to true." \
- % (self.auth_params['tls_hostname'], match.group(1), match.group(1))
- self.fail(msg)
- self.fail("SSL Exception: %s" % (error))
-
-
-class EnvArgs(object):
- def __init__(self):
- self.config_file = None
- self.docker_host = None
- self.api_version = None
- self.cert_path = None
- self.ssl_version = None
- self.tls = None
- self.tls_verify = None
- self.tls_hostname = None
- self.timeout = None
- self.default_ssh_port = None
- self.default_ip = None
-
-
-class DockerInventory(object):
-
- def __init__(self):
- self._args = self._parse_cli_args()
- self._env_args = self._parse_env_args()
- self.groups = defaultdict(list)
- self.hostvars = defaultdict(dict)
-
- def run(self):
- config_from_file = self._parse_config_file()
- if not config_from_file:
- config_from_file = dict()
- docker_hosts = self.get_hosts(config_from_file)
-
- for host in docker_hosts:
- client = AnsibleDockerClient(host, self._args.debug)
- self.get_inventory(client, host)
-
- if not self._args.host:
- self.groups['docker_hosts'] = [host.get('docker_host') for host in docker_hosts]
- self.groups['_meta'] = dict(
- hostvars=self.hostvars
- )
- print(self._json_format_dict(self.groups, pretty_print=self._args.pretty))
- else:
- print(self._json_format_dict(self.hostvars.get(self._args.host, dict()), pretty_print=self._args.pretty))
-
- sys.exit(0)
-
- def get_inventory(self, client, host):
-
- ssh_port = host.get('default_ssh_port')
- default_ip = host.get('default_ip')
- hostname = host.get('docker_host')
-
- try:
- containers = client.containers(all=True)
- except Exception as exc:
- self.fail("Error fetching containers for host %s - %s" % (hostname, str(exc)))
-
- for container in containers:
- id = container.get('Id')
- short_id = id[:13]
-
- try:
- name = container.get('Names', list()).pop(0).lstrip('/')
- except IndexError:
- name = short_id
-
- if not self._args.host or (self._args.host and self._args.host in [name, id, short_id]):
- try:
- inspect = client.inspect_container(id)
- except Exception as exc:
- self.fail("Error inspecting container %s - %s" % (name, str(exc)))
-
- running = inspect.get('State', dict()).get('Running')
-
- # Add container to groups
- image_name = inspect.get('Config', dict()).get('Image')
- if image_name:
- self.groups["image_%s" % (image_name)].append(name)
-
- stack_name = inspect.get('Config', dict()).get('Labels', dict()).get('com.docker.stack.namespace')
- if stack_name:
- self.groups["stack_%s" % stack_name].append(name)
-
- service_name = inspect.get('Config', dict()).get('Labels', dict()).get('com.docker.swarm.service.name')
- if service_name:
- self.groups["service_%s" % service_name].append(name)
-
- self.groups[id].append(name)
- self.groups[name].append(name)
- if short_id not in self.groups:
- self.groups[short_id].append(name)
- self.groups[hostname].append(name)
-
- if running is True:
- self.groups['running'].append(name)
- else:
- self.groups['stopped'].append(name)
-
-            # Figure out SSH IP and port
- try:
- # Lookup the public facing port Nat'ed to ssh port.
- port = client.port(container, ssh_port)[0]
- except (IndexError, AttributeError, TypeError):
- port = dict()
-
- try:
- ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
- except KeyError:
- ip = ''
-
- facts = dict(
- ansible_ssh_host=ip,
- ansible_ssh_port=port.get('HostPort', int()),
- docker_name=name,
- docker_short_id=short_id
- )
-
- for key in inspect:
- fact_key = self._slugify(key)
- facts[fact_key] = inspect.get(key)
-
- self.hostvars[name].update(facts)
-
- def _slugify(self, value):
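-        # e.g. _slugify('HostConfig') -> 'docker_hostconfig'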
- return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
-
- def get_hosts(self, config):
- '''
- Determine the list of docker hosts we need to talk to.
-
- :param config: dictionary read from config file. can be empty.
- :return: list of connection dictionaries
- '''
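-        # As implemented below, each connection value generally resolves in
-        # this order: per-host entry in the config file, then the file's
-        # defaults, then CLI arguments, then environment variables, then the
-        # DEFAULT_* constants above.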
- hosts = list()
-
- hosts_list = config.get('hosts')
- defaults = config.get('defaults', dict())
- self.log('defaults:')
- self.log(defaults, pretty_print=True)
- def_host = defaults.get('host')
- def_tls = defaults.get('tls')
- def_tls_verify = defaults.get('tls_verify')
- def_tls_hostname = defaults.get('tls_hostname')
- def_ssl_version = defaults.get('ssl_version')
- def_cert_path = defaults.get('cert_path')
- def_cacert_path = defaults.get('cacert_path')
- def_key_path = defaults.get('key_path')
- def_version = defaults.get('version')
- def_timeout = defaults.get('timeout')
- def_ip = defaults.get('default_ip')
- def_ssh_port = defaults.get('private_ssh_port')
-
- if hosts_list:
- # use hosts from config file
- for host in hosts_list:
- docker_host = host.get('host') or def_host or self._args.docker_host or \
- self._env_args.docker_host or DEFAULT_DOCKER_HOST
- api_version = host.get('version') or def_version or self._args.api_version or \
- self._env_args.api_version or DEFAULT_DOCKER_API_VERSION
- tls_hostname = host.get('tls_hostname') or def_tls_hostname or self._args.tls_hostname or \
- self._env_args.tls_hostname or DEFAULT_TLS_HOSTNAME
- tls_verify = host.get('tls_verify') or def_tls_verify or self._args.tls_verify or \
- self._env_args.tls_verify or DEFAULT_TLS_VERIFY
- tls = host.get('tls') or def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS
- ssl_version = host.get('ssl_version') or def_ssl_version or self._args.ssl_version or \
- self._env_args.ssl_version
-
- cert_path = host.get('cert_path') or def_cert_path or self._args.cert_path or \
- self._env_args.cert_path
- if cert_path and cert_path == self._env_args.cert_path:
- cert_path = os.path.join(cert_path, 'cert.pem')
-
- cacert_path = host.get('cacert_path') or def_cacert_path or self._args.cacert_path or \
- self._env_args.cert_path
- if cacert_path and cacert_path == self._env_args.cert_path:
- cacert_path = os.path.join(cacert_path, 'ca.pem')
-
- key_path = host.get('key_path') or def_key_path or self._args.key_path or \
- self._env_args.cert_path
- if key_path and key_path == self._env_args.cert_path:
- key_path = os.path.join(key_path, 'key.pem')
-
- timeout = host.get('timeout') or def_timeout or self._args.timeout or self._env_args.timeout or \
- DEFAULT_TIMEOUT_SECONDS
- default_ip = host.get('default_ip') or def_ip or self._env_args.default_ip or \
- self._args.default_ip_address or DEFAULT_IP
- default_ssh_port = host.get('private_ssh_port') or def_ssh_port or self._args.private_ssh_port or \
- DEFAULT_SSH_PORT
- host_dict = dict(
- docker_host=docker_host,
- api_version=api_version,
- tls=tls,
- tls_verify=tls_verify,
- tls_hostname=tls_hostname,
- cert_path=cert_path,
- cacert_path=cacert_path,
- key_path=key_path,
- ssl_version=ssl_version,
- timeout=timeout,
- default_ip=default_ip,
- default_ssh_port=default_ssh_port,
- )
- hosts.append(host_dict)
- else:
- # use default definition
- docker_host = def_host or self._args.docker_host or self._env_args.docker_host or DEFAULT_DOCKER_HOST
- api_version = def_version or self._args.api_version or self._env_args.api_version or \
- DEFAULT_DOCKER_API_VERSION
- tls_hostname = def_tls_hostname or self._args.tls_hostname or self._env_args.tls_hostname or \
- DEFAULT_TLS_HOSTNAME
- tls_verify = def_tls_verify or self._args.tls_verify or self._env_args.tls_verify or DEFAULT_TLS_VERIFY
- tls = def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS
- ssl_version = def_ssl_version or self._args.ssl_version or self._env_args.ssl_version
-
- cert_path = def_cert_path or self._args.cert_path or self._env_args.cert_path
- if cert_path and cert_path == self._env_args.cert_path:
- cert_path = os.path.join(cert_path, 'cert.pem')
-
- cacert_path = def_cacert_path or self._args.cacert_path or self._env_args.cert_path
- if cacert_path and cacert_path == self._env_args.cert_path:
- cacert_path = os.path.join(cacert_path, 'ca.pem')
-
- key_path = def_key_path or self._args.key_path or self._env_args.cert_path
- if key_path and key_path == self._env_args.cert_path:
- key_path = os.path.join(key_path, 'key.pem')
-
- timeout = def_timeout or self._args.timeout or self._env_args.timeout or DEFAULT_TIMEOUT_SECONDS
- default_ip = def_ip or self._env_args.default_ip or self._args.default_ip_address or DEFAULT_IP
- default_ssh_port = def_ssh_port or self._args.private_ssh_port or DEFAULT_SSH_PORT
- host_dict = dict(
- docker_host=docker_host,
- api_version=api_version,
- tls=tls,
- tls_verify=tls_verify,
- tls_hostname=tls_hostname,
- cert_path=cert_path,
- cacert_path=cacert_path,
- key_path=key_path,
- ssl_version=ssl_version,
- timeout=timeout,
- default_ip=default_ip,
- default_ssh_port=default_ssh_port,
- )
- hosts.append(host_dict)
- self.log("hosts: ")
- self.log(hosts, pretty_print=True)
- return hosts
-
- def _parse_config_file(self):
- config = dict()
- config_file = DEFAULT_DOCKER_CONFIG_FILE
-
- if self._args.config_file:
- config_file = self._args.config_file
- elif self._env_args.config_file:
- config_file = self._env_args.config_file
-
- config_file = os.path.abspath(config_file)
-
- if os.path.isfile(config_file):
- with open(config_file) as f:
- try:
- config = yaml.safe_load(f.read())
- except Exception as exc:
- self.fail("Error: parsing %s - %s" % (config_file, str(exc)))
- else:
- msg = "Error: config file given by {} does not exist - " + config_file
- if self._args.config_file:
- self.fail(msg.format('command line argument'))
- elif self._env_args.config_file:
- self.fail(msg.format(DOCKER_ENV_ARGS.get('config_file')))
- else:
- self.log(msg.format('DEFAULT_DOCKER_CONFIG_FILE'))
- return config
-
- def log(self, msg, pretty_print=False):
- if self._args.debug:
- log(msg, pretty_print)
-
- def fail(self, msg):
- fail(msg)
-
- def _parse_env_args(self):
- args = EnvArgs()
- for key, value in DOCKER_ENV_ARGS.items():
- if os.environ.get(value):
- val = os.environ.get(value)
- if val in BOOLEANS_TRUE:
- val = True
- if val in BOOLEANS_FALSE:
- val = False
- setattr(args, key, val)
- return args
-
- def _parse_cli_args(self):
- # Parse command line arguments
-
- parser = argparse.ArgumentParser(
- description='Return Ansible inventory for one or more Docker hosts.')
- parser.add_argument('--list', action='store_true', default=True,
- help='List all containers (default: True)')
- parser.add_argument('--debug', action='store_true', default=False,
- help='Send debug messages to STDOUT')
- parser.add_argument('--host', action='store',
- help='Only get information for a specific container.')
- parser.add_argument('--pretty', action='store_true', default=False,
- help='Pretty print JSON output(default: False)')
- parser.add_argument('--config-file', action='store', default=None,
- help="Name of the config file to use. Default is %s" % (DEFAULT_DOCKER_CONFIG_FILE))
- parser.add_argument('--docker-host', action='store', default=None,
- help="The base url or Unix sock path to connect to the docker daemon. Defaults to %s"
- % (DEFAULT_DOCKER_HOST))
- parser.add_argument('--tls-hostname', action='store', default=None,
- help="Host name to expect in TLS certs. Defaults to %s" % DEFAULT_TLS_HOSTNAME)
- parser.add_argument('--api-version', action='store', default=None,
- help="Docker daemon API version. Defaults to %s" % (DEFAULT_DOCKER_API_VERSION))
- parser.add_argument('--timeout', action='store', default=None,
- help="Docker connection timeout in seconds. Defaults to %s"
- % (DEFAULT_TIMEOUT_SECONDS))
- parser.add_argument('--cacert-path', action='store', default=None,
- help="Path to the TLS certificate authority pem file.")
- parser.add_argument('--cert-path', action='store', default=None,
- help="Path to the TLS certificate pem file.")
- parser.add_argument('--key-path', action='store', default=None,
- help="Path to the TLS encryption key pem file.")
- parser.add_argument('--ssl-version', action='store', default=None,
- help="TLS version number")
- parser.add_argument('--tls', action='store_true', default=None,
- help="Use TLS. Defaults to %s" % (DEFAULT_TLS))
- parser.add_argument('--tls-verify', action='store_true', default=None,
- help="Verify TLS certificates. Defaults to %s" % (DEFAULT_TLS_VERIFY))
- parser.add_argument('--private-ssh-port', action='store', default=None,
- help="Default private container SSH Port. Defaults to %s" % (DEFAULT_SSH_PORT))
- parser.add_argument('--default-ip-address', action='store', default=None,
- help="Default container SSH IP address. Defaults to %s" % (DEFAULT_IP))
- return parser.parse_args()
-
- def _json_format_dict(self, data, pretty_print=False):
- # format inventory data for output
- if pretty_print:
- return json.dumps(data, sort_keys=True, indent=4)
- else:
- return json.dumps(data)
-
-
-def main():
-
- if not HAS_DOCKER_PY:
- fail("Failed to import docker-py. Try `pip install docker-py` - %s" % (HAS_DOCKER_ERROR))
-
- DockerInventory().run()
-
-
-main()
diff --git a/contrib/inventory/docker.yml b/contrib/inventory/docker.yml
deleted file mode 100644
index 97239392d1..0000000000
--- a/contrib/inventory/docker.yml
+++ /dev/null
@@ -1,74 +0,0 @@
-# This is the configuration file for the Docker inventory script: docker.py.
-#
-# You can define the following in this file:
-#
-# defaults
-# Defines a default connection. Defaults will be taken from this and applied to any values not provided
-# for a host defined in the hosts list.
-#
-# hosts
-# If you wish to get inventory from more than one Docker host, define a hosts list.
-#
-# For the default host and each host in the hosts list define the following attributes:
-#
-# host:
-# description: The URL or Unix socket path used to connect to the Docker API.
-# required: yes
-#
-# tls:
-# description: Connect using TLS without verifying the authenticity of the Docker host server.
-# default: false
-# required: false
-#
-# tls_verify:
-#        description: Connect using TLS and verify the authenticity of the Docker host server.
-# default: false
-# required: false
-#
-# cert_path:
-# description: Path to the client's TLS certificate file.
-# default: null
-# required: false
-#
-# cacert_path:
-# description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
-# default: null
-# required: false
-#
-# key_path:
-# description: Path to the client's TLS key file.
-# default: null
-# required: false
-#
-# version:
-# description: The Docker API version.
-# required: false
-# default: will be supplied by the docker-py module.
-#
-# timeout:
-# description: The amount of time in seconds to wait on an API response.
-# required: false
-# default: 60
-#
-# default_ip:
-# description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
-# '0.0.0.0'.
-# required: false
-# default: 127.0.0.1
-#
-# private_ssh_port:
-# description: The port containers use for SSH
-# required: false
-# default: 22
-
-#defaults:
-# host: unix:///var/run/docker.sock
-# private_ssh_port: 22
-# default_ip: 127.0.0.1
-
-#hosts:
-# - host: tcp://10.45.5.16:4243
-# private_ssh_port: 2022
-# default_ip: 172.16.3.45
-# - host: tcp://localhost:4243
-# private_ssh_port: 2029
diff --git a/contrib/inventory/fleet.py b/contrib/inventory/fleet.py
deleted file mode 100755
index dd0d4f7168..0000000000
--- a/contrib/inventory/fleet.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-"""
-fleetctl base external inventory script. Automatically finds the IPs of the booted CoreOS instances and
-returns them under the host group 'coreos'.
-"""
-
-# Copyright (C) 2014 Andrew Rothstein <andrew.rothstein at gmail.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Thanks to the vagrant.py inventory script for giving me the basic structure
-# of this.
-#
-
-import sys
-import subprocess
-import re
-import string
-from optparse import OptionParser
-import json
-
-# Options
-# ------------------------------
-
-parser = OptionParser(usage="%prog [options] --list | --host <machine>")
-parser.add_option('--list', default=False, dest="list", action="store_true",
- help="Produce a JSON consumable grouping of servers in your fleet")
-parser.add_option('--host', default=None, dest="host",
- help="Generate additional host specific details for given host for Ansible")
-(options, args) = parser.parse_args()
-
-#
-# helper functions
-#
-
-
-def get_ssh_config():
- configs = []
- for box in list_running_boxes():
- config = get_a_ssh_config(box)
- configs.append(config)
- return configs
-
-
-# list all the running instances in the fleet
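-# Expected `fleetctl list-machines` output looks roughly like (illustrative):
-#
-#   MACHINE         IP              METADATA
-#   148a18ff-6e95   10.10.1.1       -
-#
-# The regex below captures the second column (the IP) and skips the header row.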
-def list_running_boxes():
- boxes = []
-    for line in subprocess.check_output(["fleetctl", "list-machines"]).decode('utf-8').split('\n'):
- matcher = re.search(r"[^\s]+[\s]+([^\s]+).+", line)
- if matcher and matcher.group(1) != "IP":
- boxes.append(matcher.group(1))
-
- return boxes
-
-
-def get_a_ssh_config(box_name):
- config = {}
- config['Host'] = box_name
- config['ansible_ssh_user'] = 'core'
- config['ansible_python_interpreter'] = '/opt/bin/python'
- return config
-
-
-# List out servers that fleetctl reports as running
-# ------------------------------
-if options.list:
- ssh_config = get_ssh_config()
- hosts = {'coreos': []}
-
- for data in ssh_config:
- hosts['coreos'].append(data['Host'])
-
- print(json.dumps(hosts))
-    sys.exit(0)
-
-# Get out the host details
-# ------------------------------
-elif options.host:
- result = {}
- ssh_config = get_ssh_config()
-
-    details = list(filter(lambda x: (x['Host'] == options.host), ssh_config))
-    if len(details) > 0:
- # pass through the port, in case it's non standard.
- result = details[0]
-
- print(json.dumps(result))
-    sys.exit(0)
-
-
-# Print out help
-# ------------------------------
-else:
- parser.print_help()
- sys.exit(1)
diff --git a/contrib/inventory/foreman.ini b/contrib/inventory/foreman.ini
deleted file mode 100644
index d157963848..0000000000
--- a/contrib/inventory/foreman.ini
+++ /dev/null
@@ -1,200 +0,0 @@
-# Foreman inventory (https://github.com/theforeman/foreman_ansible_inventory)
-#
-# This script can be used as an Ansible dynamic inventory.
-# The connection parameters are set up via *foreman.ini*
-# This is how the script finds the configuration file, in
-# order of discovery:
-#
-# * `/etc/ansible/foreman.ini`
-# * Current directory of your inventory script.
-# * `FOREMAN_INI_PATH` environment variable.
-#
-# ## Variables and Parameters
-#
-# The data returned from Foreman for each host is stored in a foreman
-# hash, so it is available as *host_vars* along with the parameters
-# of the host and its hostgroups:
-#
-# "foo.example.com": {
-# "foreman": {
-# "architecture_id": 1,
-# "architecture_name": "x86_64",
-# "build": false,
-# "build_status": 0,
-# "build_status_label": "Installed",
-# "capabilities": [
-# "build",
-# "image"
-# ],
-# "compute_profile_id": 4,
-# "hostgroup_name": "webtier/myapp",
-# "id": 70,
-# "image_name": "debian8.1",
-# ...
-# "uuid": "50197c10-5ebb-b5cf-b384-a1e203e19e77"
-# },
-# "foreman_params": {
-# "testparam1": "foobar",
-# "testparam2": "small",
-# ...
-# }
-#
-# and could therefore be used in Ansible like:
-#
-# - debug: msg="From Foreman host {{ foreman['uuid'] }}"
-#
-# Which yields
-#
-# TASK [test_foreman : debug] ****************************************************
-# ok: [foo.example.com] => {
-# "msg": "From Foreman host 50190bd1-052a-a34a-3c9c-df37a39550bf"
-# }
-#
-# ## Automatic Ansible groups
-#
-# The inventory will provide a set of groups, by default prefixed by
-# 'foreman_'. If you want to customize this prefix, change the
-# group_prefix option in /etc/ansible/foreman.ini. The rest of this
-# guide will assume the default prefix of 'foreman_'.
-#
-# The hostgroup, location, organization, content view, and lifecycle
-# environment of each host are created as Ansible groups with a
-# foreman_<grouptype> prefix, all lowercase, with problematic characters
-# removed. So e.g. the foreman hostgroup
-#
-# myapp / webtier / datacenter1
-#
-# would turn into the Ansible group:
-#
-# foreman_hostgroup_myapp_webtier_datacenter1
-#
-# If the parameter want_hostcollections is set to true, the
-# collections each host is in are created as Ansible groups with a
-# foreman_hostcollection prefix, all lowercase, with problematic
-# characters removed. So e.g. the Foreman host collection
-#
-# Patch Window Thursday
-#
-# would turn into the Ansible group:
-#
-# foreman_hostcollection_patchwindowthursday
-#
-# If the parameter host_filters is set, it will be used as the
-# "search" parameter for the /api/v2/hosts call. This can be used to
-# restrict the list of returned hosts, as shown below.
-#
-# Furthermore Ansible groups can be created on the fly using the
-# *group_patterns* variable in *foreman.ini* so that you can build up
-# hierarchies using parameters on the hostgroup and host variables.
-#
-# Let's assume you have a host that is built using this nested hostgroup:
-#
-# myapp / webtier / datacenter1
-#
-# and each of the hostgroups defines a parameter, respectively:
-#
-# myapp: app_param = myapp
-# webtier: tier_param = webtier
-# datacenter1: dc_param = datacenter1
-#
-# If the host is also in a subnet called "mysubnet" and provisioned via an
-# image, then *group_patterns* like:
-#
-# [ansible]
-# group_patterns = ["{app_param}-{tier_param}-{dc_param}",
-# "{app_param}-{tier_param}",
-# "{app_param}",
-# "{subnet_name}-{provision_method}"]
-#
-# would put the host into the additional Ansible groups:
-#
-# - myapp-webtier-datacenter1
-# - myapp-webtier
-# - myapp
-# - mysubnet-image
-#
-# by recursively resolving the hostgroups, getting the parameter keys
-# and values and doing a Python *string.format()* like replacement on
-# it.
-#
-[foreman]
-url = http://localhost:3000/
-user = foreman
-password = secret
-ssl_verify = True
-
-# Foreman 1.24 introduces a new reports API to improve performance of the inventory script.
-# Note: This requires foreman_ansible plugin installed.
-# Set to False if you want to use the old API. Defaults to True.
-
-use_reports_api = True
-
-# Retrieve only hosts from the organization "Web Engineering".
-# host_filters = organization="Web Engineering"
-
-# Retrieve only hosts from the organization "Web Engineering" that are
-# also in the host collection "Apache Servers".
-# host_filters = organization="Web Engineering" and host_collection="Apache Servers"
-
-# Foreman Inventory report related configuration options.
-# Configs that default to True :
-# want_organization , want_location, want_ipv4, want_host_group, want_subnet, want_smart_proxies, want_facts
-# Configs that default to False :
-# want_ipv6, want_subnet_v6, want_content_facet_attributes, want_host_params
-
-[report]
-# want_organization = True
-# want_location = True
-# want_ipv4 = True
-# want_ipv6 = False
-# want_host_group = True
-# want_subnet = True
-# want_subnet_v6 = False
-# want_smart_proxies = True
-# want_content_facet_attributes = False
-# want_host_params = False
-
-# use this config to determine if facts are to be fetched in the report and stored on the hosts.
-# want_facts = False
-
-# Upon receiving a request to return an inventory report, Foreman schedules a report generation job.
-# The script then polls the report_data endpoint repeatedly to check if the job is complete and retrieves the data.
-# poll_interval defines the interval between two consecutive calls to the report_data endpoint while polling.
-# Defaults to 10 seconds.
-
-# poll_interval = 10
-
-[ansible]
-group_patterns = ["{app}-{tier}-{color}",
- "{app}-{color}",
- "{app}",
- "{tier}"]
-group_prefix = foreman_
-
-# Whether to fetch facts from Foreman and store them on the host
-want_facts = True
-
-# Whether to create Ansible groups for host collections. Only tested
-# with Katello (Red Hat Satellite). Disabled by default to not break
-# the script for stand-alone Foreman.
-want_hostcollections = False
-
-# Whether to interpret global parameter values as JSON (if possible, otherwise
-# take them as-is). Only tested with Katello (Red Hat Satellite).
-# This allows defining lists, dictionaries, and more complicated structures as
-# variables by entering them as JSON strings in Foreman parameters.
-# Disabled by default as the change would otherwise not be backward compatible.
-rich_params = False
-
-# Whether to populate the ansible_ssh_host variable to explicitly specify the
-# connection target. Only tested with Katello (Red Hat Satellite).
-# If the foreman 'ip' fact exists then the ansible_ssh_host variable is populated
-# to permit connections where DNS resolution fails.
-want_ansible_ssh_host = False
-
-[cache]
-path = .
-max_age = 60
-
-# Whether to scan foreman to add recently created hosts in inventory cache
-scan_new_hosts = True
diff --git a/contrib/inventory/foreman.py b/contrib/inventory/foreman.py
deleted file mode 100755
index 343cf26c9d..0000000000
--- a/contrib/inventory/foreman.py
+++ /dev/null
@@ -1,662 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-#
-# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>,
-# Daniel Lobato Garcia <dlobatog@redhat.com>
-#
-# This script is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with it. If not, see <http://www.gnu.org/licenses/>.
-#
-# This is somewhat based on cobbler inventory
-
-# Stdlib imports
-# __future__ imports must occur at the beginning of file
-from __future__ import print_function
-import json
-import argparse
-import copy
-import os
-import re
-import sys
-from time import time, sleep
-from collections import defaultdict
-from distutils.version import LooseVersion, StrictVersion
-
-# 3rd party imports
-import requests
-if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
- print('This script requires python-requests 1.1 as a minimum version')
- sys.exit(1)
-
-from requests.auth import HTTPBasicAuth
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.six.moves import configparser as ConfigParser
-
-
-def json_format_dict(data, pretty=False):
- """Converts a dict to a JSON object and dumps it as a formatted string"""
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
-
-class ForemanInventory(object):
-
- def __init__(self):
- self.inventory = defaultdict(list) # A list of groups and the hosts in that group
- self.cache = dict() # Details about hosts in the inventory
- self.params = dict() # Params of each host
- self.facts = dict() # Facts of each host
- self.hostgroups = dict() # host groups
- self.hostcollections = dict() # host collections
- self.session = None # Requests session
- self.config_paths = [
- "/etc/ansible/foreman.ini",
- os.path.dirname(os.path.realpath(__file__)) + '/foreman.ini',
- ]
- env_value = os.environ.get('FOREMAN_INI_PATH')
- if env_value is not None:
- self.config_paths.append(os.path.expanduser(os.path.expandvars(env_value)))
-
- def read_settings(self):
- """Reads the settings from the foreman.ini file"""
-
- config = ConfigParser.SafeConfigParser()
- config.read(self.config_paths)
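-        # config.read() accepts a list and merges every file that exists;
-        # values from later paths override earlier ones, so a file pointed to
-        # by FOREMAN_INI_PATH takes precedence.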
-
- # Foreman API related
- try:
- self.foreman_url = config.get('foreman', 'url')
- self.foreman_user = config.get('foreman', 'user')
- self.foreman_pw = config.get('foreman', 'password', raw=True)
- self.foreman_ssl_verify = config.getboolean('foreman', 'ssl_verify')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e:
- print("Error parsing configuration: %s" % e, file=sys.stderr)
- return False
-
- # Inventory Report Related
- try:
- self.foreman_use_reports_api = config.getboolean('foreman', 'use_reports_api')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.foreman_use_reports_api = True
-
- try:
- self.want_organization = config.getboolean('report', 'want_organization')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.want_organization = True
-
- try:
- self.want_location = config.getboolean('report', 'want_location')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.want_location = True
-
- try:
- self.want_IPv4 = config.getboolean('report', 'want_ipv4')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.want_IPv4 = True
-
- try:
- self.want_IPv6 = config.getboolean('report', 'want_ipv6')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.want_IPv6 = False
-
- try:
- self.want_host_group = config.getboolean('report', 'want_host_group')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.want_host_group = True
-
- try:
- self.want_host_params = config.getboolean('report', 'want_host_params')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.want_host_params = False
-
- try:
- self.want_subnet = config.getboolean('report', 'want_subnet')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.want_subnet = True
-
- try:
- self.want_subnet_v6 = config.getboolean('report', 'want_subnet_v6')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.want_subnet_v6 = False
-
- try:
- self.want_smart_proxies = config.getboolean('report', 'want_smart_proxies')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.want_smart_proxies = True
-
- try:
- self.want_content_facet_attributes = config.getboolean('report', 'want_content_facet_attributes')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.want_content_facet_attributes = False
-
- try:
- self.report_want_facts = config.getboolean('report', 'want_facts')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.report_want_facts = True
-
- try:
- self.poll_interval = config.getint('report', 'poll_interval')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.poll_interval = 10
-
- # Ansible related
- try:
- group_patterns = config.get('ansible', 'group_patterns')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- group_patterns = "[]"
-
- self.group_patterns = json.loads(group_patterns)
-
- try:
- self.group_prefix = config.get('ansible', 'group_prefix')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.group_prefix = "foreman_"
-
- try:
- self.want_facts = config.getboolean('ansible', 'want_facts')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.want_facts = True
-
- self.want_facts = self.want_facts and self.report_want_facts
-
- try:
- self.want_hostcollections = config.getboolean('ansible', 'want_hostcollections')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.want_hostcollections = False
-
- try:
- self.want_ansible_ssh_host = config.getboolean('ansible', 'want_ansible_ssh_host')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.want_ansible_ssh_host = False
-
- # Do we want parameters to be interpreted if possible as JSON? (no by default)
- try:
- self.rich_params = config.getboolean('ansible', 'rich_params')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.rich_params = False
-
- try:
- self.host_filters = config.get('foreman', 'host_filters')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.host_filters = None
-
- # Cache related
- try:
- cache_path = os.path.expanduser(config.get('cache', 'path'))
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- cache_path = '.'
- (script, ext) = os.path.splitext(os.path.basename(__file__))
- self.cache_path_cache = cache_path + "/%s.cache" % script
- self.cache_path_inventory = cache_path + "/%s.index" % script
- self.cache_path_params = cache_path + "/%s.params" % script
- self.cache_path_facts = cache_path + "/%s.facts" % script
- self.cache_path_hostcollections = cache_path + "/%s.hostcollections" % script
- try:
- self.cache_max_age = config.getint('cache', 'max_age')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.cache_max_age = 60
- try:
- self.scan_new_hosts = config.getboolean('cache', 'scan_new_hosts')
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
- self.scan_new_hosts = False
-
- return True
-
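
The long run of try/except blocks above all implement the same "option with a default" pattern. A minimal helper sketch (hypothetical, not part of the original script) that would express it once, reusing the ConfigParser import already made above:

    def _config_get(config, section, option, default, getter='get'):
        # Return the option value, falling back to `default` when the section
        # or option is missing -- the same semantics as the blocks above.
        try:
            return getattr(config, getter)(section, option)
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            return default

    # e.g.: self.want_facts = _config_get(config, 'ansible', 'want_facts', True, 'getboolean')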
- def parse_cli_args(self):
- """Command line argument processing"""
-
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on foreman')
- parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
- parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
- parser.add_argument('--refresh-cache', action='store_true', default=False,
- help='Force refresh of cache by making API requests to foreman (default: False - use cache files)')
- self.args = parser.parse_args()
-
- def _get_session(self):
- if not self.session:
- self.session = requests.session()
- self.session.auth = HTTPBasicAuth(self.foreman_user, self.foreman_pw)
- self.session.verify = self.foreman_ssl_verify
- return self.session
-
- def _get_json(self, url, ignore_errors=None, params=None):
- if params is None:
- params = {}
- params['per_page'] = 250
-
- page = 1
- results = []
- s = self._get_session()
- while True:
- params['page'] = page
- ret = s.get(url, params=params)
- if ignore_errors and ret.status_code in ignore_errors:
- break
- ret.raise_for_status()
- json = ret.json()
-            # /hosts/:id has no 'results' key
- if 'results' not in json:
- return json
-            # Facts are returned as a dict under 'results', not a list
- if isinstance(json['results'], dict):
- return json['results']
-            # The list of all hosts is returned paginated
- results = results + json['results']
- if len(results) >= json['subtotal']:
- break
- page += 1
- if len(json['results']) == 0:
- print("Did not make any progress during loop. "
- "expected %d got %d" % (json['total'], len(results)),
- file=sys.stderr)
- break
- return results
-
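
The loop above is standard page-based pagination against the Foreman API. A hypothetical generator form of the same technique, assuming a requests session configured like _get_session() above:

    def paged_get(session, url, per_page=250):
        # Yield items from a paginated Foreman-style endpoint (sketch only).
        page, seen = 1, 0
        while True:
            ret = session.get(url, params={'per_page': per_page, 'page': page})
            ret.raise_for_status()
            body = ret.json()
            results = body.get('results', [])
            for item in results:
                yield item
            seen += len(results)
            if not results or seen >= body.get('subtotal', 0):
                break
            page += 1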
- def _use_inventory_report(self):
- if not self.foreman_use_reports_api:
- return False
- status_url = "%s/api/v2/status" % self.foreman_url
- result = self._get_json(status_url)
- foreman_version = (LooseVersion(result.get('version')) >= LooseVersion('1.24.0'))
- return foreman_version
-
- def _fetch_params(self):
- options, params = ("no", "yes"), dict()
- params["Organization"] = options[self.want_organization]
- params["Location"] = options[self.want_location]
- params["IPv4"] = options[self.want_IPv4]
- params["IPv6"] = options[self.want_IPv6]
- params["Facts"] = options[self.want_facts]
- params["Host Group"] = options[self.want_host_group]
- params["Host Collections"] = options[self.want_hostcollections]
- params["Subnet"] = options[self.want_subnet]
- params["Subnet v6"] = options[self.want_subnet_v6]
- params["Smart Proxies"] = options[self.want_smart_proxies]
- params["Content Attributes"] = options[self.want_content_facet_attributes]
- params["Host Parameters"] = options[self.want_host_params]
- if self.host_filters:
- params["Hosts"] = self.host_filters
- return params
-
- def _post_request(self):
- url = "%s/ansible/api/v2/ansible_inventories/schedule" % self.foreman_url
- session = self._get_session()
- params = {'input_values': self._fetch_params()}
- ret = session.post(url, json=params)
- if not ret:
- raise Exception("Error scheduling inventory report on foreman. Please check foreman logs!")
- url = "{0}/{1}".format(self.foreman_url, ret.json().get('data_url'))
- response = session.get(url)
- while response:
- if response.status_code != 204:
- break
- else:
- sleep(self.poll_interval)
- response = session.get(url)
- if not response:
- raise Exception("Error receiving inventory report from foreman. Please check foreman logs!")
- else:
- return response.json()
-
- def _get_hosts(self):
- url = "%s/api/v2/hosts" % self.foreman_url
-
- params = {}
- if self.host_filters:
- params['search'] = self.host_filters
-
- return self._get_json(url, params=params)
-
- def _get_host_data_by_id(self, hid):
- url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
- return self._get_json(url)
-
- def _get_facts_by_id(self, hid):
- url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid)
- return self._get_json(url)
-
- def _resolve_params(self, host_params):
- """Convert host params to dict"""
- params = {}
-
- for param in host_params:
- name = param['name']
- if self.rich_params:
- try:
- params[name] = json.loads(param['value'])
- except ValueError:
- params[name] = param['value']
- else:
- params[name] = param['value']
-
- return params
-
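
To illustrate what _resolve_params() does with the rich_params option (parameter names and values are hypothetical):

    host_params = [{'name': 'roles', 'value': '["web", "db"]'},
                   {'name': 'comment', 'value': 'plain text'}]
    # rich_params = True  -> {'roles': ['web', 'db'], 'comment': 'plain text'}
    # rich_params = False -> {'roles': '["web", "db"]', 'comment': 'plain text'}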
- def _get_facts(self, host):
- """Fetch all host facts of the host"""
- if not self.want_facts:
- return {}
-
- ret = self._get_facts_by_id(host['id'])
- if len(ret.values()) == 0:
- facts = {}
- elif len(ret.values()) == 1:
- facts = list(ret.values())[0]
- else:
- raise ValueError("More than one set of facts returned for '%s'" % host)
- return facts
-
- def write_to_cache(self, data, filename):
- """Write data in JSON format to a file"""
- json_data = json_format_dict(data, True)
-        with open(filename, 'w') as cache:
-            cache.write(json_data)
-
- def _write_cache(self):
- self.write_to_cache(self.cache, self.cache_path_cache)
- self.write_to_cache(self.inventory, self.cache_path_inventory)
- self.write_to_cache(self.params, self.cache_path_params)
- self.write_to_cache(self.facts, self.cache_path_facts)
- self.write_to_cache(self.hostcollections, self.cache_path_hostcollections)
-
- def to_safe(self, word):
- '''Converts 'bad' characters in a string to underscores
- so they can be used as Ansible groups
-
- >>> ForemanInventory.to_safe("foo-bar baz")
- 'foo_barbaz'
- '''
- regex = r"[^A-Za-z0-9\_]"
- return re.sub(regex, "_", word.replace(" ", ""))
-
- def update_cache(self, scan_only_new_hosts=False):
- """Make calls to foreman and save the output in a cache"""
- use_inventory_report = self._use_inventory_report()
- if use_inventory_report:
- self._update_cache_inventory(scan_only_new_hosts)
- else:
- self._update_cache_host_api(scan_only_new_hosts)
-
- def _update_cache_inventory(self, scan_only_new_hosts):
- self.groups = dict()
- self.hosts = dict()
- try:
- inventory_report_response = self._post_request()
- except Exception:
- self._update_cache_host_api(scan_only_new_hosts)
- return
- host_data = json.loads(inventory_report_response)
- for host in host_data:
-            if not host or (host["name"] in self.cache and scan_only_new_hosts):
- continue
- dns_name = host['name']
-
- host_params = host.pop('host_parameters', {})
- fact_list = host.pop('facts', {})
- content_facet_attributes = host.get('content_attributes', {}) or {}
-
- # Create ansible groups for hostgroup
- group = 'host_group'
- val = host.get(group)
- if val:
- safe_key = self.to_safe('%s%s_%s' % (
- to_text(self.group_prefix),
- group,
- to_text(val).lower()
- ))
- self.inventory[safe_key].append(dns_name)
-
- # Create ansible groups for environment, location and organization
- for group in ['environment', 'location', 'organization']:
- val = host.get('%s' % group)
- if val:
- safe_key = self.to_safe('%s%s_%s' % (
- to_text(self.group_prefix),
- group,
- to_text(val).lower()
- ))
- self.inventory[safe_key].append(dns_name)
-
- for group in ['lifecycle_environment', 'content_view']:
- val = content_facet_attributes.get('%s_name' % group)
- if val:
- safe_key = self.to_safe('%s%s_%s' % (
- to_text(self.group_prefix),
- group,
- to_text(val).lower()
- ))
- self.inventory[safe_key].append(dns_name)
-
- params = host_params
-
- # Ansible groups by parameters in host groups and Foreman host
- # attributes.
- groupby = dict()
- for k, v in params.items():
- groupby[k] = self.to_safe(to_text(v))
-
- # The name of the ansible groups is given by group_patterns:
- for pattern in self.group_patterns:
- try:
- key = pattern.format(**groupby)
- self.inventory[key].append(dns_name)
- except KeyError:
- pass # Host not part of this group
-
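
The group_patterns mechanism above is plain str.format() applied to the host's (sanitized) parameters. A hypothetical configuration and its outcome:

    group_patterns = ["{app}-{tier}", "{app}"]
    groupby = {'app': 'crm', 'tier': 'web'}
    # -> the host joins groups 'crm-web' and 'crm'; a host missing either key
    #    raises KeyError and is simply skipped for that pattern.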
- if self.want_hostcollections:
- hostcollections = host.get('host_collections')
-
- if hostcollections:
- # Create Ansible groups for host collections
- for hostcollection in hostcollections:
- safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection.lower()))
- self.inventory[safe_key].append(dns_name)
-
- self.hostcollections[dns_name] = hostcollections
-
- self.cache[dns_name] = host
- self.params[dns_name] = params
- self.facts[dns_name] = fact_list
- self.inventory['all'].append(dns_name)
- self._write_cache()
-
- def _update_cache_host_api(self, scan_only_new_hosts):
- """Make calls to foreman and save the output in a cache"""
-
- self.groups = dict()
- self.hosts = dict()
-
- for host in self._get_hosts():
-            if host['name'] in self.cache and scan_only_new_hosts:
- continue
- dns_name = host['name']
-
- host_data = self._get_host_data_by_id(host['id'])
- host_params = host_data.get('all_parameters', {})
-
- # Create ansible groups for hostgroup
- group = 'hostgroup'
- val = host.get('%s_title' % group) or host.get('%s_name' % group)
- if val:
- safe_key = self.to_safe('%s%s_%s' % (
- to_text(self.group_prefix),
- group,
- to_text(val).lower()
- ))
- self.inventory[safe_key].append(dns_name)
-
- # Create ansible groups for environment, location and organization
- for group in ['environment', 'location', 'organization']:
- val = host.get('%s_name' % group)
- if val:
- safe_key = self.to_safe('%s%s_%s' % (
- to_text(self.group_prefix),
- group,
- to_text(val).lower()
- ))
- self.inventory[safe_key].append(dns_name)
-
- for group in ['lifecycle_environment', 'content_view']:
- val = host.get('content_facet_attributes', {}).get('%s_name' % group)
- if val:
- safe_key = self.to_safe('%s%s_%s' % (
- to_text(self.group_prefix),
- group,
- to_text(val).lower()
- ))
- self.inventory[safe_key].append(dns_name)
-
- params = self._resolve_params(host_params)
-
- # Ansible groups by parameters in host groups and Foreman host
- # attributes.
- groupby = dict()
- for k, v in params.items():
- groupby[k] = self.to_safe(to_text(v))
-
- # The name of the ansible groups is given by group_patterns:
- for pattern in self.group_patterns:
- try:
- key = pattern.format(**groupby)
- self.inventory[key].append(dns_name)
- except KeyError:
- pass # Host not part of this group
-
- if self.want_hostcollections:
- hostcollections = host_data.get('host_collections')
-
- if hostcollections:
- # Create Ansible groups for host collections
- for hostcollection in hostcollections:
- safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection['name'].lower()))
- self.inventory[safe_key].append(dns_name)
-
- self.hostcollections[dns_name] = hostcollections
-
- self.cache[dns_name] = host
- self.params[dns_name] = params
- self.facts[dns_name] = self._get_facts(host)
- self.inventory['all'].append(dns_name)
- self._write_cache()
-
- def is_cache_valid(self):
- """Determines if the cache is still valid"""
- if os.path.isfile(self.cache_path_cache):
- mod_time = os.path.getmtime(self.cache_path_cache)
- current_time = time()
- if (mod_time + self.cache_max_age) > current_time:
- if (os.path.isfile(self.cache_path_inventory) and
- os.path.isfile(self.cache_path_params) and
- os.path.isfile(self.cache_path_facts)):
- return True
- return False
-
- def load_inventory_from_cache(self):
- """Read the index from the cache file sets self.index"""
-
- with open(self.cache_path_inventory, 'r') as fp:
- self.inventory = json.load(fp)
-
- def load_params_from_cache(self):
- """Read the index from the cache file sets self.index"""
-
- with open(self.cache_path_params, 'r') as fp:
- self.params = json.load(fp)
-
- def load_facts_from_cache(self):
- """Read the index from the cache file sets self.facts"""
-
- if not self.want_facts:
- return
- with open(self.cache_path_facts, 'r') as fp:
- self.facts = json.load(fp)
-
- def load_hostcollections_from_cache(self):
- """Read the index from the cache file sets self.hostcollections"""
-
- if not self.want_hostcollections:
- return
- with open(self.cache_path_hostcollections, 'r') as fp:
- self.hostcollections = json.load(fp)
-
- def load_cache_from_cache(self):
- """Read the cache from the cache file sets self.cache"""
-
- with open(self.cache_path_cache, 'r') as fp:
- self.cache = json.load(fp)
-
- def get_inventory(self):
- if self.args.refresh_cache or not self.is_cache_valid():
- self.update_cache()
- else:
- self.load_inventory_from_cache()
- self.load_params_from_cache()
- self.load_facts_from_cache()
- self.load_hostcollections_from_cache()
- self.load_cache_from_cache()
- if self.scan_new_hosts:
- self.update_cache(True)
-
- def get_host_info(self):
- """Get variables about a specific host"""
-
- if not self.cache or len(self.cache) == 0:
- # Need to load index from cache
- self.load_cache_from_cache()
-
- if self.args.host not in self.cache:
- # try updating the cache
- self.update_cache()
-
- if self.args.host not in self.cache:
- # host might not exist anymore
- return json_format_dict({}, True)
-
- return json_format_dict(self.cache[self.args.host], True)
-
- def _print_data(self):
- data_to_print = ""
- if self.args.host:
- data_to_print += self.get_host_info()
- else:
- self.inventory['_meta'] = {'hostvars': {}}
- for hostname in self.cache:
- self.inventory['_meta']['hostvars'][hostname] = {
- 'foreman': self.cache[hostname],
- 'foreman_params': self.params[hostname],
- }
- if self.want_ansible_ssh_host and 'ip' in self.cache[hostname]:
- self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.cache[hostname]['ip']
- if self.want_facts:
- self.inventory['_meta']['hostvars'][hostname]['foreman_facts'] = self.facts[hostname]
-
- data_to_print += json_format_dict(self.inventory, True)
-
- print(data_to_print)
-
- def run(self):
- # Read settings and parse CLI arguments
- if not self.read_settings():
- return False
- self.parse_cli_args()
- self.get_inventory()
- self._print_data()
- return True
-
-
-if __name__ == '__main__':
- sys.exit(not ForemanInventory().run())
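
Typical invocations of the script above (paths and host name are illustrative):

    $ FOREMAN_INI_PATH=~/foreman.ini ./foreman.py --list
    $ ./foreman.py --host web01.example.com
    $ ./foreman.py --refresh-cache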
diff --git a/contrib/inventory/freeipa.py b/contrib/inventory/freeipa.py
deleted file mode 100755
index cb5ccc071a..0000000000
--- a/contrib/inventory/freeipa.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-import argparse
-from distutils.version import LooseVersion
-import json
-import os
-import sys
-from ipalib import api, errors, __version__ as IPA_VERSION
-from ansible.module_utils.six import u
-
-
-def initialize():
- '''
-    This function initializes the FreeIPA/IPA API. It requires no arguments.
-    A Kerberos ticket must be present in the user's keyring for this to work.
-    The IPA default configuration directory is /etc/ipa; this path can be
-    overridden with the IPA_CONFDIR environment variable.
- '''
-
- api.bootstrap(context='cli')
-
- if not os.path.isdir(api.env.confdir):
- print("WARNING: IPA configuration directory (%s) is missing. "
- "Environment variable IPA_CONFDIR could be used to override "
- "default path." % api.env.confdir)
-
- if LooseVersion(IPA_VERSION) >= LooseVersion('4.6.2'):
-        # With ipalib < 4.6.0, 'server' and 'domain' have default values
-        # ('localhost:8888', 'example.com'); newer versions don't, and when
-        # DNS autodiscovery is broken one of jsonrpc_uri / xmlrpc_uri is
-        # required.
-        # ipalib 4.6.0 is unusable (https://pagure.io/freeipa/issue/7132),
-        # which is why 4.6.2 is explicitly tested.
- if 'server' not in api.env or 'domain' not in api.env:
- sys.exit("ERROR: ('jsonrpc_uri' or 'xmlrpc_uri') or 'domain' are not "
- "defined in '[global]' section of '%s' nor in '%s'." %
- (api.env.conf, api.env.conf_default))
-
- api.finalize()
- try:
- api.Backend.rpcclient.connect()
- except AttributeError:
- # FreeIPA < 4.0 compatibility
- api.Backend.xmlclient.connect()
-
- return api
-
-
-def list_groups(api):
- '''
- This function prints a list of all host groups. This function requires
- one argument, the FreeIPA/IPA API object.
- '''
-
- inventory = {}
- hostvars = {}
-
- result = api.Command.hostgroup_find(all=True)['result']
-
- for hostgroup in result:
- # Get direct and indirect members (nested hostgroups) of hostgroup
- members = []
-
-        if 'member_host' in hostgroup:
-            members = list(hostgroup['member_host'])
-        if 'memberindirect_host' in hostgroup:
-            members += list(hostgroup['memberindirect_host'])
-        inventory[hostgroup['cn'][0]] = {'hosts': members}
-
- for member in members:
- hostvars[member] = {}
-
- inventory['_meta'] = {'hostvars': hostvars}
- inv_string = json.dumps(inventory, indent=1, sort_keys=True)
- print(inv_string)
-
- return None
-
-
-def parse_args():
- '''
- This function parses the arguments that were passed in via the command line.
- This function expects no arguments.
- '''
-
- parser = argparse.ArgumentParser(description='Ansible FreeIPA/IPA '
- 'inventory module')
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument('--list', action='store_true',
- help='List active servers')
- group.add_argument('--host', help='List details about the specified host')
-
- return parser.parse_args()
-
-
-def get_host_attributes(api, host):
- """
-    This function expects one string: the hostname to look up variables for.
-    Args:
-        api: FreeIPA API object
-        host: hostname to look up
-
-    Returns: JSON string of host vars if found, else an empty dict
- """
- try:
- result = api.Command.host_show(u(host))['result']
- if 'usercertificate' in result:
- del result['usercertificate']
- return json.dumps(result, indent=1)
-    except errors.NotFound:
- return {}
-
-
-if __name__ == '__main__':
- args = parse_args()
- api = initialize()
-
- if args.host:
- print(get_host_attributes(api, args.host))
- elif args.list:
- list_groups(api)
diff --git a/contrib/inventory/gce.ini b/contrib/inventory/gce.ini
deleted file mode 100644
index af27a9c4ab..0000000000
--- a/contrib/inventory/gce.ini
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2013 Google Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# The GCE inventory script has the following dependencies:
-# 1. A valid Google Cloud Platform account with Google Compute Engine
-# enabled. See https://cloud.google.com
-# 2. An OAuth2 Service Account flow should be enabled. This will generate
-# a private key file that the inventory script will use for API request
-# authorization. See https://developers.google.com/accounts/docs/OAuth2
-# 3. Convert the private key from PKCS12 to PEM format
-# $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \
-# > -nodes -nocerts | openssl rsa -out pkey.pem
-# 4. The libcloud (>=0.13.3) Python library. See http://libcloud.apache.org
-#
-# (See ansible/test/gce_tests.py comments for full install instructions)
-#
-# Author: Eric Johnson <erjohnso@google.com>
-# Contributors: John Roach <johnroach1985@gmail.com>
-
-[gce]
-# GCE Service Account configuration information can be stored in the
-# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already
-# exist in your PYTHONPATH and be picked up automatically with an import
-# statement in the inventory script. However, you can specify an absolute
-# path to the secrets.py file with 'libcloud_secrets' parameter.
-# This option will be deprecated in a future release.
-libcloud_secrets =
-
-# If you are not going to use a 'secrets.py' file, you can set the necessary
-# authorization parameters here.
-# You can add multiple GCE projects by using a comma-separated list. Make
-# sure that the service account used has permissions on those projects.
-gce_service_account_email_address =
-gce_service_account_pem_file_path =
-gce_project_id =
-gce_zone =
-
-# Filter inventory based on state. Leave undefined to return instances regardless of state.
-# example: Uncomment to only return inventory in the running or provisioning state
-#instance_states = RUNNING,PROVISIONING
-
-# Filter inventory based on instance tags. Leave undefined to return instances regardless of tags.
-# example: Uncomment to only return inventory with the http-server or https-server tag
-#instance_tags = http-server,https-server
-
-
-[inventory]
-# The 'inventory_ip_type' parameter specifies whether 'ansible_ssh_host' should
-# contain the instance internal or external address. Values may be either
-# 'internal' or 'external'. If 'external' is specified but no external instance
-# address exists, the internal address will be used.
-# The INVENTORY_IP_TYPE environment variable will override this value.
-inventory_ip_type =
-
-[cache]
-# directory in which cache should be created
-cache_path = ~/.ansible/tmp
-
-# The number of seconds a cache file is considered valid. After this many
-# seconds, a new API call will be made, and the cache file will be updated.
-# To disable the cache, set this value to 0
-cache_max_age = 300
diff --git a/contrib/inventory/gce.py b/contrib/inventory/gce.py
deleted file mode 100755
index 0a7df3f52a..0000000000
--- a/contrib/inventory/gce.py
+++ /dev/null
@@ -1,521 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright: (c) 2013, Google Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-'''
-GCE external inventory script
-=================================
-
-Generates inventory that Ansible can understand by making API requests to
-Google Compute Engine via the libcloud library. Full install/configuration
-instructions for the gce* modules can be found in the comments of
-ansible/test/gce_tests.py.
-
-When run against a specific host, this script returns the following variables
-based on the data obtained from the libcloud Node object:
- - gce_uuid
- - gce_id
- - gce_image
- - gce_machine_type
- - gce_private_ip
- - gce_public_ip
- - gce_name
- - gce_description
- - gce_status
- - gce_zone
- - gce_tags
- - gce_metadata
- - gce_network
- - gce_subnetwork
-
-When run in --list mode, instances are grouped by the following categories:
- - zone:
- zone group name examples are us-central1-b, europe-west1-a, etc.
- - instance tags:
- An entry is created for each tag. For example, if you have two instances
- with a common tag called 'foo', they will both be grouped together under
- the 'tag_foo' name.
- - network name:
- the name of the network is appended to 'network_' (e.g. the 'default'
- network will result in a group named 'network_default')
- - machine type
- types follow a pattern like n1-standard-4, g1-small, etc.
- - running status:
- group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
- - image:
- when using an ephemeral/scratch disk, this will be set to the image name
- used when creating the instance (e.g. debian-7-wheezy-v20130816). when
- your instance was created with a root persistent disk it will be set to
- 'persistent_disk' since there is no current way to determine the image.
-
-Examples:
- Execute uname on all instances in the us-central1-a zone
- $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
-
- Use the GCE inventory script to print out instance specific information
- $ contrib/inventory/gce.py --host my_instance
-
-Author: Eric Johnson <erjohnso@google.com>
-Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>,
- John Roach <johnroach1985@gmail.com>
-Version: 0.0.4
-'''
-
-try:
- import pkg_resources
-except ImportError:
- # Use pkg_resources to find the correct versions of libraries and set
- # sys.path appropriately when there are multiversion installs. We don't
- # fail here as there is code that better expresses the errors where the
- # library is used.
- pass
-
-USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
-USER_AGENT_VERSION = "v2"
-
-import sys
-import os
-import argparse
-
-from time import time
-
-from ansible.module_utils.six.moves import configparser
-
-import logging
-logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
-
-import json
-
-try:
- from libcloud.compute.types import Provider
- from libcloud.compute.providers import get_driver
- _ = Provider.GCE
-except Exception:
- sys.exit("GCE inventory script requires libcloud >= 0.13")
-
-
-class CloudInventoryCache(object):
- def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
- cache_max_age=300):
- cache_dir = os.path.expanduser(cache_path)
- if not os.path.exists(cache_dir):
- os.makedirs(cache_dir)
- self.cache_path_cache = os.path.join(cache_dir, cache_name)
-
- self.cache_max_age = cache_max_age
-
- def is_valid(self, max_age=None):
- ''' Determines if the cache files have expired, or if it is still valid '''
-
- if max_age is None:
- max_age = self.cache_max_age
-
- if os.path.isfile(self.cache_path_cache):
- mod_time = os.path.getmtime(self.cache_path_cache)
- current_time = time()
- if (mod_time + max_age) > current_time:
- return True
-
- return False
-
- def get_all_data_from_cache(self, filename=''):
- ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
-
- data = ''
- if not filename:
- filename = self.cache_path_cache
- with open(filename, 'r') as cache:
- data = cache.read()
- return json.loads(data)
-
- def write_to_cache(self, data, filename=''):
- ''' Writes data to file as JSON. Returns True. '''
- if not filename:
- filename = self.cache_path_cache
- json_data = json.dumps(data)
- with open(filename, 'w') as cache:
- cache.write(json_data)
- return True
-
-
-class GceInventory(object):
- def __init__(self):
- # Cache object
- self.cache = None
- # dictionary containing inventory read from disk
- self.inventory = {}
-
- # Read settings and parse CLI arguments
- self.parse_cli_args()
- self.config = self.get_config()
- self.drivers = self.get_gce_drivers()
- self.ip_type = self.get_inventory_options()
- if self.ip_type:
- self.ip_type = self.ip_type.lower()
-
- # Cache management
- start_inventory_time = time()
- cache_used = False
- if self.args.refresh_cache or not self.cache.is_valid():
- self.do_api_calls_update_cache()
- else:
- self.load_inventory_from_cache()
- cache_used = True
- self.inventory['_meta']['stats'] = {
- 'inventory_load_time': time() - start_inventory_time,
- 'cache_used': cache_used
- }
-
- # Just display data for specific host
- if self.args.host:
- print(self.json_format_dict(
- self.inventory['_meta']['hostvars'][self.args.host],
- pretty=self.args.pretty))
- else:
- # Otherwise, assume user wants all instances grouped
- print(self.json_format_dict(self.inventory,
- pretty=self.args.pretty))
- sys.exit(0)
-
- def get_config(self):
- """
- Reads the settings from the gce.ini file.
-
- Populates a ConfigParser object with defaults and
- attempts to read an .ini-style configuration from the filename
- specified in GCE_INI_PATH. If the environment variable is
- not present, the filename defaults to gce.ini in the current
- working directory.
- """
- gce_ini_default_path = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), "gce.ini")
- gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
-
- # Create a ConfigParser.
- # This provides empty defaults to each key, so that environment
- # variable configuration (as opposed to INI configuration) is able
- # to work.
- config = configparser.ConfigParser(defaults={
- 'gce_service_account_email_address': '',
- 'gce_service_account_pem_file_path': '',
- 'gce_project_id': '',
- 'gce_zone': '',
- 'libcloud_secrets': '',
- 'instance_tags': '',
- 'inventory_ip_type': '',
- 'cache_path': '~/.ansible/tmp',
- 'cache_max_age': '300'
- })
- if 'gce' not in config.sections():
- config.add_section('gce')
- if 'inventory' not in config.sections():
- config.add_section('inventory')
- if 'cache' not in config.sections():
- config.add_section('cache')
-
- config.read(gce_ini_path)
-
- #########
- # Section added for processing ini settings
- #########
-
- # Set the instance_states filter based on config file options
- self.instance_states = []
- if config.has_option('gce', 'instance_states'):
- states = config.get('gce', 'instance_states')
- # Ignore if instance_states is an empty string.
- if states:
- self.instance_states = states.split(',')
-
- # Set the instance_tags filter, env var overrides config from file
- # and cli param overrides all
- if self.args.instance_tags:
- self.instance_tags = self.args.instance_tags
- else:
- self.instance_tags = os.environ.get(
- 'GCE_INSTANCE_TAGS', config.get('gce', 'instance_tags'))
- if self.instance_tags:
- self.instance_tags = self.instance_tags.split(',')
-
- # Caching
- cache_path = config.get('cache', 'cache_path')
- cache_max_age = config.getint('cache', 'cache_max_age')
-        # TODO(supertom): support project-specific caches
- cache_name = 'ansible-gce.cache'
- self.cache = CloudInventoryCache(cache_path=cache_path,
- cache_max_age=cache_max_age,
- cache_name=cache_name)
- return config
-
- def get_inventory_options(self):
- """Determine inventory options. Environment variables always
- take precedence over configuration files."""
- ip_type = self.config.get('inventory', 'inventory_ip_type')
- # If the appropriate environment variables are set, they override
- # other configuration
- ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
- return ip_type
-
- def get_gce_drivers(self):
- """Determine the GCE authorization settings and return a list of
- libcloud drivers.
- """
- # Attempt to get GCE params from a configuration file, if one
- # exists.
- secrets_path = self.config.get('gce', 'libcloud_secrets')
- secrets_found = False
-
- try:
- import secrets
- args = list(secrets.GCE_PARAMS)
- kwargs = secrets.GCE_KEYWORD_PARAMS
- secrets_found = True
- except Exception:
- pass
-
- if not secrets_found and secrets_path:
- if not secrets_path.endswith('secrets.py'):
- err = "Must specify libcloud secrets file as "
- err += "/absolute/path/to/secrets.py"
- sys.exit(err)
- sys.path.append(os.path.dirname(secrets_path))
- try:
- import secrets
- args = list(getattr(secrets, 'GCE_PARAMS', []))
- kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
- secrets_found = True
- except Exception:
- pass
-
- if not secrets_found:
- args = [
- self.config.get('gce', 'gce_service_account_email_address'),
- self.config.get('gce', 'gce_service_account_pem_file_path')
- ]
- kwargs = {'project': self.config.get('gce', 'gce_project_id'),
- 'datacenter': self.config.get('gce', 'gce_zone')}
-
- # If the appropriate environment variables are set, they override
- # other configuration; process those into our args and kwargs.
- args[0] = os.environ.get('GCE_EMAIL', args[0])
- args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
- args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1])
-
- kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
- kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter'])
-
- gce_drivers = []
- projects = kwargs['project'].split(',')
- for project in projects:
- kwargs['project'] = project
- gce = get_driver(Provider.GCE)(*args, **kwargs)
- gce.connection.user_agent_append(
- '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
- )
- gce_drivers.append(gce)
- return gce_drivers
-
- def parse_env_zones(self):
-        '''Returns a list of comma-separated zones parsed from the GCE_ZONE environment variable.
-        If provided, this will be used to filter the results of the group_instances call'''
- import csv
-        reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
-        return next(reader)
-
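
For example (illustrative value):

    # GCE_ZONE="us-central1-a, europe-west1-b"
    # parse_env_zones() -> ['us-central1-a', 'europe-west1-b']
    # (skipinitialspace=True strips the space after the comma)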
- def parse_cli_args(self):
- ''' Command line argument processing '''
-
- parser = argparse.ArgumentParser(
- description='Produce an Ansible Inventory file based on GCE')
- parser.add_argument('--list', action='store_true', default=True,
- help='List instances (default: True)')
- parser.add_argument('--host', action='store',
- help='Get all information about an instance')
- parser.add_argument('--instance-tags', action='store',
-                        help='Only include instances with these tags (comma-separated)')
- parser.add_argument('--pretty', action='store_true', default=False,
- help='Pretty format (default: False)')
- parser.add_argument(
- '--refresh-cache', action='store_true', default=False,
- help='Force refresh of cache by making API requests (default: False - use cache files)')
- self.args = parser.parse_args()
-
- def node_to_dict(self, inst):
- md = {}
-
- if inst is None:
- return {}
-
- if 'items' in inst.extra['metadata']:
- for entry in inst.extra['metadata']['items']:
- md[entry['key']] = entry['value']
-
- net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
- subnet = None
- if 'subnetwork' in inst.extra['networkInterfaces'][0]:
- subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
-        # Default to the external IP unless the user has specified they prefer internal
- if self.ip_type == 'internal':
- ssh_host = inst.private_ips[0]
- else:
- ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
-
- return {
- 'gce_uuid': inst.uuid,
- 'gce_id': inst.id,
- 'gce_image': inst.image,
- 'gce_machine_type': inst.size,
- 'gce_private_ip': inst.private_ips[0],
- 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
- 'gce_name': inst.name,
- 'gce_description': inst.extra['description'],
- 'gce_status': inst.extra['status'],
- 'gce_zone': inst.extra['zone'].name,
- 'gce_tags': inst.extra['tags'],
- 'gce_metadata': md,
- 'gce_network': net,
- 'gce_subnetwork': subnet,
- # Hosts don't have a public name, so we add an IP
- 'ansible_ssh_host': ssh_host
- }
-
- def load_inventory_from_cache(self):
- ''' Loads inventory from JSON on disk. '''
-
- try:
- self.inventory = self.cache.get_all_data_from_cache()
-            hosts = self.inventory['_meta']['hostvars']  # validates the cache structure
- except Exception as e:
- print(
- "Invalid inventory file %s. Please rebuild with -refresh-cache option."
- % (self.cache.cache_path_cache))
- raise
-
- def do_api_calls_update_cache(self):
- ''' Do API calls and save data in cache. '''
- zones = self.parse_env_zones()
- data = self.group_instances(zones)
- self.cache.write_to_cache(data)
- self.inventory = data
-
- def list_nodes(self):
- all_nodes = []
- params, more_results = {'maxResults': 500}, True
- while more_results:
- for driver in self.drivers:
- driver.connection.gce_params = params
- all_nodes.extend(driver.list_nodes())
- more_results = 'pageToken' in params
- return all_nodes
-
- def group_instances(self, zones=None):
- '''Group all instances'''
- groups = {}
- meta = {}
- meta["hostvars"] = {}
-
- for node in self.list_nodes():
-
- # This check filters on the desired instance states defined in the
- # config file with the instance_states config option.
- #
- # If the instance_states list is _empty_ then _ALL_ states are returned.
- #
- # If the instance_states list is _populated_ then check the current
- # state against the instance_states list
- if self.instance_states and not node.extra['status'] in self.instance_states:
- continue
-
- # This check filters on the desired instance tags defined in the
- # config file with the instance_tags config option, env var GCE_INSTANCE_TAGS,
- # or as the cli param --instance-tags.
- #
- # If the instance_tags list is _empty_ then _ALL_ instances are returned.
- #
- # If the instance_tags list is _populated_ then check the current
- # instance tags against the instance_tags list. If the instance has
- # at least one tag from the instance_tags list, it is returned.
- if self.instance_tags and not set(self.instance_tags) & set(node.extra['tags']):
- continue
-
- name = node.name
-
- meta["hostvars"][name] = self.node_to_dict(node)
-
- zone = node.extra['zone'].name
-
- # To avoid making multiple requests per zone
- # we list all nodes and then filter the results
- if zones and zone not in zones:
- continue
-
- if zone in groups:
- groups[zone].append(name)
- else:
- groups[zone] = [name]
-
- tags = node.extra['tags']
- for t in tags:
- if t.startswith('group-'):
- tag = t[6:]
- else:
- tag = 'tag_%s' % t
- if tag in groups:
- groups[tag].append(name)
- else:
- groups[tag] = [name]
-
- net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
- net = 'network_%s' % net
- if net in groups:
- groups[net].append(name)
- else:
- groups[net] = [name]
-
- machine_type = node.size
- if machine_type in groups:
- groups[machine_type].append(name)
- else:
- groups[machine_type] = [name]
-
- image = node.image or 'persistent_disk'
- if image in groups:
- groups[image].append(name)
- else:
- groups[image] = [name]
-
- status = node.extra['status']
- stat = 'status_%s' % status.lower()
- if stat in groups:
- groups[stat].append(name)
- else:
- groups[stat] = [name]
-
- for private_ip in node.private_ips:
- groups[private_ip] = [name]
-
- if len(node.public_ips) >= 1:
- for public_ip in node.public_ips:
- groups[public_ip] = [name]
-
- groups["_meta"] = meta
-
- return groups
-
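
The repeated "append or create" branches above can be collapsed with collections.defaultdict. A minimal sketch of the same grouping step, shown for the zone and tag groups only (`nodes` is assumed to be the libcloud node list):

    from collections import defaultdict

    groups = defaultdict(list)
    for node in nodes:
        groups[node.extra['zone'].name].append(node.name)
        for t in node.extra['tags']:
            key = t[6:] if t.startswith('group-') else 'tag_%s' % t
            groups[key].append(node.name)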
- def json_format_dict(self, data, pretty=False):
- ''' Converts a dict to a JSON object and dumps it as a formatted
- string '''
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
-
-# Run the script
-if __name__ == '__main__':
- GceInventory()
diff --git a/contrib/inventory/infoblox.py b/contrib/inventory/infoblox.py
deleted file mode 100755
index 7eb4a6a418..0000000000
--- a/contrib/inventory/infoblox.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env python
-#
-# (c) 2018, Red Hat, Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-import os
-import sys
-import json
-import argparse
-
-from ansible.parsing.dataloader import DataLoader
-from ansible.module_utils.six import iteritems
-from ansible.module_utils._text import to_text
-from ansible.module_utils.net_tools.nios.api import WapiInventory
-from ansible.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs
-
-
-CONFIG_FILES = [
- '/etc/ansible/infoblox.yaml',
- '/etc/ansible/infoblox.yml'
-]
-
-
-def parse_args():
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--list', action='store_true',
- help='List host records from NIOS for use in Ansible')
-
- parser.add_argument('--host',
- help='List meta data about single host (not used)')
-
- return parser.parse_args()
-
-
-def main():
- args = parse_args()
-
- for config_file in CONFIG_FILES:
- if os.path.exists(config_file):
- break
- else:
-        sys.stdout.write('unable to locate config file at /etc/ansible/infoblox.yaml (or .yml)\n')
- sys.exit(-1)
-
- try:
- loader = DataLoader()
- config = loader.load_from_file(config_file)
- provider = config.get('provider') or {}
- wapi = WapiInventory(provider)
- except Exception as exc:
- sys.stdout.write(to_text(exc))
- sys.exit(-1)
-
- if args.host:
- host_filter = {'name': args.host}
- else:
- host_filter = {}
-
-    config_filters = config.get('filters') or {}
-
- if config_filters.get('view') is not None:
- host_filter['view'] = config_filters['view']
-
- if config_filters.get('extattrs'):
- extattrs = normalize_extattrs(config_filters['extattrs'])
- else:
- extattrs = {}
-
- hostvars = {}
- inventory = {
- '_meta': {
- 'hostvars': hostvars
- }
- }
-
- return_fields = ['name', 'view', 'extattrs', 'ipv4addrs']
-
- hosts = wapi.get_object('record:host',
- host_filter,
- extattrs=extattrs,
- return_fields=return_fields)
-
- if hosts:
- for item in hosts:
- view = item['view']
- name = item['name']
-
- if view not in inventory:
- inventory[view] = {'hosts': []}
-
- inventory[view]['hosts'].append(name)
-
- hostvars[name] = {
- 'view': view
- }
-
- if item.get('extattrs'):
- for key, value in iteritems(flatten_extattrs(item['extattrs'])):
- if key.startswith('ansible_'):
- hostvars[name][key] = value
- else:
- if 'extattrs' not in hostvars[name]:
- hostvars[name]['extattrs'] = {}
- hostvars[name]['extattrs'][key] = value
-
- sys.stdout.write(json.dumps(inventory, indent=4))
- sys.exit(0)
-
-
-if __name__ == '__main__':
- main()
diff --git a/contrib/inventory/infoblox.yaml b/contrib/inventory/infoblox.yaml
deleted file mode 100644
index c1be5324ac..0000000000
--- a/contrib/inventory/infoblox.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-# This file provides the configuration information for the Infoblox dynamic
-# inventory script that is used to dynamically pull host information from NIOS.
-# This file should be copied to /etc/ansible/infoblox.yaml in order for the
-# dynamic script to find it.
-
-# Sets the provider arguments for authenticating to the Infoblox server to
-# retrieve inventory hosts. Provider arguments can also be set using
-# environment variables. Supported environment variables all start with
-# INFOBLOX_{{ name }}. For instance, to set the host provider value, the
-# environment variable would be INFOBLOX_HOST.
-provider:
- host: <SERVER_IP>
- username: <USERNAME>
- password: <PASSWORD>
-
-# Filters allow the dynamic inventory script to restrict the set of hosts that
-# are returned from the Infoblox server.
-filters:
- # restrict returned hosts by extensible attributes
- extattrs: {}
-
- # restrict returned hosts to a specified DNS view
- view: null
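
A hypothetical filled-in configuration (all values illustrative):

    provider:
      host: 192.0.2.10
      username: admin
      password: secret

    filters:
      extattrs:
        Site: DC1
      view: default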
diff --git a/contrib/inventory/jail.py b/contrib/inventory/jail.py
deleted file mode 100755
index 9a2ccf18fe..0000000000
--- a/contrib/inventory/jail.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2013, Michael Scherer <misc@zarb.org>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from subprocess import Popen, PIPE
-import sys
-import json
-
-result = {}
-result['all'] = {}
-
-pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True)
-result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()]
-result['all']['vars'] = {}
-result['all']['vars']['ansible_connection'] = 'jail'
-
-if len(sys.argv) == 2 and sys.argv[1] == '--list':
- print(json.dumps(result))
-elif len(sys.argv) == 3 and sys.argv[1] == '--host':
- print(json.dumps({'ansible_connection': 'jail'}))
-else:
- sys.stderr.write("Need an argument, either --list or --host <host>\n")
diff --git a/contrib/inventory/landscape.py b/contrib/inventory/landscape.py
deleted file mode 100755
index 9aa660bef8..0000000000
--- a/contrib/inventory/landscape.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2015, Marc Abramowitz <marca@surveymonkey.com>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Dynamic inventory script which lets you use nodes discovered by Canonical's
-# Landscape (http://www.ubuntu.com/management/landscape-features).
-#
-# Requires the `landscape_api` Python module
-# See:
-# - https://landscape.canonical.com/static/doc/api/api-client-package.html
-# - https://landscape.canonical.com/static/doc/api/python-api.html
-#
-# Environment variables
-# ---------------------
-# - `LANDSCAPE_API_URI`
-# - `LANDSCAPE_API_KEY`
-# - `LANDSCAPE_API_SECRET`
-# - `LANDSCAPE_API_SSL_CA_FILE` (optional)
-
-
-import argparse
-import collections
-import os
-import sys
-
-from landscape_api.base import API, HTTPError
-
-import json
-
-_key = 'landscape'
-
-
-class EnvironmentConfig(object):
- uri = os.getenv('LANDSCAPE_API_URI')
- access_key = os.getenv('LANDSCAPE_API_KEY')
- secret_key = os.getenv('LANDSCAPE_API_SECRET')
- ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE')
-
-
-def _landscape_client():
- env = EnvironmentConfig()
- return API(
- uri=env.uri,
- access_key=env.access_key,
- secret_key=env.secret_key,
- ssl_ca_file=env.ssl_ca_file)
-
-
-def get_landscape_members_data():
- return _landscape_client().get_computers()
-
-
-def get_nodes(data):
- return [node['hostname'] for node in data]
-
-
-def get_groups(data):
- groups = collections.defaultdict(list)
-
- for node in data:
- for value in node['tags']:
- groups[value].append(node['hostname'])
-
- return groups
-
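
For example (hypothetical data):

    data = [{'hostname': 'db1', 'tags': ['postgres', 'prod']},
            {'hostname': 'db2', 'tags': ['prod']}]
    # get_groups(data) -> {'postgres': ['db1'], 'prod': ['db1', 'db2']}
    # (returned as a collections.defaultdict)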
-
-def get_meta(data):
- meta = {'hostvars': {}}
- for node in data:
- meta['hostvars'][node['hostname']] = {'tags': node['tags']}
- return meta
-
-
-def print_list():
- data = get_landscape_members_data()
- nodes = get_nodes(data)
- groups = get_groups(data)
- meta = get_meta(data)
- inventory_data = {_key: nodes, '_meta': meta}
- inventory_data.update(groups)
- print(json.dumps(inventory_data))
-
-
-def print_host(host):
- data = get_landscape_members_data()
- meta = get_meta(data)
- print(json.dumps(meta['hostvars'][host]))
-
-
-def get_args(args_list):
- parser = argparse.ArgumentParser(
- description='ansible inventory script reading from landscape cluster')
- mutex_group = parser.add_mutually_exclusive_group(required=True)
- help_list = 'list all hosts from landscape cluster'
- mutex_group.add_argument('--list', action='store_true', help=help_list)
- help_host = 'display variables for a host'
- mutex_group.add_argument('--host', help=help_host)
- return parser.parse_args(args_list)
-
-
-def main(args_list):
- args = get_args(args_list)
- if args.list:
- print_list()
- if args.host:
- print_host(args.host)
-
-
-if __name__ == '__main__':
- main(sys.argv[1:])
diff --git a/contrib/inventory/libcloud.ini b/contrib/inventory/libcloud.ini
deleted file mode 100644
index 7592c41cd0..0000000000
--- a/contrib/inventory/libcloud.ini
+++ /dev/null
@@ -1,15 +0,0 @@
-# Ansible Apache Libcloud Generic inventory script
-
-[driver]
-provider = CLOUDSTACK
-host =
-path =
-secure = True
-verify_ssl_cert = True
-
-key =
-secret =
-
-[cache]
-cache_path=/path/to/your/cache
-cache_max_age=60
diff --git a/contrib/inventory/libvirt_lxc.py b/contrib/inventory/libvirt_lxc.py
deleted file mode 100755
index c0d84dbaaf..0000000000
--- a/contrib/inventory/libvirt_lxc.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2013, Michael Scherer <misc@zarb.org>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from subprocess import Popen, PIPE
-import sys
-import json
-
-result = {}
-result['all'] = {}
-
-pipe = Popen(['virsh', '-q', '-c', 'lxc:///', 'list', '--name', '--all'], stdout=PIPE, universal_newlines=True)
-result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()]
-result['all']['vars'] = {}
-result['all']['vars']['ansible_connection'] = 'libvirt_lxc'
-
-if len(sys.argv) == 2 and sys.argv[1] == '--list':
- print(json.dumps(result))
-elif len(sys.argv) == 3 and sys.argv[1] == '--host':
- print(json.dumps({'ansible_connection': 'libvirt_lxc'}))
-else:
- sys.stderr.write("Need an argument, either --list or --host <host>\n")
diff --git a/contrib/inventory/linode.ini b/contrib/inventory/linode.ini
deleted file mode 100644
index c925d970e9..0000000000
--- a/contrib/inventory/linode.ini
+++ /dev/null
@@ -1,18 +0,0 @@
-# Ansible Linode external inventory script settings
-#
-
-[linode]
-
-# API calls to Linode are slow. For this reason, we cache the results of an API
-# call. Set this to the path you want cache files to be written to. Two files
-# will be written to this directory:
-# - ansible-Linode.cache
-# - ansible-Linode.index
-cache_path = /tmp
-
-# The number of seconds a cache file is considered valid. After this many
-# seconds, a new API call will be made, and the cache file will be updated.
-cache_max_age = 300
-
-# If set to true use the hosts public ip in the dictionary instead of the label
-use_public_ip = false
\ No newline at end of file
diff --git a/contrib/inventory/linode.py b/contrib/inventory/linode.py
deleted file mode 100755
index f4ae302816..0000000000
--- a/contrib/inventory/linode.py
+++ /dev/null
@@ -1,348 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Linode external inventory script
-=================================
-
-Generates inventory that Ansible can understand by making API request to
-Linode using the Chube library.
-
-NOTE: This script assumes Ansible is being executed where Chube is already
-installed and has a valid config at ~/.chube. If not, run:
-
- pip install chube
- echo -e "---\napi_key: <YOUR API KEY GOES HERE>" > ~/.chube
-
-For more details, see: https://github.com/exosite/chube
-
-NOTE: By default, this script also assumes that the Linodes in your account all have
-labels that correspond to hostnames that are in your resolver search path.
-(Name resolution for these labels is typically configured via /etc/resolv.conf
-or entries in /etc/hosts.)
-Optionally, if you would like to use the host's public IP instead of its label,
-use the following setting in linode.ini:
-
- use_public_ip = true
-
-When run against a specific host, this script returns the following variables:
-
- - api_id
- - datacenter_id
- - datacenter_city (lowercase city name of data center, e.g. 'tokyo')
- - label
- - display_group
- - create_dt
- - total_hd
- - total_xfer
- - total_ram
- - status
- - public_ip (The first public IP found)
- - private_ip (The first private IP found, or empty string if none)
- - alert_cpu_enabled
- - alert_cpu_threshold
- - alert_diskio_enabled
- - alert_diskio_threshold
- - alert_bwin_enabled
- - alert_bwin_threshold
- - alert_bwout_enabled
- - alert_bwout_threshold
- - alert_bwquota_enabled
- - alert_bwquota_threshold
- - backup_weekly_daily
- - backup_window
- - watchdog
-
-Peter Sankauskas did most of the legwork here with his EC2 plugin; I
-just adapted that for Linode.
-'''
-
-# (c) 2013, Dan Slimmon
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-######################################################################
-
-# Standard imports
-import os
-import re
-import sys
-import argparse
-from time import time
-
-import json
-
-try:
- from chube import load_chube_config
- from chube import api as chube_api
- from chube.datacenter import Datacenter
- from chube.linode_obj import Linode
-except Exception:
- try:
- # remove local paths and other stuff that may
- # cause an import conflict, as chube is sensitive
- # to name collisions on importing
- old_path = sys.path
- sys.path = [d for d in sys.path if d not in ('', os.getcwd(), os.path.dirname(os.path.realpath(__file__)))]
-
- from chube import load_chube_config
- from chube import api as chube_api
- from chube.datacenter import Datacenter
- from chube.linode_obj import Linode
-
- sys.path = old_path
- except Exception as e:
-        raise Exception("could not import chube: %s" % e)
-
-load_chube_config()
-
-# Imports for ansible
-from ansible.module_utils.six.moves import configparser as ConfigParser
-
-
-class LinodeInventory(object):
- def _empty_inventory(self):
- return {"_meta": {"hostvars": {}}}
-
- def __init__(self):
- """Main execution path."""
- # Inventory grouped by display group
- self.inventory = self._empty_inventory()
- # Index of label to Linode ID
- self.index = {}
- # Local cache of Datacenter objects populated by populate_datacenter_cache()
- self._datacenter_cache = None
-
- # Read settings and parse CLI arguments
- self.read_settings()
- self.parse_cli_args()
-
- # Cache
- if self.args.refresh_cache:
- self.do_api_calls_update_cache()
- elif not self.is_cache_valid():
- self.do_api_calls_update_cache()
-
- # Data to print
- if self.args.host:
- data_to_print = self.get_host_info()
- elif self.args.list:
- # Display list of nodes for inventory
- if len(self.inventory) == 1:
- data_to_print = self.get_inventory_from_cache()
- else:
- data_to_print = self.json_format_dict(self.inventory, True)
-
- print(data_to_print)
-
- def is_cache_valid(self):
- """Determines if the cache file has expired, or if it is still valid."""
- if os.path.isfile(self.cache_path_cache):
- mod_time = os.path.getmtime(self.cache_path_cache)
- current_time = time()
- if (mod_time + self.cache_max_age) > current_time:
- if os.path.isfile(self.cache_path_index):
- return True
- return False
-
- def read_settings(self):
- """Reads the settings from the .ini file."""
- config = ConfigParser.SafeConfigParser()
- config.read(os.path.dirname(os.path.realpath(__file__)) + '/linode.ini')
-
- # Cache related
- cache_path = config.get('linode', 'cache_path')
- self.cache_path_cache = cache_path + "/ansible-linode.cache"
- self.cache_path_index = cache_path + "/ansible-linode.index"
- self.cache_max_age = config.getint('linode', 'cache_max_age')
- self.use_public_ip = config.getboolean('linode', 'use_public_ip')
-
- def parse_cli_args(self):
- """Command line argument processing"""
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Linode')
- parser.add_argument('--list', action='store_true', default=True,
- help='List nodes (default: True)')
- parser.add_argument('--host', action='store',
- help='Get all the variables about a specific node')
- parser.add_argument('--refresh-cache', action='store_true', default=False,
- help='Force refresh of cache by making API requests to Linode (default: False - use cache files)')
- self.args = parser.parse_args()
-
- def do_api_calls_update_cache(self):
- """Do API calls, and save data in cache files."""
- self.get_nodes()
- self.write_to_cache(self.inventory, self.cache_path_cache)
- self.write_to_cache(self.index, self.cache_path_index)
-
- def get_nodes(self):
-        """Makes a Linode API call to get the list of nodes."""
- try:
- for node in Linode.search(status=Linode.STATUS_RUNNING):
- self.add_node(node)
- except chube_api.linode_api.ApiError as e:
- sys.exit("Looks like Linode's API is down:\n %s" % e)
-
- def get_node(self, linode_id):
- """Gets details about a specific node."""
- try:
- return Linode.find(api_id=linode_id)
- except chube_api.linode_api.ApiError as e:
- sys.exit("Looks like Linode's API is down:\n%s" % e)
-
- def populate_datacenter_cache(self):
- """Creates self._datacenter_cache, containing all Datacenters indexed by ID."""
- self._datacenter_cache = {}
- dcs = Datacenter.search()
- for dc in dcs:
- self._datacenter_cache[dc.api_id] = dc
-
- def get_datacenter_city(self, node):
-        """Returns the lowercase city name of the node's data center."""
- if self._datacenter_cache is None:
- self.populate_datacenter_cache()
- location = self._datacenter_cache[node.datacenter_id].location
- location = location.lower()
- location = location.split(",")[0]
- return location
-
- def add_node(self, node):
-        """Adds a node to the inventory and index."""
- if self.use_public_ip:
- dest = self.get_node_public_ip(node)
- else:
- dest = node.label
-
- # Add to index
- self.index[dest] = node.api_id
-
- # Inventory: Group by node ID (always a group of 1)
- self.inventory[node.api_id] = [dest]
-
- # Inventory: Group by datacenter city
- self.push(self.inventory, self.get_datacenter_city(node), dest)
-
- # Inventory: Group by display group
- self.push(self.inventory, node.display_group, dest)
-
- # Inventory: Add a "linode" global tag group
- self.push(self.inventory, "linode", dest)
-
- # Add host info to hostvars
- self.inventory["_meta"]["hostvars"][dest] = self._get_host_info(node)
-
- def get_node_public_ip(self, node):
-        """Returns the public IP address of the node."""
- return [addr.address for addr in node.ipaddresses if addr.is_public][0]
-
- def get_host_info(self):
- """Get variables about a specific host."""
-
- if len(self.index) == 0:
- # Need to load index from cache
- self.load_index_from_cache()
-
- if self.args.host not in self.index:
- # try updating the cache
- self.do_api_calls_update_cache()
- if self.args.host not in self.index:
- # host might not exist anymore
- return self.json_format_dict({}, True)
-
- node_id = self.index[self.args.host]
- node = self.get_node(node_id)
-
- return self.json_format_dict(self._get_host_info(node), True)
-
- def _get_host_info(self, node):
- node_vars = {}
- for direct_attr in [
- "api_id",
- "datacenter_id",
- "label",
- "display_group",
- "create_dt",
- "total_hd",
- "total_xfer",
- "total_ram",
- "status",
- "alert_cpu_enabled",
- "alert_cpu_threshold",
- "alert_diskio_enabled",
- "alert_diskio_threshold",
- "alert_bwin_enabled",
- "alert_bwin_threshold",
- "alert_bwout_enabled",
- "alert_bwout_threshold",
- "alert_bwquota_enabled",
- "alert_bwquota_threshold",
- "backup_weekly_daily",
- "backup_window",
- "watchdog"
- ]:
- node_vars[direct_attr] = getattr(node, direct_attr)
-
- node_vars["datacenter_city"] = self.get_datacenter_city(node)
- node_vars["public_ip"] = self.get_node_public_ip(node)
-
- # Set the SSH host information, so these inventory items can be used if
- # their labels aren't FQDNs
- node_vars['ansible_ssh_host'] = node_vars["public_ip"]
- node_vars['ansible_host'] = node_vars["public_ip"]
-
- private_ips = [addr.address for addr in node.ipaddresses if not addr.is_public]
-
- if private_ips:
- node_vars["private_ip"] = private_ips[0]
-
- return node_vars
-
- def push(self, my_dict, key, element):
-        """Pushes an element onto an array that may not have been defined in the dict."""
- if key in my_dict:
- my_dict[key].append(element)
- else:
- my_dict[key] = [element]
-
- def get_inventory_from_cache(self):
- """Reads the inventory from the cache file and returns it as a JSON object."""
-        with open(self.cache_path_cache, 'r') as cache:
-            return cache.read()
-
- def load_index_from_cache(self):
- """Reads the index from the cache file and sets self.index."""
-        with open(self.cache_path_index, 'r') as cache:
-            self.index = json.loads(cache.read())
-
- def write_to_cache(self, data, filename):
- """Writes data in JSON format to a file."""
- json_data = self.json_format_dict(data, True)
-        with open(filename, 'w') as cache:
-            cache.write(json_data)
-
- def to_safe(self, word):
-        """Replaces any characters that would be invalid in an ansible group name with underscores."""
- return re.sub(r"[^A-Za-z0-9\-]", "_", word)
-
- def json_format_dict(self, data, pretty=False):
- """Converts a dict to a JSON object and dumps it as a formatted string."""
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
-
-LinodeInventory()
diff --git a/contrib/inventory/lxc_inventory.py b/contrib/inventory/lxc_inventory.py
deleted file mode 100755
index 86a6b22c2d..0000000000
--- a/contrib/inventory/lxc_inventory.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python
-#
-# (c) 2015-16 Florian Haas, hastexo Professional Services GmbH
-# <florian@hastexo.com>
-# Based in part on:
-# libvirt_lxc.py, (c) 2013, Michael Scherer <misc@zarb.org>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-"""
-Ansible inventory script for LXC containers. Requires Python
-bindings for LXC API.
-
-In LXC, containers can be grouped by setting the lxc.group option,
-which may be found more than once in a container's
-configuration. So, we enumerate all containers, fetch their list
-of groups, and then build the dictionary in the way Ansible expects
-it.
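-
-For illustration only (hypothetical container names), two containers that
-both carry lxc.group = web would produce:
-
-    {"all": {"hosts": ["c1", "c2"], "vars": {"ansible_connection": "lxc"}},
-     "web": {"hosts": ["c1", "c2"], "vars": {"ansible_connection": "lxc"}}}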
-"""
-from __future__ import print_function
-
-import sys
-import lxc
-import json
-
-
-def build_dict():
- """Returns a dictionary keyed to the defined LXC groups. All
- containers, including the ones not in any group, are included in the
- "all" group."""
- # Enumerate all containers, and list the groups they are in. Also,
- # implicitly add every container to the 'all' group.
- containers = dict([(c,
- ['all'] +
- (lxc.Container(c).get_config_item('lxc.group') or []))
- for c in lxc.list_containers()])
-
- # Extract the groups, flatten the list, and remove duplicates
- groups = set(sum([g for g in containers.values()], []))
-
-    # Create a dictionary for each group (including the 'all' group)
- return dict([(g, {'hosts': [k for k, v in containers.items() if g in v],
- 'vars': {'ansible_connection': 'lxc'}}) for g in groups])
-
-
-def main(argv):
- """Returns a JSON dictionary as expected by Ansible"""
- result = build_dict()
- if len(argv) == 2 and argv[1] == '--list':
- json.dump(result, sys.stdout)
- elif len(argv) == 3 and argv[1] == '--host':
- json.dump({'ansible_connection': 'lxc'}, sys.stdout)
- else:
- print("Need an argument, either --list or --host <host>", file=sys.stderr)
-
-
-if __name__ == '__main__':
- main(sys.argv)
diff --git a/contrib/inventory/lxd.ini b/contrib/inventory/lxd.ini
deleted file mode 100644
index 5398e7d021..0000000000
--- a/contrib/inventory/lxd.ini
+++ /dev/null
@@ -1,13 +0,0 @@
-# LXD external inventory script settings
-
-[lxd]
-
-# The default resource
-#resource = local:
-
-# The group name to add the hosts to
-#group = lxd
-
-# The connection type to return for these hosts - lxd hasn't been tested yet
-#connection = lxd
-connection = smart
diff --git a/contrib/inventory/lxd.py b/contrib/inventory/lxd.py
deleted file mode 100755
index 2cb1354277..0000000000
--- a/contrib/inventory/lxd.py
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2013, Michael Scherer <misc@zarb.org>
-# (c) 2014, Hiroaki Nakamura <hnakamur@gmail.com>
-# (c) 2016, Andrew Clarke <andrew@oscailte.org>
-#
-# This file is based on https://github.com/ansible/ansible/blob/devel/plugins/inventory/libvirt_lxc.py which is part of Ansible,
-# and https://github.com/hnakamur/lxc-ansible-playbooks/blob/master/provisioning/inventory-lxc.py
-#
-# NOTE, this file has some obvious limitations, improvements welcome
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from subprocess import Popen, PIPE
-import distutils.spawn
-import sys
-import json
-
-from ansible.module_utils.six.moves import configparser
-
-# Set up defaults
-resource = 'local:'
-group = 'lxd'
-connection = 'lxd'
-hosts = {}
-result = {}
-
-# Read the settings from the lxd.ini file
-config = configparser.SafeConfigParser()
-config.read(os.path.dirname(os.path.realpath(__file__)) + '/lxd.ini')
-if config.has_option('lxd', 'resource'):
- resource = config.get('lxd', 'resource')
-if config.has_option('lxd', 'group'):
- group = config.get('lxd', 'group')
-if config.has_option('lxd', 'connection'):
- connection = config.get('lxd', 'connection')
-
-# Ensure executable exists
-if distutils.spawn.find_executable('lxc'):
-
- # Set up containers result and hosts array
- result[group] = {}
- result[group]['hosts'] = []
-
- # Run the command and load json result
- pipe = Popen(['lxc', 'list', resource, '--format', 'json'], stdout=PIPE, universal_newlines=True)
- lxdjson = json.load(pipe.stdout)
-
- # Iterate the json lxd output
- for item in lxdjson:
-
- # Check state and network
- if 'state' in item and item['state'] is not None and 'network' in item['state']:
- network = item['state']['network']
-
- # Check for eth0 and addresses
- if 'eth0' in network and 'addresses' in network['eth0']:
- addresses = network['eth0']['addresses']
-
- # Iterate addresses
- for address in addresses:
-
- # Only return inet family addresses
- if 'family' in address and address['family'] == 'inet':
- if 'address' in address:
- ip = address['address']
- name = item['name']
-
- # Add the host to the results and the host array
- result[group]['hosts'].append(name)
- hosts[name] = ip
-
- # Set the other containers result values
- result[group]['vars'] = {}
- result[group]['vars']['ansible_connection'] = connection
-
-# Process arguments
-if len(sys.argv) == 2 and sys.argv[1] == '--list':
- print(json.dumps(result))
-elif len(sys.argv) == 3 and sys.argv[1] == '--host':
- if sys.argv[2] == 'localhost':
- print(json.dumps({'ansible_connection': 'local'}))
- else:
- if connection == 'lxd':
- print(json.dumps({'ansible_connection': connection}))
- else:
- print(json.dumps({'ansible_connection': connection, 'ansible_host': hosts[sys.argv[2]]}))
-else:
-    sys.stderr.write("Need an argument, either --list or --host <host>\n")
diff --git a/contrib/inventory/mdt.ini b/contrib/inventory/mdt.ini
deleted file mode 100644
index c401c0ce17..0000000000
--- a/contrib/inventory/mdt.ini
+++ /dev/null
@@ -1,17 +0,0 @@
-[mdt]
-
-# Set the MDT server to connect to
-server = localhost.example.com
-
-# Set the MDT Instance
-instance = EXAMPLEINSTANCE
-
-# Set the MDT database
-database = MDTDB
-
-# Configure login credentials
-user = local.domain\admin
-password = adminpassword
-
-[tower]
-groupname = mdt
diff --git a/contrib/inventory/mdt_dynamic_inventory.py b/contrib/inventory/mdt_dynamic_inventory.py
deleted file mode 100755
index 154aac4f8d..0000000000
--- a/contrib/inventory/mdt_dynamic_inventory.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2016, Julian Barnett <jbarnett@tableau.com>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-'''
-MDT external inventory script
-=================================
-author: J Barnett 06/23/2016 01:15
-maintainer: J Barnett (github @jbarnett1981)
-'''
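-
-# Example invocations (a sketch; assumes mdt.ini is configured and the MDT
-# database is reachable):
-#   ./mdt_dynamic_inventory.py --list
-#   ./mdt_dynamic_inventory.py --host EXAMPLEHOST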
-
-import argparse
-import json
-import pymssql
-from ansible.module_utils.six.moves import configparser
-
-
-class MDTInventory(object):
-
- def __init__(self):
- ''' Main execution path '''
- self.conn = None
-
- # Initialize empty inventory
- self.inventory = self._empty_inventory()
-
- # Read CLI arguments
- self.read_settings()
- self.parse_cli_args()
-
- # Get Hosts
- if self.args.list:
- self.get_hosts()
-
- # Get specific host vars
- if self.args.host:
- self.get_hosts(self.args.host)
-
- def _connect(self, query):
- '''
-        Connect to MDT and dump the contents of the dbo.ComputerIdentity table
- '''
- if not self.conn:
- self.conn = pymssql.connect(server=self.mdt_server + "\\" + self.mdt_instance, user=self.mdt_user, password=self.mdt_password,
- database=self.mdt_database)
- cursor = self.conn.cursor()
- cursor.execute(query)
- self.mdt_dump = cursor.fetchall()
- self.conn.close()
-
- def get_hosts(self, hostname=False):
- '''
- Gets host from MDT Database
- '''
- if hostname:
- query = ("SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role "
- "FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID where t1.Description = '%s'" % hostname)
- else:
- query = 'SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID'
- self._connect(query)
-
-        # Use the group name configured in Ansible Tower for this inventory
- groupname = self.mdt_groupname
-
- # Initialize empty host list
- hostlist = []
-
- # Parse through db dump and populate inventory
- for hosts in self.mdt_dump:
- self.inventory['_meta']['hostvars'][hosts[1]] = {'id': hosts[0], 'name': hosts[1], 'mac': hosts[2], 'role': hosts[3]}
- hostlist.append(hosts[1])
- self.inventory[groupname] = hostlist
-
- # Print it all out
- print(json.dumps(self.inventory, indent=2))
-
- def _empty_inventory(self):
- '''
- Create empty inventory dictionary
- '''
- return {"_meta": {"hostvars": {}}}
-
- def read_settings(self):
- '''
- Reads the settings from the mdt.ini file
- '''
- config = configparser.SafeConfigParser()
- config.read('mdt.ini')
-
- # MDT Server and instance and database
- self.mdt_server = config.get('mdt', 'server')
- self.mdt_instance = config.get('mdt', 'instance')
- self.mdt_database = config.get('mdt', 'database')
-
- # MDT Login credentials
- if config.has_option('mdt', 'user'):
- self.mdt_user = config.get('mdt', 'user')
- if config.has_option('mdt', 'password'):
- self.mdt_password = config.get('mdt', 'password')
-
- # Group name in Tower
- if config.has_option('tower', 'groupname'):
- self.mdt_groupname = config.get('tower', 'groupname')
-
- def parse_cli_args(self):
- '''
- Command line argument processing
- '''
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on MDT')
- parser.add_argument('--list', action='store_true', default=False, help='List instances')
- parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
- self.args = parser.parse_args()
-
-
-if __name__ == "__main__":
- # Run the script
- MDTInventory()
diff --git a/contrib/inventory/nagios_livestatus.ini b/contrib/inventory/nagios_livestatus.ini
deleted file mode 100644
index 320f11f35c..0000000000
--- a/contrib/inventory/nagios_livestatus.ini
+++ /dev/null
@@ -1,41 +0,0 @@
-# Ansible Nagios external inventory script settings
-#
-# To get all available possibilities, check the following URLs:
-# http://www.naemon.org/documentation/usersguide/livestatus.html
-# https://mathias-kettner.de/checkmk_livestatus.html
-#
-
-[local]
-# Livestatus URI
-# Example for the default naemon livestatus unix socket:
-# livestatus_uri=unix:/var/cache/naemon/live
-
-[remote]
-
-# default field name for host: name
-# Uncomment to override:
-# host_field=address
-#
-# default group field for host: groups
-# Uncomment to override:
-# group_field=state
-# default fields retrieved: address, alias, display_name, childs, parents
-# To override, uncomment the following line
-# fields_to_retrieve=address,alias,display_name
-#
-# default variable prefix: livestatus_
-# To override, uncomment the following line
-# var_prefix=naemon_
-#
-# default filter: None
-#
-# Uncomment to override
-#
-# All hosts with state = 0 (OK)
-# host_filter=state = 0
-# Warning: for the moment, only one filter can be used at a time; conditions cannot be combined.
-#
-# All hosts in the Linux group
-# host_filter=groups >= Linux
-#
-livestatus_uri=tcp:192.168.66.137:6557
diff --git a/contrib/inventory/nagios_livestatus.py b/contrib/inventory/nagios_livestatus.py
deleted file mode 100755
index 25c043b5c0..0000000000
--- a/contrib/inventory/nagios_livestatus.py
+++ /dev/null
@@ -1,173 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2015, Yannig Perre <yannig.perre@gmail.com>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-'''
-Nagios livestatus inventory script. Before using this script, please
-update the nagios_livestatus.ini file.
-
-Livestatus is a nagios/naemon/shinken module which lets you retrieve
-information stored in the monitoring core.
-
-This inventory plugin needs the livestatus API for Python. Please install it
-before using this script (apt/pip/yum/...).
-
-Checkmk livestatus: https://mathias-kettner.de/checkmk_livestatus.html
-Livestatus API: http://www.naemon.org/documentation/usersguide/livestatus.html
-'''
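-
-# Example invocations (a sketch; assumes nagios_livestatus.ini points at a
-# reachable livestatus backend and 'webserver01' is a monitored host):
-#   ./nagios_livestatus.py --list --pretty
-#   ./nagios_livestatus.py --host webserver01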
-
-import os
-import re
-import argparse
-import sys
-
-from ansible.module_utils.six.moves import configparser
-import json
-
-try:
- from mk_livestatus import Socket
-except ImportError:
- sys.exit("Error: mk_livestatus is needed. Try something like: pip install python-mk-livestatus")
-
-
-class NagiosLivestatusInventory(object):
-
- def parse_ini_file(self):
- config = configparser.SafeConfigParser()
- config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_livestatus.ini')
- for section in config.sections():
- if not config.has_option(section, 'livestatus_uri'):
- continue
-
- # If fields_to_retrieve is not set, using default fields
- fields_to_retrieve = self.default_fields_to_retrieve
- if config.has_option(section, 'fields_to_retrieve'):
- fields_to_retrieve = [field.strip() for field in config.get(section, 'fields_to_retrieve').split(',')]
- fields_to_retrieve = tuple(fields_to_retrieve)
-
- # default section values
- section_values = {
- 'var_prefix': 'livestatus_',
- 'host_filter': None,
- 'host_field': 'name',
- 'group_field': 'groups'
- }
- for key, value in section_values.items():
- if config.has_option(section, key):
- section_values[key] = config.get(section, key).strip()
-
- # Retrieving livestatus string connection
- livestatus_uri = config.get(section, 'livestatus_uri')
- backend_definition = None
-
- # Local unix socket
- unix_match = re.match('unix:(.*)', livestatus_uri)
- if unix_match is not None:
- backend_definition = {'connection': unix_match.group(1)}
-
- # Remote tcp connection
- tcp_match = re.match('tcp:(.*):([^:]*)', livestatus_uri)
- if tcp_match is not None:
- backend_definition = {'connection': (tcp_match.group(1), int(tcp_match.group(2)))}
-
- # No valid livestatus_uri => exiting
- if backend_definition is None:
- raise Exception('livestatus_uri field is invalid (%s). Expected: unix:/path/to/live or tcp:host:port' % livestatus_uri)
-
- # Updating backend_definition with current value
- backend_definition['name'] = section
- backend_definition['fields'] = fields_to_retrieve
- for key, value in section_values.items():
- backend_definition[key] = value
-
- self.backends.append(backend_definition)
-
- def parse_options(self):
- parser = argparse.ArgumentParser()
- parser.add_argument('--host', nargs=1)
- parser.add_argument('--list', action='store_true')
- parser.add_argument('--pretty', action='store_true')
- self.options = parser.parse_args()
-
- def add_host(self, hostname, group):
- if group not in self.result:
- self.result[group] = {}
- self.result[group]['hosts'] = []
- if hostname not in self.result[group]['hosts']:
- self.result[group]['hosts'].append(hostname)
-
- def query_backend(self, backend, host=None):
- '''Query a livestatus backend'''
- hosts_request = Socket(backend['connection']).hosts.columns(backend['host_field'], backend['group_field'])
-
- if backend['host_filter'] is not None:
- hosts_request = hosts_request.filter(backend['host_filter'])
-
- if host is not None:
- hosts_request = hosts_request.filter('name = ' + host[0])
-
- hosts_request._columns += backend['fields']
-
- hosts = hosts_request.call()
- for host in hosts:
- hostname = host[backend['host_field']]
- hostgroups = host[backend['group_field']]
- if not isinstance(hostgroups, list):
- hostgroups = [hostgroups]
- self.add_host(hostname, 'all')
- self.add_host(hostname, backend['name'])
- for group in hostgroups:
- self.add_host(hostname, group)
- for field in backend['fields']:
- var_name = backend['var_prefix'] + field
- if hostname not in self.result['_meta']['hostvars']:
- self.result['_meta']['hostvars'][hostname] = {}
- self.result['_meta']['hostvars'][hostname][var_name] = host[field]
-
- def __init__(self):
-
- self.defaultgroup = 'group_all'
- self.default_fields_to_retrieve = ('address', 'alias', 'display_name', 'childs', 'parents')
- self.backends = []
- self.options = None
-
- self.parse_ini_file()
- self.parse_options()
-
- self.result = {}
- self.result['_meta'] = {}
- self.result['_meta']['hostvars'] = {}
- self.json_indent = None
- if self.options.pretty:
- self.json_indent = 2
-
- if len(self.backends) == 0:
- sys.exit("Error: Livestatus configuration is missing. See nagios_livestatus.ini.")
-
- for backend in self.backends:
- self.query_backend(backend, self.options.host)
-
- if self.options.host:
- print(json.dumps(self.result['_meta']['hostvars'][self.options.host[0]], indent=self.json_indent))
- elif self.options.list:
- print(json.dumps(self.result, indent=self.json_indent))
- else:
- sys.exit("usage: --list or --host HOSTNAME [--pretty]")
-
-
-NagiosLivestatusInventory()
diff --git a/contrib/inventory/nagios_ndo.ini b/contrib/inventory/nagios_ndo.ini
deleted file mode 100644
index 1e133a29f3..0000000000
--- a/contrib/inventory/nagios_ndo.ini
+++ /dev/null
@@ -1,10 +0,0 @@
-# Ansible Nagios external inventory script settings
-#
-
-[ndo]
-# NDO database URI
-# Make sure that data is returned as strings and not bytes if using python 3.
-# See http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html
-# for supported databases and URI format.
-# Example for the mysqlclient module:
-database_uri=mysql+mysqldb://user:passwd@hostname/ndo?charset=utf8&use_unicode=1
diff --git a/contrib/inventory/nagios_ndo.py b/contrib/inventory/nagios_ndo.py
deleted file mode 100755
index 0f89ede659..0000000000
--- a/contrib/inventory/nagios_ndo.py
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2014, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-Nagios NDO external inventory script.
-========================================
-
-Returns hosts and hostgroups from Nagios NDO.
-
-Configuration is read from `nagios_ndo.ini`.
-"""
-
-import os
-import argparse
-import sys
-from ansible.module_utils.six.moves import configparser
-import json
-
-try:
- from sqlalchemy import text
- from sqlalchemy.engine import create_engine
-except ImportError:
- sys.exit("Error: SQLAlchemy is needed. Try something like: pip install sqlalchemy")
-
-
-class NagiosNDOInventory(object):
-
- def read_settings(self):
- config = configparser.SafeConfigParser()
- config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini')
- if config.has_option('ndo', 'database_uri'):
- self.ndo_database_uri = config.get('ndo', 'database_uri')
-
- def read_cli(self):
- parser = argparse.ArgumentParser()
- parser.add_argument('--host', nargs=1)
- parser.add_argument('--list', action='store_true')
- self.options = parser.parse_args()
-
- def get_hosts(self):
- engine = create_engine(self.ndo_database_uri)
- connection = engine.connect()
- select_hosts = text("SELECT display_name \
- FROM nagios_hosts")
- select_hostgroups = text("SELECT alias \
- FROM nagios_hostgroups")
- select_hostgroup_hosts = text("SELECT h.display_name \
- FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \
- WHERE hgm.hostgroup_id = hg.hostgroup_id \
- AND hgm.host_object_id = h.host_object_id \
- AND hg.alias =:hostgroup_alias")
-
- hosts = connection.execute(select_hosts)
- self.result['all']['hosts'] = [host['display_name'] for host in hosts]
-
- for hostgroup in connection.execute(select_hostgroups):
- hostgroup_alias = hostgroup['alias']
- self.result[hostgroup_alias] = {}
- hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias)
- self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts]
-
- def __init__(self):
-
- self.defaultgroup = 'group_all'
- self.ndo_database_uri = None
- self.options = None
-
- self.read_settings()
- self.read_cli()
-
- self.result = {}
- self.result['all'] = {}
- self.result['all']['hosts'] = []
- self.result['_meta'] = {}
- self.result['_meta']['hostvars'] = {}
-
- if self.ndo_database_uri:
- self.get_hosts()
- if self.options.host:
- print(json.dumps({}))
- elif self.options.list:
- print(json.dumps(self.result))
- else:
- sys.exit("usage: --list or --host HOSTNAME")
- else:
- sys.exit("Error: Database configuration is missing. See nagios_ndo.ini.")
-
-
-NagiosNDOInventory()
diff --git a/contrib/inventory/nsot.py b/contrib/inventory/nsot.py
deleted file mode 100755
index 6b09704d3a..0000000000
--- a/contrib/inventory/nsot.py
+++ /dev/null
@@ -1,344 +0,0 @@
-#!/usr/bin/env python
-
-'''
-nsot
-====
-
-Ansible Dynamic Inventory to pull hosts from NSoT, a flexible CMDB by Dropbox
-
-Features
---------
-
-* Define host groups in form of NSoT device attribute criteria
-
-* All parameters defined by the spec as of 2015-09-05 are supported.
-
- + ``--list``: Returns JSON hash of host groups -> hosts and top-level
- ``_meta`` -> ``hostvars`` which correspond to all device attributes.
-
- Group vars can be specified in the YAML configuration, noted below.
-
- + ``--host <hostname>``: Returns JSON hash where every item is a device
- attribute.
-
-* In addition to all attributes assigned to resource being returned, script
- will also append ``site_id`` and ``id`` as facts to utilize.
-
-
-Configuration
--------------
-
-Since it'd be annoying and failure-prone to guess where your configuration
-file is, use ``NSOT_INVENTORY_CONFIG`` to specify the path to it.
-
-This file should adhere to the YAML spec. Each top-level variable must be a
-desired Ansible group name hashed with a single 'query' item to define the
-NSoT attribute query.
-
-Queries follow the normal NSoT query syntax, `shown here`_
-
-.. _shown here: https://github.com/dropbox/pynsot#set-queries
-
-.. code:: yaml
-
- routers:
- query: 'deviceType=ROUTER'
- vars:
- a: b
- c: d
-
- juniper_fw:
- query: 'deviceType=FIREWALL manufacturer=JUNIPER'
-
- not_f10:
- query: '-manufacturer=FORCE10'
-
-The inventory will automatically use your ``.pynsotrc`` like normal pynsot from
-cli would, so make sure that's configured appropriately.
-
-.. note::
-
-    Attributes I'm showing above are influenced by ones that the Trigger
- project likes. As is the spirit of NSoT, use whichever attributes work best
- for your workflow.
-
-If config file is blank or absent, the following default groups will be
-created:
-
-* ``routers``: deviceType=ROUTER
-* ``switches``: deviceType=SWITCH
-* ``firewalls``: deviceType=FIREWALL
-
-These are likely not useful for everyone so please use the configuration. :)
-
-.. note::
-
-    By default, resources will only be returned for the default site
-    configured in your ``~/.pynsotrc``.
-
-    If you want to specify a site, add an extra ``site: n`` key under the group.
-
-Output Examples
----------------
-
-Here are some examples shown from just calling the command directly::
-
- $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --list | jq '.'
- {
- "routers": {
- "hosts": [
- "test1.example.com"
- ],
- "vars": {
- "cool_level": "very",
- "group": "routers"
- }
- },
- "firewalls": {
- "hosts": [
- "test2.example.com"
- ],
- "vars": {
- "cool_level": "enough",
- "group": "firewalls"
- }
- },
- "_meta": {
- "hostvars": {
- "test2.example.com": {
- "make": "SRX",
- "site_id": 1,
- "id": 108
- },
- "test1.example.com": {
- "make": "MX80",
- "site_id": 1,
- "id": 107
- }
- }
- },
- "rtr_and_fw": {
- "hosts": [
- "test1.example.com",
- "test2.example.com"
- ],
- "vars": {}
- }
- }
-
-
- $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --host test1 | jq '.'
- {
- "make": "MX80",
- "site_id": 1,
- "id": 107
- }
-
-'''
-
-from __future__ import print_function
-import sys
-import os
-import pkg_resources
-import argparse
-import json
-import yaml
-from textwrap import dedent
-from pynsot.client import get_api_client
-from pynsot.app import HttpServerError
-from click.exceptions import UsageError
-
-from ansible.module_utils.six import string_types
-
-
-def warning(*objs):
- print("WARNING: ", *objs, file=sys.stderr)
-
-
-class NSoTInventory(object):
-    '''NSoT Client object for gathering inventory'''
-
- def __init__(self):
- self.config = dict()
- config_env = os.environ.get('NSOT_INVENTORY_CONFIG')
- if config_env:
-            config_file = os.path.abspath(config_env)
-            if not os.path.isfile(config_file):  # If file non-existent, use default config
-                self._config_default()
-            else:
-                with open(config_file) as f:
-                    try:
-                        self.config.update(yaml.safe_load(f))
-                    except TypeError:  # If empty file, use default config
-                        warning('Empty config file')
-                        self._config_default()
-                    except Exception as e:
-                        sys.exit('%s\n' % e)
- else: # Use defaults if env var missing
- self._config_default()
- self.groups = self.config.keys()
- self.client = get_api_client()
- self._meta = {'hostvars': dict()}
-
- def _config_default(self):
- default_yaml = '''
- ---
- routers:
- query: deviceType=ROUTER
- switches:
- query: deviceType=SWITCH
- firewalls:
- query: deviceType=FIREWALL
- '''
- self.config = yaml.safe_load(dedent(default_yaml))
-
- def do_list(self):
- '''Direct callback for when ``--list`` is provided
-
- Relies on the configuration generated from init to run
- _inventory_group()
- '''
- inventory = dict()
- for group, contents in self.config.items():
- group_response = self._inventory_group(group, contents)
- inventory.update(group_response)
- inventory.update({'_meta': self._meta})
- return json.dumps(inventory)
-
- def do_host(self, host):
- return json.dumps(self._hostvars(host))
-
- def _hostvars(self, host):
- '''Return dictionary of all device attributes
-
-        Depending on the number of devices in NSoT, this could be rather slow
-        since it has to request every device resource and filter through them
- '''
- device = [i for i in self.client.devices.get()
- if host in i['hostname']][0]
- attributes = device['attributes']
- attributes.update({'site_id': device['site_id'], 'id': device['id']})
- return attributes
-
- def _inventory_group(self, group, contents):
- '''Takes a group and returns inventory for it as dict
-
- :param group: Group name
- :type group: str
- :param contents: The contents of the group's YAML config
- :type contents: dict
-
- contents param should look like::
-
- {
- 'query': 'xx',
-                'vars': {
-                    'a': 'b'
-                }
- }
-
- Will return something like::
-
-            {group: {
-                'hosts': [],
-                'vars': {},
-            }}
- '''
- query = contents.get('query')
- hostvars = contents.get('vars', dict())
- site = contents.get('site', dict())
- obj = {group: dict()}
- obj[group]['hosts'] = []
- obj[group]['vars'] = hostvars
- try:
- assert isinstance(query, string_types)
- except Exception:
- sys.exit('ERR: Group queries must be a single string\n'
- ' Group: %s\n'
- ' Query: %s\n' % (group, query)
- )
- try:
- if site:
- site = self.client.sites(site)
- devices = site.devices.query.get(query=query)
- else:
- devices = self.client.devices.query.get(query=query)
- except HttpServerError as e:
- if '500' in str(e.response):
- _site = 'Correct site id?'
- _attr = 'Queried attributes actually exist?'
- questions = _site + '\n' + _attr
- sys.exit('ERR: 500 from server.\n%s' % questions)
- else:
- raise
- except UsageError:
- sys.exit('ERR: Could not connect to server. Running?')
-
- # Would do a list comprehension here, but would like to save code/time
- # and also acquire attributes in this step
- for host in devices:
- # Iterate through each device that matches query, assign hostname
- # to the group's hosts array and then use this single iteration as
- # a chance to update self._meta which will be used in the final
- # return
- hostname = host['hostname']
- obj[group]['hosts'].append(hostname)
- attributes = host['attributes']
- attributes.update({'site_id': host['site_id'], 'id': host['id']})
- self._meta['hostvars'].update({hostname: attributes})
-
- return obj
-
-
-def parse_args():
- desc = __doc__.splitlines()[4] # Just to avoid being redundant
-
- # Establish parser with options and error out if no action provided
- parser = argparse.ArgumentParser(
- description=desc,
- conflict_handler='resolve',
- )
-
- # Arguments
- #
- # Currently accepting (--list | -l) and (--host | -h)
- # These must not be allowed together
- parser.add_argument(
- '--list', '-l',
- help='Print JSON object containing hosts to STDOUT',
- action='store_true',
- dest='list_', # Avoiding syntax highlighting for list
- )
-
- parser.add_argument(
- '--host', '-h',
- help='Print JSON object containing hostvars for <host>',
- action='store',
- )
- args = parser.parse_args()
-
- if not args.list_ and not args.host: # Require at least one option
- parser.exit(status=1, message='No action requested')
-
- if args.list_ and args.host: # Do not allow multiple options
- parser.exit(status=1, message='Too many actions requested')
-
- return args
-
-
-def main():
- '''Set up argument handling and callback routing'''
- args = parse_args()
- client = NSoTInventory()
-
- # Callback condition
- if args.list_:
- print(client.do_list())
- elif args.host:
- print(client.do_host(args.host))
-
-
-if __name__ == '__main__':
- main()
diff --git a/contrib/inventory/nsot.yaml b/contrib/inventory/nsot.yaml
deleted file mode 100644
index ebddbc8234..0000000000
--- a/contrib/inventory/nsot.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-juniper_routers:
- query: 'deviceType=ROUTER manufacturer=JUNIPER'
- vars:
- group: juniper_routers
- netconf: true
- os: junos
-
-cisco_asa:
- query: 'manufacturer=CISCO deviceType=FIREWALL'
- vars:
- group: cisco_asa
- routed_vpn: false
- stateful: true
-
-old_cisco_asa:
- query: 'manufacturer=CISCO deviceType=FIREWALL -softwareVersion=8.3+'
- vars:
- old_nat: true
-
-not_f10:
- query: '-manufacturer=FORCE10'
diff --git a/contrib/inventory/openshift.py b/contrib/inventory/openshift.py
deleted file mode 100755
index c0aa4f1b89..0000000000
--- a/contrib/inventory/openshift.py
+++ /dev/null
@@ -1,100 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2013, Michael Scherer <misc@zarb.org>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-DOCUMENTATION = '''
----
-inventory: openshift
-short_description: Openshift gears external inventory script
-description:
- - Generates inventory of Openshift gears using the REST interface
-    - this permits reusing playbooks to set up an Openshift gear
-version_added: None
-author: Michael Scherer
-'''
-
-import json
-import os
-import os.path
-import sys
-from ansible.module_utils.six import StringIO
-
-from ansible.module_utils.urls import open_url
-from ansible.module_utils.six.moves import configparser as ConfigParser
-
-configparser = None
-
-
-def get_from_rhc_config(variable):
- global configparser
- CONF_FILE = os.path.expanduser('~/.openshift/express.conf')
- if os.path.exists(CONF_FILE):
- if not configparser:
-            with open(CONF_FILE, 'r') as conf:
-                ini_str = '[root]\n' + conf.read()
-            configparser = ConfigParser.SafeConfigParser()
-            configparser.readfp(StringIO(ini_str))
- try:
- return configparser.get('root', variable)
- except ConfigParser.NoOptionError:
- return None
-
-
-def get_config(env_var, config_var):
- result = os.getenv(env_var)
- if not result:
- result = get_from_rhc_config(config_var)
- if not result:
- sys.exit("failed=True msg='missing %s'" % env_var)
- return result
-
-
-def get_json_from_api(url, username, password):
- headers = {'Accept': 'application/json; version=1.5'}
- response = open_url(url, headers=headers, url_username=username, url_password=password)
- return json.loads(response.read())['data']
-
-
-username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin')
-password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password')
-broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER', 'libra_server')
-
-
-response = get_json_from_api(broker_url + '/domains', username, password)
-
-response = get_json_from_api("%s/domains/%s/applications" %
- (broker_url, response[0]['id']), username, password)
-
-result = {}
-for app in response:
-
- # ssh://520311404832ce3e570000ff@blog-johndoe.example.org
- (user, host) = app['ssh_url'][6:].split('@')
- app_name = host.split('-')[0]
-
- result[app_name] = {}
- result[app_name]['hosts'] = []
- result[app_name]['hosts'].append(host)
- result[app_name]['vars'] = {}
- result[app_name]['vars']['ansible_ssh_user'] = user
-
-if len(sys.argv) == 2 and sys.argv[1] == '--list':
- print(json.dumps(result))
-elif len(sys.argv) == 3 and sys.argv[1] == '--host':
- print(json.dumps({}))
-else:
-    sys.stderr.write("Need an argument, either --list or --host <host>\n")
diff --git a/contrib/inventory/openstack.yml b/contrib/inventory/openstack.yml
deleted file mode 100644
index 8053fb8fda..0000000000
--- a/contrib/inventory/openstack.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-clouds:
- vexxhost:
- profile: vexxhost
- auth:
- project_name: 39e296b2-fc96-42bf-8091-cb742fa13da9
- username: fb886a9b-c37b-442a-9be3-964bed961e04
- password: fantastic-password1
- rax:
- profile: rackspace
- auth:
- username: example
- password: spectacular-password
- project_id: 2352426
- region_name: DFW,ORD,IAD
- devstack:
- auth:
- auth_url: https://devstack.example.com
- username: stack
- password: stack
- project_name: stack
-ansible:
- use_hostnames: True
- expand_hostvars: False
- fail_on_errors: True
diff --git a/contrib/inventory/openstack_inventory.py b/contrib/inventory/openstack_inventory.py
deleted file mode 100755
index ab2d96cb8b..0000000000
--- a/contrib/inventory/openstack_inventory.py
+++ /dev/null
@@ -1,272 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
-# Copyright (c) 2013, Jesse Keating <jesse.keating@rackspace.com>
-# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
-# Copyright (c) 2016, Rackspace Australia
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-
-# The OpenStack Inventory module uses os-client-config for configuration.
-# https://github.com/openstack/os-client-config
-# This means it will either:
-# - Respect normal OS_* environment variables like other OpenStack tools
-# - Read values from a clouds.yaml file.
-# If you want to configure via clouds.yaml, you can put the file in:
-# - Current directory
-# - ~/.config/openstack/clouds.yaml
-# - /etc/openstack/clouds.yaml
-# - /etc/ansible/openstack.yml
-# The clouds.yaml file can contain entries for multiple clouds and multiple
-# regions of those clouds. If it does, this inventory module will by default
-# connect to all of them and present them as one contiguous inventory. You
-# can limit to one cloud by passing the `--cloud` parameter, or use the
-# OS_CLOUD environment variable. If caching is enabled, and a cloud is
-# selected, then per-cloud cache folders will be used.
-#
-# See the adjacent openstack.yml file for an example config file
-# There are two ansible inventory specific options that can be set in
-# the inventory section.
-# expand_hostvars controls whether or not the inventory will make extra API
-# calls to fill out additional information about each server
-# use_hostnames changes the behavior from registering every host with its UUID
-# and making a group of its hostname to only doing this if the
-# hostname in question has more than one server
-# fail_on_errors causes the inventory to fail and return no hosts if one cloud
-# has failed (for example, bad credentials or being offline).
-# When set to False, the inventory will return hosts from
-# whichever other clouds it can contact. (Default: True)
-#
-# Also it is possible to pass the correct user by setting an ansible_user: $myuser
-# metadata attribute.
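-#
-# Example invocations (a sketch; 'devstack' matches the sample entry in the
-# adjacent openstack.yml, so substitute a cloud from your own clouds.yaml):
-#   OS_CLOUD=devstack ./openstack_inventory.py --list
-#   ./openstack_inventory.py --cloud devstack --refresh --list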
-
-import argparse
-import collections
-import os
-import sys
-import time
-from distutils.version import StrictVersion
-from io import StringIO
-
-import json
-
-import openstack as sdk
-from openstack.cloud import inventory as sdk_inventory
-from openstack.config import loader as cloud_config
-
-CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml']
-
-
-def get_groups_from_server(server_vars, namegroup=True):
- groups = []
-
- region = server_vars['region']
- cloud = server_vars['cloud']
- metadata = server_vars.get('metadata', {})
-
- # Create a group for the cloud
- groups.append(cloud)
-
- # Create a group on region
- if region:
- groups.append(region)
-
- # And one by cloud_region
- groups.append("%s_%s" % (cloud, region))
-
- # Check if group metadata key in servers' metadata
- if 'group' in metadata:
- groups.append(metadata['group'])
-
- for extra_group in metadata.get('groups', '').split(','):
- if extra_group:
- groups.append(extra_group.strip())
-
- groups.append('instance-%s' % server_vars['id'])
- if namegroup:
- groups.append(server_vars['name'])
-
- for key in ('flavor', 'image'):
- if 'name' in server_vars[key]:
- groups.append('%s-%s' % (key, server_vars[key]['name']))
-
- for key, value in iter(metadata.items()):
- groups.append('meta-%s_%s' % (key, value))
-
- az = server_vars.get('az', None)
- if az:
- # Make groups for az, region_az and cloud_region_az
- groups.append(az)
- groups.append('%s_%s' % (region, az))
- groups.append('%s_%s_%s' % (cloud, region, az))
- return groups
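-
-
-# For illustration only (hypothetical server values): a server named 'web01'
-# in region 'RegionOne' of cloud 'devstack', placed in az 'nova' with
-# metadata {'group': 'frontends'}, would be assigned groups including:
-#   devstack, RegionOne, devstack_RegionOne, frontends, instance-<uuid>,
-#   web01, nova, RegionOne_nova, devstack_RegionOne_nova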
-
-
-def get_host_groups(inventory, refresh=False, cloud=None):
- (cache_file, cache_expiration_time) = get_cache_settings(cloud)
- if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
- groups = to_json(get_host_groups_from_cloud(inventory))
- with open(cache_file, 'w') as f:
- f.write(groups)
- else:
- with open(cache_file, 'r') as f:
- groups = f.read()
- return groups
-
-
-def append_hostvars(hostvars, groups, key, server, namegroup=False):
- hostvars[key] = dict(
- ansible_ssh_host=server['interface_ip'],
- ansible_host=server['interface_ip'],
- openstack=server)
-
- metadata = server.get('metadata', {})
- if 'ansible_user' in metadata:
- hostvars[key]['ansible_user'] = metadata['ansible_user']
-
- for group in get_groups_from_server(server, namegroup=namegroup):
- groups[group].append(key)
-
-
-def get_host_groups_from_cloud(inventory):
- groups = collections.defaultdict(list)
- firstpass = collections.defaultdict(list)
- hostvars = {}
- list_args = {}
- if hasattr(inventory, 'extra_config'):
- use_hostnames = inventory.extra_config['use_hostnames']
- list_args['expand'] = inventory.extra_config['expand_hostvars']
- if StrictVersion(sdk.version.__version__) >= StrictVersion("0.13.0"):
- list_args['fail_on_cloud_config'] = \
- inventory.extra_config['fail_on_errors']
- else:
- use_hostnames = False
-
- for server in inventory.list_hosts(**list_args):
-
- if 'interface_ip' not in server:
- continue
- firstpass[server['name']].append(server)
- for name, servers in firstpass.items():
- if len(servers) == 1 and use_hostnames:
- append_hostvars(hostvars, groups, name, servers[0])
- else:
- server_ids = set()
- # Trap for duplicate results
- for server in servers:
- server_ids.add(server['id'])
- if len(server_ids) == 1 and use_hostnames:
- append_hostvars(hostvars, groups, name, servers[0])
- else:
- for server in servers:
- append_hostvars(
- hostvars, groups, server['id'], server,
- namegroup=True)
- groups['_meta'] = {'hostvars': hostvars}
- return groups
-
-
-def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
- ''' Determines if cache file has expired, or if it is still valid '''
- if refresh:
- return True
- if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0:
- mod_time = os.path.getmtime(cache_file)
- current_time = time.time()
- if (mod_time + cache_expiration_time) > current_time:
- return False
- return True
-
-
-def get_cache_settings(cloud=None):
- config_files = cloud_config.CONFIG_FILES + CONFIG_FILES
- if cloud:
- config = cloud_config.OpenStackConfig(
- config_files=config_files).get_one(cloud=cloud)
- else:
- config = cloud_config.OpenStackConfig(
- config_files=config_files).get_all()[0]
- # For inventory-wide caching
- cache_expiration_time = config.get_cache_expiration_time()
- cache_path = config.get_cache_path()
- if cloud:
- cache_path = '{0}_{1}'.format(cache_path, cloud)
- if not os.path.exists(cache_path):
- os.makedirs(cache_path)
- cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
- return (cache_file, cache_expiration_time)
-
-
-def to_json(in_dict):
- return json.dumps(in_dict, sort_keys=True, indent=2)
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
- parser.add_argument('--cloud', default=os.environ.get('OS_CLOUD'),
-                        help='Cloud name (default: None)')
- parser.add_argument('--private',
- action='store_true',
- help='Use private address for ansible host')
- parser.add_argument('--refresh', action='store_true',
- help='Refresh cached information')
- parser.add_argument('--debug', action='store_true', default=False,
- help='Enable debug output')
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument('--list', action='store_true',
- help='List active servers')
- group.add_argument('--host', help='List details about the specific host')
-
- return parser.parse_args()
-
-
-def main():
- args = parse_args()
- try:
- # openstacksdk library may write to stdout, so redirect this
- sys.stdout = StringIO()
- config_files = cloud_config.CONFIG_FILES + CONFIG_FILES
- sdk.enable_logging(debug=args.debug)
- inventory_args = dict(
- refresh=args.refresh,
- config_files=config_files,
- private=args.private,
- cloud=args.cloud,
- )
- if hasattr(sdk_inventory.OpenStackInventory, 'extra_config'):
- inventory_args.update(dict(
- config_key='ansible',
- config_defaults={
- 'use_hostnames': False,
- 'expand_hostvars': True,
- 'fail_on_errors': True,
- }
- ))
-
- inventory = sdk_inventory.OpenStackInventory(**inventory_args)
-
- sys.stdout = sys.__stdout__
- if args.list:
- output = get_host_groups(inventory, refresh=args.refresh, cloud=args.cloud)
- elif args.host:
- output = to_json(inventory.get_host(args.host))
- print(output)
- except sdk.exceptions.OpenStackCloudException as e:
- sys.stderr.write('%s\n' % e.message)
- sys.exit(1)
- sys.exit(0)
-
-
-if __name__ == '__main__':
- main()
diff --git a/contrib/inventory/openvz.py b/contrib/inventory/openvz.py
deleted file mode 100755
index 5ea039c827..0000000000
--- a/contrib/inventory/openvz.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# openvz.py
-#
-# Copyright 2014 jordonr <jordon@beamsyn.net>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-# Inspired by libvirt_lxc.py inventory script
-# https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py
-#
-# Groups are determined by the description field of openvz guests.
-# Multiple groups can be separated by commas: webserver,dbserver
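-#
-# For illustration, a guest whose description is "webserver,dbserver" is added
-# to both the 'webserver' and 'dbserver' groups; guests with no description
-# fall into the default 'vzguest' group.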
-
-from subprocess import Popen, PIPE
-import sys
-import json
-
-
-# List openvz hosts
-vzhosts = ['vzhost1', 'vzhost2', 'vzhost3']
-# Add openvz hosts to the inventory and Add "_meta" trick
-inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}}
-# default group, when description not defined
-default_group = ['vzguest']
-
-
-def get_guests():
- # Loop through vzhosts
- for h in vzhosts:
- # SSH to vzhost and get the list of guests in json
- pipe = Popen(['ssh', h, 'vzlist', '-j'], stdout=PIPE, universal_newlines=True)
-
- # Load Json info of guests
- json_data = json.loads(pipe.stdout.read())
-
- # loop through guests
- for j in json_data:
- # Add information to host vars
- inventory['_meta']['hostvars'][j['hostname']] = {
- 'ctid': j['ctid'],
- 'veid': j['veid'],
- 'vpsid': j['vpsid'],
- 'private_path': j['private'],
- 'root_path': j['root'],
- 'ip': j['ip']
- }
-
- # determine group from guest description
- if j['description'] is not None:
- groups = j['description'].split(",")
- else:
- groups = default_group
-
- # add guest to inventory
- for g in groups:
- if g not in inventory:
- inventory[g] = {'hosts': []}
-
- inventory[g]['hosts'].append(j['hostname'])
-
- return inventory
-
-
-if len(sys.argv) == 2 and sys.argv[1] == '--list':
- inv_json = get_guests()
- print(json.dumps(inv_json, sort_keys=True))
-elif len(sys.argv) == 3 and sys.argv[1] == '--host':
- print(json.dumps({}))
-else:
- print("Need an argument, either --list or --host <host>")
diff --git a/contrib/inventory/ovirt.ini b/contrib/inventory/ovirt.ini
deleted file mode 100644
index d9aaf8a73e..0000000000
--- a/contrib/inventory/ovirt.ini
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2013 Google Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-
-# Author: Josha Inglis <jinglis@iix.net> based on the gce.ini by Eric Johnson <erjohnso@google.com>
-
-[ovirt]
-# For ovirt.py script, which can be used with Python SDK version 3
-# Service Account configuration information can be stored in the
-# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already
-# exist in your PYTHONPATH and be picked up automatically with an import
-# statement in the inventory script. However, you can specify an absolute
-# path to the secrets.py file with 'libcloud_secrets' parameter.
-ovirt_api_secrets =
-
-# If you are not going to use a 'secrets.py' file, you can set the necessary
-# authorization parameters here.
-ovirt_url =
-ovirt_username =
-ovirt_password =
-ovirt_ca_file =
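
The secrets.py file mentioned above is nothing more than an importable Python module exposing an OVIRT_KEYWORD_PARAMS dict, which ovirt.py reads via getattr and passes straight to the SDK constructor. A plausible minimal example (all values invented):

    # secrets.py -- keyword arguments handed to ovirtsdk.api.API()
    OVIRT_KEYWORD_PARAMS = {
        'url': 'https://engine.example.com/api',
        'username': 'admin@internal',
        'password': 'changeme',
    }
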
diff --git a/contrib/inventory/ovirt.py b/contrib/inventory/ovirt.py
deleted file mode 100755
index f97ab7a24a..0000000000
--- a/contrib/inventory/ovirt.py
+++ /dev/null
@@ -1,289 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2015 IIX Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-ovirt external inventory script
-=================================
-
-Generates inventory that Ansible can understand by making API requests to
-oVirt via the ovirt-engine-sdk-python library.
-
-When run against a specific host, this script returns the following variables
-based on the data obtained from the ovirt_sdk Node object:
- - ovirt_uuid
- - ovirt_id
- - ovirt_image
- - ovirt_machine_type
- - ovirt_ips
- - ovirt_name
- - ovirt_description
- - ovirt_status
- - ovirt_zone
- - ovirt_tags
- - ovirt_stats
-
-When run in --list mode, instances are grouped by the following categories:
-
- - zone:
- zone group name.
- - instance tags:
- An entry is created for each tag. For example, if you have two instances
- with a common tag called 'foo', they will both be grouped together under
- the 'tag_foo' name.
- - network name:
- the name of the network is appended to 'network_' (e.g. the 'default'
- network will result in a group named 'network_default')
- - running status:
- group name prefixed with 'status_' (e.g. status_up, status_down,..)
-
-Examples:
- Execute uname on all instances in the us-central1-a zone
- $ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a"
-
- Use the ovirt inventory script to print out instance specific information
- $ contrib/inventory/ovirt.py --host my_instance
-
-Author: Josha Inglis <jinglis@iix.net> based on the gce.py by Eric Johnson <erjohnso@google.com>
-Version: 0.0.1
-"""
-
-USER_AGENT_PRODUCT = "Ansible-ovirt_inventory_plugin"
-USER_AGENT_VERSION = "v1"
-
-import sys
-import os
-import argparse
-from collections import defaultdict
-from ansible.module_utils.six.moves import configparser as ConfigParser
-
-import json
-
-try:
- # noinspection PyUnresolvedReferences
- from ovirtsdk.api import API
- # noinspection PyUnresolvedReferences
- from ovirtsdk.xml import params
-except ImportError:
- print("ovirt inventory script requires ovirt-engine-sdk-python")
- sys.exit(1)
-
-
-class OVirtInventory(object):
- def __init__(self):
- # Read settings and parse CLI arguments
- self.args = self.parse_cli_args()
- self.driver = self.get_ovirt_driver()
-
- # Just display data for specific host
- if self.args.host:
- print(self.json_format_dict(
- self.node_to_dict(self.get_instance(self.args.host)),
- pretty=self.args.pretty
- ))
- sys.exit(0)
-
- # Otherwise, assume user wants all instances grouped
- print(
- self.json_format_dict(
- data=self.group_instances(),
- pretty=self.args.pretty
- )
- )
- sys.exit(0)
-
- @staticmethod
- def get_ovirt_driver():
- """
- Determine the ovirt authorization settings and return a ovirt_sdk driver.
-
- :rtype : ovirtsdk.api.API
- """
- kwargs = {}
-
- ovirt_ini_default_path = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), "ovirt.ini")
- ovirt_ini_path = os.environ.get('OVIRT_INI_PATH', ovirt_ini_default_path)
-
- # Create a ConfigParser.
- # This provides empty defaults to each key, so that environment
- # variable configuration (as opposed to INI configuration) is able
- # to work.
- config = ConfigParser.SafeConfigParser(defaults={
- 'ovirt_url': '',
- 'ovirt_username': '',
- 'ovirt_password': '',
- 'ovirt_api_secrets': '',
- })
- if 'ovirt' not in config.sections():
- config.add_section('ovirt')
- config.read(ovirt_ini_path)
-
- # Attempt to get ovirt params from a configuration file, if one
- # exists.
- secrets_path = config.get('ovirt', 'ovirt_api_secrets')
- secrets_found = False
- try:
- # noinspection PyUnresolvedReferences,PyPackageRequirements
- import secrets
-
- kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {})
- secrets_found = True
- except ImportError:
- pass
-
- if not secrets_found and secrets_path:
- if not secrets_path.endswith('secrets.py'):
- err = "Must specify ovirt_sdk secrets file as /absolute/path/to/secrets.py"
- print(err)
- sys.exit(1)
- sys.path.append(os.path.dirname(secrets_path))
- try:
- # noinspection PyUnresolvedReferences,PyPackageRequirements
- import secrets
-
- kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {})
- except ImportError:
- pass
- if not secrets_found:
- kwargs = {
- 'url': config.get('ovirt', 'ovirt_url'),
- 'username': config.get('ovirt', 'ovirt_username'),
- 'password': config.get('ovirt', 'ovirt_password'),
- }
-
- # If the appropriate environment variables are set, they override
- # other configuration; process those into our args and kwargs.
- kwargs['url'] = os.environ.get('OVIRT_URL', kwargs['url'])
- kwargs['username'] = next(val for val in [os.environ.get('OVIRT_EMAIL'), os.environ.get('OVIRT_USERNAME'), kwargs['username']] if val is not None)
- kwargs['password'] = next(val for val in [os.environ.get('OVIRT_PASS'), os.environ.get('OVIRT_PASSWORD'), kwargs['password']] if val is not None)
-
- # Retrieve and return the ovirt driver.
- return API(insecure=True, **kwargs)
-
- @staticmethod
- def parse_cli_args():
- """
- Command line argument processing
-
- :rtype : argparse.Namespace
- """
-
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on ovirt')
- parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
- parser.add_argument('--host', action='store', help='Get all information about an instance')
- parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)')
- return parser.parse_args()
-
- def node_to_dict(self, inst):
- """
- :type inst: params.VM
- """
- if inst is None:
- return {}
-
- inst.get_custom_properties()
- ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \
- if inst.get_guest_info() is not None else []
- stats = {}
- for stat in inst.get_statistics().list():
- stats[stat.get_name()] = stat.get_values().get_value()[0].get_datum()
-
- return {
- 'ovirt_uuid': inst.get_id(),
- 'ovirt_id': inst.get_id(),
- 'ovirt_image': inst.get_os().get_type(),
- 'ovirt_machine_type': self.get_machine_type(inst),
- 'ovirt_ips': ips,
- 'ovirt_name': inst.get_name(),
- 'ovirt_description': inst.get_description(),
- 'ovirt_status': inst.get_status().get_state(),
- 'ovirt_zone': inst.get_cluster().get_id(),
- 'ovirt_tags': self.get_tags(inst),
- 'ovirt_stats': stats,
- # Hosts don't have a public name, so we add an IP
- 'ansible_ssh_host': ips[0] if len(ips) > 0 else None
- }
-
- @staticmethod
- def get_tags(inst):
- """
- :type inst: params.VM
- """
- return [x.get_name() for x in inst.get_tags().list()]
-
- def get_machine_type(self, inst):
- inst_type = inst.get_instance_type()
- if inst_type:
- return self.driver.instancetypes.get(id=inst_type.id).name
-
- # noinspection PyBroadException,PyUnusedLocal
- def get_instance(self, instance_name):
- """Gets details about a specific instance """
- try:
- return self.driver.vms.get(name=instance_name)
- except Exception as e:
- return None
-
- def group_instances(self):
- """Group all instances"""
- groups = defaultdict(list)
- meta = {"hostvars": {}}
-
- for node in self.driver.vms.list():
- assert isinstance(node, params.VM)
- name = node.get_name()
-
- meta["hostvars"][name] = self.node_to_dict(node)
-
- zone = node.get_cluster().get_name()
- groups[zone].append(name)
-
- tags = self.get_tags(node)
- for t in tags:
- tag = 'tag_%s' % t
- groups[tag].append(name)
-
- nets = [x.get_name() for x in node.get_nics().list()]
- for net in nets:
- net = 'network_%s' % net
- groups[net].append(name)
-
- status = node.get_status().get_state()
- stat = 'status_%s' % status.lower()
- if stat in groups:
- groups[stat].append(name)
- else:
- groups[stat] = [name]
-
- groups["_meta"] = meta
-
- return groups
-
- @staticmethod
- def json_format_dict(data, pretty=False):
- """ Converts a dict to a JSON object and dumps it as a formatted
- string """
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
-
-# Run the script
-OVirtInventory()
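
group_instances() above accumulates its result in a collections.defaultdict(list), appending each VM name under every group key it qualifies for before attaching the _meta hostvars. A reduced, runnable sketch of that accumulation (VM data invented; the real script also adds network_* groups):

    import json
    from collections import defaultdict

    vms = [
        {'name': 'vm1', 'cluster': 'prod', 'tags': ['foo'], 'status': 'up'},
        {'name': 'vm2', 'cluster': 'prod', 'tags': [], 'status': 'down'},
    ]

    groups = defaultdict(list)
    meta = {'hostvars': {}}
    for vm in vms:
        meta['hostvars'][vm['name']] = vm
        groups[vm['cluster']].append(vm['name'])
        for tag in vm['tags']:
            groups['tag_%s' % tag].append(vm['name'])
        groups['status_%s' % vm['status']].append(vm['name'])
    groups['_meta'] = meta

    print(json.dumps(groups, sort_keys=True, indent=2))
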
diff --git a/contrib/inventory/ovirt4.py b/contrib/inventory/ovirt4.py
deleted file mode 100755
index 74205ae449..0000000000
--- a/contrib/inventory/ovirt4.py
+++ /dev/null
@@ -1,257 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2016 Red Hat, Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-"""
-oVirt dynamic inventory script
-=================================
-
-Generates dynamic inventory file for oVirt.
-
-Script will return following attributes for each virtual machine:
- - id
- - name
- - host
- - cluster
- - status
- - description
- - fqdn
- - os_type
- - template
- - tags
- - statistics
- - devices
-
-When run in --list mode, virtual machines are grouped by the following categories:
- - cluster
- - tag
- - status
-
- Note: If a virtual machine has multiple tags, it will appear in each of the
- corresponding tag groups.
-
-Examples:
- # Execute update of system on webserver virtual machine:
-
- $ ansible -i contrib/inventory/ovirt4.py webserver -m yum -a "name=* state=latest"
-
- # Get webserver virtual machine information:
-
- $ contrib/inventory/ovirt4.py --host webserver
-
-Author: Ondra Machacek (@machacekondra)
-"""
-
-import argparse
-import os
-import sys
-
-from collections import defaultdict
-
-from ansible.module_utils.six.moves import configparser
-
-import json
-
-try:
- import ovirtsdk4 as sdk
- import ovirtsdk4.types as otypes
-except ImportError:
- print('oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0')
- sys.exit(1)
-
-
-def parse_args():
- """
- Create command line parser for oVirt dynamic inventory script.
- """
- parser = argparse.ArgumentParser(
- description='Ansible dynamic inventory script for oVirt.',
- )
- parser.add_argument(
- '--list',
- action='store_true',
- default=True,
- help='Get data of all virtual machines (default: True).',
- )
- parser.add_argument(
- '--host',
- help='Get data of virtual machines running on specified host.',
- )
- parser.add_argument(
- '--pretty',
- action='store_true',
- default=False,
- help='Pretty format (default: False).',
- )
- return parser.parse_args()
-
-
-def create_connection():
- """
- Create a connection to oVirt engine API.
- """
- # Get the path of the configuration file, by default use
- # 'ovirt.ini' file in script directory:
- default_path = os.path.join(
- os.path.dirname(os.path.realpath(__file__)),
- 'ovirt.ini',
- )
- config_path = os.environ.get('OVIRT_INI_PATH', default_path)
-
- # Create parser and add ovirt section if it doesn't exist:
- config = configparser.SafeConfigParser(
- defaults={
- 'ovirt_url': os.environ.get('OVIRT_URL'),
- 'ovirt_username': os.environ.get('OVIRT_USERNAME'),
- 'ovirt_password': os.environ.get('OVIRT_PASSWORD'),
- 'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''),
- }
- )
- if not config.has_section('ovirt'):
- config.add_section('ovirt')
- config.read(config_path)
-
- # Create a connection with options defined in ini file:
- return sdk.Connection(
- url=config.get('ovirt', 'ovirt_url'),
- username=config.get('ovirt', 'ovirt_username'),
- password=config.get('ovirt', 'ovirt_password', raw=True),
- ca_file=config.get('ovirt', 'ovirt_ca_file') or None,
- insecure=not config.get('ovirt', 'ovirt_ca_file'),
- )
-
-
-def get_dict_of_struct(connection, vm):
- """
- Transform SDK Vm Struct type to Python dictionary.
- """
- if vm is None:
- return dict()
-
- vms_service = connection.system_service().vms_service()
- clusters_service = connection.system_service().clusters_service()
- vm_service = vms_service.vm_service(vm.id)
- devices = vm_service.reported_devices_service().list()
- tags = vm_service.tags_service().list()
- stats = vm_service.statistics_service().list()
- labels = vm_service.affinity_labels_service().list()
- groups = clusters_service.cluster_service(
- vm.cluster.id
- ).affinity_groups_service().list()
-
- return {
- 'id': vm.id,
- 'name': vm.name,
- 'host': connection.follow_link(vm.host).name if vm.host else None,
- 'cluster': connection.follow_link(vm.cluster).name,
- 'status': str(vm.status),
- 'description': vm.description,
- 'fqdn': vm.fqdn,
- 'os_type': vm.os.type,
- 'template': connection.follow_link(vm.template).name,
- 'tags': [tag.name for tag in tags],
- 'affinity_labels': [label.name for label in labels],
- 'affinity_groups': [
- group.name for group in groups
-            if vm.name in [v.name for v in connection.follow_link(group.vms)]
- ],
- 'statistics': dict(
- (stat.name, stat.values[0].datum) for stat in stats if stat.values
- ),
- 'devices': dict(
- (device.name, [ip.address for ip in device.ips]) for device in devices if device.ips
- ),
- 'ansible_host': next((device.ips[0].address for device in devices if device.ips), None)
- }
-
-
-def get_data(connection, vm_name=None):
- """
- Obtain data of `vm_name` if specified, otherwise obtain data of all vms.
- """
- vms_service = connection.system_service().vms_service()
- clusters_service = connection.system_service().clusters_service()
-
- if vm_name:
- vm = vms_service.list(search='name=%s' % vm_name) or [None]
- data = get_dict_of_struct(
- connection=connection,
- vm=vm[0],
- )
- else:
- vms = dict()
- data = defaultdict(list)
- for vm in vms_service.list():
- name = vm.name
- vm_service = vms_service.vm_service(vm.id)
- cluster_service = clusters_service.cluster_service(vm.cluster.id)
-
- # Add vm to vms dict:
- vms[name] = get_dict_of_struct(connection, vm)
-
- # Add vm to cluster group:
- cluster_name = connection.follow_link(vm.cluster).name
- data['cluster_%s' % cluster_name].append(name)
-
- # Add vm to tag group:
- tags_service = vm_service.tags_service()
- for tag in tags_service.list():
- data['tag_%s' % tag.name].append(name)
-
- # Add vm to status group:
- data['status_%s' % vm.status].append(name)
-
- # Add vm to affinity group:
- for group in cluster_service.affinity_groups_service().list():
- if vm.name in [
- v.name for v in connection.follow_link(group.vms)
- ]:
- data['affinity_group_%s' % group.name].append(vm.name)
-
- # Add vm to affinity label group:
- affinity_labels_service = vm_service.affinity_labels_service()
- for label in affinity_labels_service.list():
- data['affinity_label_%s' % label.name].append(name)
-
- data["_meta"] = {
- 'hostvars': vms,
- }
-
- return data
-
-
-def main():
- args = parse_args()
- connection = create_connection()
-
- print(
- json.dumps(
- obj=get_data(
- connection=connection,
- vm_name=args.host,
- ),
- sort_keys=args.pretty,
- indent=args.pretty * 2,
- )
- )
-
-
-if __name__ == '__main__':
- main()
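
One detail of create_connection() worth spelling out: the OVIRT_* environment variables are injected as configparser defaults, so a value in ovirt.ini wins when present and the environment fills any gap. A compact sketch of that precedence trick (section and option names invented; the real script uses SafeConfigParser):

    import configparser
    import os

    # Pretend an OVIRT_URL-style variable is set in the environment.
    os.environ['DEMO_URL'] = 'https://engine.example.com/api'

    config = configparser.ConfigParser(
        defaults={'demo_url': os.environ.get('DEMO_URL', '')}
    )
    config.add_section('demo')
    # config.read('demo.ini') would override the default if the file set demo_url.
    print(config.get('demo', 'demo_url'))  # -> https://engine.example.com/api
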
diff --git a/contrib/inventory/packet_net.ini b/contrib/inventory/packet_net.ini
deleted file mode 100644
index 6dcc027b15..0000000000
--- a/contrib/inventory/packet_net.ini
+++ /dev/null
@@ -1,53 +0,0 @@
-# Ansible Packet.net external inventory script settings
-#
-
-[packet]
-
-# Packet projects to get info for. Set this to 'all' to get info for all
-# projects in Packet and merge the results together. Alternatively, set
-# this to a comma separated list of projects. E.g. 'project-1,project-3,project-4'
-projects = all
-projects_exclude =
-
-# By default, Packet devices in all states are returned. Specify
-# packet device states to return as a comma-separated list.
-# device_states = active, inactive, queued, provisioning
-
-# items per page to retrieve from packet api at a time
-items_per_page = 999
-
-# API calls to Packet are costly. For this reason, we cache the results of an API
-# call. Set this to the path you want cache files to be written to. Two files
-# will be written to this directory:
-# - ansible-packet.cache
-# - ansible-packet.index
-cache_path = ~/.ansible/tmp
-
-# The number of seconds a cache file is considered valid. After this many
-# seconds, a new API call will be made, and the cache file will be updated.
-# To disable the cache, set this value to 0
-cache_max_age = 300
-
-# Organize groups into a nested hierarchy instead of a flat namespace.
-nested_groups = False
-
-# Replace dashes in group names when creating groups to avoid issues with Ansible
-replace_dash_in_groups = True
-
-# The packet inventory output can become very large. To manage its size,
-# configure which groups should be created.
-group_by_device_id = True
-group_by_hostname = True
-group_by_facility = True
-group_by_project = True
-group_by_operating_system = True
-group_by_plan_type = True
-group_by_tags = True
-group_by_tag_none = True
-
-# If you only want to include hosts that match a certain regular expression
-# pattern_include = staging-*
-
-# If you want to exclude any hosts that match a certain regular expression
-# pattern_exclude = staging-*
-
diff --git a/contrib/inventory/packet_net.py b/contrib/inventory/packet_net.py
deleted file mode 100755
index 22f989a9d9..0000000000
--- a/contrib/inventory/packet_net.py
+++ /dev/null
@@ -1,506 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Packet.net external inventory script
-=================================
-
-Generates inventory that Ansible can understand by making API requests to
-Packet.net using the Packet library.
-
-NOTE: This script assumes Ansible is being executed where the environment
-variable needed for the Packet API Token has already been set:
- export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs
-
-This script also assumes there is a packet_net.ini file alongside it. To specify a
-different path to packet_net.ini, define the PACKET_NET_INI_PATH environment variable:
-
- export PACKET_NET_INI_PATH=/path/to/my_packet_net.ini
-
-'''
-
-# (c) 2016, Peter Sankauskas
-# (c) 2017, Tomas Karasek
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-######################################################################
-
-import sys
-import os
-import argparse
-import re
-from time import time
-
-from ansible.module_utils import six
-from ansible.module_utils.six.moves import configparser
-
-try:
- import packet
-except ImportError as e:
- sys.exit("failed=True msg='`packet-python` library required for this script'")
-
-import traceback
-
-
-import json
-
-
-ini_section = 'packet'
-
-
-class PacketInventory(object):
-
- def _empty_inventory(self):
- return {"_meta": {"hostvars": {}}}
-
- def __init__(self):
- ''' Main execution path '''
-
- # Inventory grouped by device IDs, tags, security groups, regions,
- # and availability zones
- self.inventory = self._empty_inventory()
-
- # Index of hostname (address) to device ID
- self.index = {}
-
- # Read settings and parse CLI arguments
- self.parse_cli_args()
- self.read_settings()
-
- # Cache
- if self.args.refresh_cache:
- self.do_api_calls_update_cache()
- elif not self.is_cache_valid():
- self.do_api_calls_update_cache()
-
- # Data to print
- if self.args.host:
- data_to_print = self.get_host_info()
-
- elif self.args.list:
- # Display list of devices for inventory
- if self.inventory == self._empty_inventory():
- data_to_print = self.get_inventory_from_cache()
- else:
- data_to_print = self.json_format_dict(self.inventory, True)
-
- print(data_to_print)
-
- def is_cache_valid(self):
- ''' Determines if the cache files have expired, or if it is still valid '''
-
- if os.path.isfile(self.cache_path_cache):
- mod_time = os.path.getmtime(self.cache_path_cache)
- current_time = time()
- if (mod_time + self.cache_max_age) > current_time:
- if os.path.isfile(self.cache_path_index):
- return True
-
- return False
-
- def read_settings(self):
- ''' Reads the settings from the packet_net.ini file '''
- if six.PY3:
- config = configparser.ConfigParser()
- else:
- config = configparser.SafeConfigParser()
-
- _ini_path_raw = os.environ.get('PACKET_NET_INI_PATH')
-
- if _ini_path_raw:
- packet_ini_path = os.path.expanduser(os.path.expandvars(_ini_path_raw))
- else:
- packet_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'packet_net.ini')
- config.read(packet_ini_path)
-
-        # items per page
-        self.items_per_page = 999
-        if config.has_option(ini_section, 'items_per_page'):
-            # Assign the configured value; previously it was read and discarded.
-            self.items_per_page = config.getint(ini_section, 'items_per_page')
-
- # Instance states to be gathered in inventory. Default is all of them.
- packet_valid_device_states = [
- 'active',
- 'inactive',
- 'queued',
- 'provisioning'
- ]
- self.packet_device_states = []
- if config.has_option(ini_section, 'device_states'):
- for device_state in config.get(ini_section, 'device_states').split(','):
- device_state = device_state.strip()
- if device_state not in packet_valid_device_states:
- continue
- self.packet_device_states.append(device_state)
- else:
- self.packet_device_states = packet_valid_device_states
-
- # Cache related
- cache_dir = os.path.expanduser(config.get(ini_section, 'cache_path'))
- if not os.path.exists(cache_dir):
- os.makedirs(cache_dir)
-
- self.cache_path_cache = cache_dir + "/ansible-packet.cache"
- self.cache_path_index = cache_dir + "/ansible-packet.index"
- self.cache_max_age = config.getint(ini_section, 'cache_max_age')
-
- # Configure nested groups instead of flat namespace.
- if config.has_option(ini_section, 'nested_groups'):
- self.nested_groups = config.getboolean(ini_section, 'nested_groups')
- else:
- self.nested_groups = False
-
- # Replace dash or not in group names
- if config.has_option(ini_section, 'replace_dash_in_groups'):
- self.replace_dash_in_groups = config.getboolean(ini_section, 'replace_dash_in_groups')
- else:
- self.replace_dash_in_groups = True
-
- # Configure which groups should be created.
- group_by_options = [
- 'group_by_device_id',
- 'group_by_hostname',
- 'group_by_facility',
- 'group_by_project',
- 'group_by_operating_system',
- 'group_by_plan_type',
- 'group_by_tags',
- 'group_by_tag_none',
- ]
- for option in group_by_options:
- if config.has_option(ini_section, option):
- setattr(self, option, config.getboolean(ini_section, option))
- else:
- setattr(self, option, True)
-
- # Do we need to just include hosts that match a pattern?
- try:
- pattern_include = config.get(ini_section, 'pattern_include')
- if pattern_include and len(pattern_include) > 0:
- self.pattern_include = re.compile(pattern_include)
- else:
- self.pattern_include = None
- except configparser.NoOptionError:
- self.pattern_include = None
-
- # Do we need to exclude hosts that match a pattern?
- try:
- pattern_exclude = config.get(ini_section, 'pattern_exclude')
- if pattern_exclude and len(pattern_exclude) > 0:
- self.pattern_exclude = re.compile(pattern_exclude)
- else:
- self.pattern_exclude = None
- except configparser.NoOptionError:
- self.pattern_exclude = None
-
- # Projects
- self.projects = []
-        configProjects = config.get(ini_section, 'projects')
-        # Parse the exclude option into a list; substring matching against the
-        # raw string would also exclude projects whose names merely overlap.
-        configProjects_exclude = [
-            name.strip()
-            for name in config.get(ini_section, 'projects_exclude').split(',')
-            if name.strip()
-        ]
-        if configProjects == 'all':
-            for projectInfo in self.get_projects():
-                if projectInfo.name not in configProjects_exclude:
-                    self.projects.append(projectInfo.name)
-        else:
-            self.projects = configProjects.split(",")
-
- def parse_cli_args(self):
- ''' Command line argument processing '''
-
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Packet')
- parser.add_argument('--list', action='store_true', default=True,
- help='List Devices (default: True)')
- parser.add_argument('--host', action='store',
- help='Get all the variables about a specific device')
- parser.add_argument('--refresh-cache', action='store_true', default=False,
- help='Force refresh of cache by making API requests to Packet (default: False - use cache files)')
- self.args = parser.parse_args()
-
- def do_api_calls_update_cache(self):
- ''' Do API calls to each region, and save data in cache files '''
-
- for projectInfo in self.get_projects():
- if projectInfo.name in self.projects:
- self.get_devices_by_project(projectInfo)
-
- self.write_to_cache(self.inventory, self.cache_path_cache)
- self.write_to_cache(self.index, self.cache_path_index)
-
- def connect(self):
- ''' create connection to api server'''
- token = os.environ.get('PACKET_API_TOKEN')
- if token is None:
- raise Exception("Error reading token from environment (PACKET_API_TOKEN)!")
- manager = packet.Manager(auth_token=token)
- return manager
-
- def get_projects(self):
- '''Makes a Packet API call to get the list of projects'''
-
- params = {
- 'per_page': self.items_per_page
- }
-
- try:
- manager = self.connect()
- projects = manager.list_projects(params=params)
- return projects
- except Exception as e:
- traceback.print_exc()
- self.fail_with_error(e, 'getting Packet projects')
-
- def get_devices_by_project(self, project):
-        ''' Makes a Packet API call to list the devices in a particular
-        project '''
-
- params = {
- 'per_page': self.items_per_page
- }
-
- try:
- manager = self.connect()
- devices = manager.list_devices(project_id=project.id, params=params)
-
- for device in devices:
- self.add_device(device, project)
-
- except Exception as e:
- traceback.print_exc()
- self.fail_with_error(e, 'getting Packet devices')
-
- def fail_with_error(self, err_msg, err_operation=None):
- '''log an error to std err for ansible-playbook to consume and exit'''
- if err_operation:
- err_msg = 'ERROR: "{err_msg}", while: {err_operation}\n'.format(
- err_msg=err_msg, err_operation=err_operation)
- sys.stderr.write(err_msg)
- sys.exit(1)
-
- def get_device(self, device_id):
- manager = self.connect()
-
- device = manager.get_device(device_id)
- return device
-
- def add_device(self, device, project):
- ''' Adds a device to the inventory and index, as long as it is
- addressable '''
-
- # Only return devices with desired device states
- if device.state not in self.packet_device_states:
- return
-
- # Select the best destination address. Only include management
- # addresses as non-management (elastic) addresses need manual
- # host configuration to be routable.
- # See https://help.packet.net/article/54-elastic-ips.
- dest = None
- for ip_address in device.ip_addresses:
- if ip_address['public'] is True and \
- ip_address['address_family'] == 4 and \
- ip_address['management'] is True:
- dest = ip_address['address']
-
- if not dest:
- # Skip devices we cannot address (e.g. private VPC subnet)
- return
-
- # if we only want to include hosts that match a pattern, skip those that don't
- if self.pattern_include and not self.pattern_include.match(device.hostname):
- return
-
- # if we need to exclude hosts that match a pattern, skip those
- if self.pattern_exclude and self.pattern_exclude.match(device.hostname):
- return
-
- # Add to index
- self.index[dest] = [project.id, device.id]
-
- # Inventory: Group by device ID (always a group of 1)
- if self.group_by_device_id:
- self.inventory[device.id] = [dest]
- if self.nested_groups:
- self.push_group(self.inventory, 'devices', device.id)
-
-        # Inventory: Group by device name (hopefully a group of 1)
-        if self.group_by_hostname:
-            self.push(self.inventory, device.hostname, dest)
-            if self.nested_groups:
-                # Nest the hostname group under 'hostnames', not the project name.
-                self.push_group(self.inventory, 'hostnames', device.hostname)
-
- # Inventory: Group by project
- if self.group_by_project:
- self.push(self.inventory, project.name, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'projects', project.name)
-
-        # Inventory: Group by facility
-        if self.group_by_facility:
-            self.push(self.inventory, device.facility['code'], dest)
-            if self.nested_groups:
-                self.push_group(self.inventory, project.name, device.facility['code'])
-
- # Inventory: Group by OS
- if self.group_by_operating_system:
- self.push(self.inventory, device.operating_system['slug'], dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'operating_systems', device.operating_system['slug'])
-
- # Inventory: Group by plan type
- if self.group_by_plan_type:
- self.push(self.inventory, device.plan['slug'], dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'plans', device.plan['slug'])
-
- # Inventory: Group by tag keys
- if self.group_by_tags:
- for k in device.tags:
- key = self.to_safe("tag_" + k)
- self.push(self.inventory, key, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
-
- # Global Tag: devices without tags
- if self.group_by_tag_none and len(device.tags) == 0:
- self.push(self.inventory, 'tag_none', dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'tags', 'tag_none')
-
- # Global Tag: tag all Packet devices
- self.push(self.inventory, 'packet', dest)
-
- self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_device(device)
-
- def get_host_info_dict_from_device(self, device):
- device_vars = {}
- for key in vars(device):
- value = getattr(device, key)
- key = self.to_safe('packet_' + key)
-
- # Handle complex types
- if key == 'packet_state':
- device_vars[key] = device.state or ''
- elif key == 'packet_hostname':
- device_vars[key] = value
- elif isinstance(value, (int, bool)):
- device_vars[key] = value
- elif isinstance(value, six.string_types):
- device_vars[key] = value.strip()
- elif value is None:
- device_vars[key] = ''
- elif key == 'packet_facility':
- device_vars[key] = value['code']
- elif key == 'packet_operating_system':
- device_vars[key] = value['slug']
- elif key == 'packet_plan':
- device_vars[key] = value['slug']
- elif key == 'packet_tags':
- for k in value:
- key = self.to_safe('packet_tag_' + k)
- device_vars[key] = k
- else:
- pass
- # print key
- # print type(value)
- # print value
-
- return device_vars
-
- def get_host_info(self):
- ''' Get variables about a specific host '''
-
- if len(self.index) == 0:
- # Need to load index from cache
- self.load_index_from_cache()
-
- if self.args.host not in self.index:
- # try updating the cache
- self.do_api_calls_update_cache()
- if self.args.host not in self.index:
- # host might not exist anymore
- return self.json_format_dict({}, True)
-
- (project_id, device_id) = self.index[self.args.host]
-
- device = self.get_device(device_id)
- return self.json_format_dict(self.get_host_info_dict_from_device(device), True)
-
- def push(self, my_dict, key, element):
- ''' Push an element onto an array that may not have been defined in
- the dict '''
- group_info = my_dict.setdefault(key, [])
- if isinstance(group_info, dict):
- host_list = group_info.setdefault('hosts', [])
- host_list.append(element)
- else:
- group_info.append(element)
-
- def push_group(self, my_dict, key, element):
- ''' Push a group as a child of another group. '''
- parent_group = my_dict.setdefault(key, {})
- if not isinstance(parent_group, dict):
- parent_group = my_dict[key] = {'hosts': parent_group}
- child_groups = parent_group.setdefault('children', [])
- if element not in child_groups:
- child_groups.append(element)
-
- def get_inventory_from_cache(self):
- ''' Reads the inventory from the cache file and returns it as a JSON
- object '''
-
- cache = open(self.cache_path_cache, 'r')
- json_inventory = cache.read()
- return json_inventory
-
- def load_index_from_cache(self):
- ''' Reads the index from the cache file sets self.index '''
-
- cache = open(self.cache_path_index, 'r')
- json_index = cache.read()
- self.index = json.loads(json_index)
-
- def write_to_cache(self, data, filename):
- ''' Writes data in JSON format to a file '''
-
- json_data = self.json_format_dict(data, True)
- cache = open(filename, 'w')
- cache.write(json_data)
- cache.close()
-
- def uncammelize(self, key):
- temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
- return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
-
- def to_safe(self, word):
- ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
- regex = r"[^A-Za-z0-9\_"
- if not self.replace_dash_in_groups:
- regex += r"\-"
- return re.sub(regex + "]", "_", word)
-
- def json_format_dict(self, data, pretty=False):
- ''' Converts a dict to a JSON object and dumps it as a formatted
- string '''
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
-
-# Run the script
-PacketInventory()
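
The push()/push_group() helpers are what allow a group key to hold either a flat host list or a {'hosts': [...], 'children': [...]} dict once nested_groups is enabled. A standalone sketch of the pair and the structure they produce (facility name and address invented):

    import json

    def push(d, key, element):
        # Append a host to a group, whether the group is a list or a dict.
        group = d.setdefault(key, [])
        if isinstance(group, dict):
            group.setdefault('hosts', []).append(element)
        else:
            group.append(element)

    def push_group(d, key, element):
        # Register one group as a child of another, upgrading lists to dicts.
        parent = d.setdefault(key, {})
        if not isinstance(parent, dict):
            parent = d[key] = {'hosts': parent}
        children = parent.setdefault('children', [])
        if element not in children:
            children.append(element)

    inv = {}
    push(inv, 'ams1', '203.0.113.10')      # facility group gets a host
    push_group(inv, 'facilities', 'ams1')  # ...and is nested under 'facilities'
    print(json.dumps(inv, sort_keys=True))
    # {"ams1": ["203.0.113.10"], "facilities": {"children": ["ams1"]}}
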
diff --git a/contrib/inventory/proxmox.py b/contrib/inventory/proxmox.py
deleted file mode 100755
index 0538ca8a9b..0000000000
--- a/contrib/inventory/proxmox.py
+++ /dev/null
@@ -1,248 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE <gauthierl@lapth.cnrs.fr>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Updated 2016 by Matt Harris <matthaeus.harris@gmail.com>
-#
-# Added support for Proxmox VE 4.x
-# Added support for using the Notes field of a VM to define groups and variables:
-# A well-formatted JSON object in the Notes field will be added to the _meta
-# section for that VM. In addition, the "groups" key of this JSON object may be
-# used to specify group membership:
-#
-# { "groups": ["utility", "databases"], "a": false, "b": true }
-
-import json
-import os
-import sys
-from optparse import OptionParser
-
-from ansible.module_utils.six import iteritems
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-
-from ansible.module_utils.urls import open_url
-
-
-class ProxmoxNodeList(list):
- def get_names(self):
- return [node['node'] for node in self]
-
-
-class ProxmoxVM(dict):
- def get_variables(self):
- variables = {}
- for key, value in iteritems(self):
- variables['proxmox_' + key] = value
- return variables
-
-
-class ProxmoxVMList(list):
- def __init__(self, data=None):
- data = [] if data is None else data
-
- for item in data:
- self.append(ProxmoxVM(item))
-
- def get_names(self):
- return [vm['name'] for vm in self if vm['template'] != 1]
-
- def get_by_name(self, name):
- results = [vm for vm in self if vm['name'] == name]
- return results[0] if len(results) > 0 else None
-
- def get_variables(self):
- variables = {}
- for vm in self:
- variables[vm['name']] = vm.get_variables()
-
- return variables
-
-
-class ProxmoxPoolList(list):
- def get_names(self):
- return [pool['poolid'] for pool in self]
-
-
-class ProxmoxPool(dict):
- def get_members_name(self):
- return [member['name'] for member in self['members'] if member['template'] != 1]
-
-
-class ProxmoxAPI(object):
- def __init__(self, options):
- self.options = options
- self.credentials = None
-
- if not options.url:
- raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).')
- elif not options.username:
- raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).')
- elif not options.password:
- raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).')
-
- def auth(self):
- request_path = '{0}api2/json/access/ticket'.format(self.options.url)
-
- request_params = urlencode({
- 'username': self.options.username,
- 'password': self.options.password,
- })
-
- data = json.load(open_url(request_path, data=request_params))
-
- self.credentials = {
- 'ticket': data['data']['ticket'],
- 'CSRFPreventionToken': data['data']['CSRFPreventionToken'],
- }
-
- def get(self, url, data=None):
- request_path = '{0}{1}'.format(self.options.url, url)
-
- headers = {'Cookie': 'PVEAuthCookie={0}'.format(self.credentials['ticket'])}
- request = open_url(request_path, data=data, headers=headers)
-
- response = json.load(request)
- return response['data']
-
- def nodes(self):
- return ProxmoxNodeList(self.get('api2/json/nodes'))
-
- def vms_by_type(self, node, type):
- return ProxmoxVMList(self.get('api2/json/nodes/{0}/{1}'.format(node, type)))
-
- def vm_description_by_type(self, node, vm, type):
- return self.get('api2/json/nodes/{0}/{1}/{2}/config'.format(node, type, vm))
-
- def node_qemu(self, node):
- return self.vms_by_type(node, 'qemu')
-
- def node_qemu_description(self, node, vm):
- return self.vm_description_by_type(node, vm, 'qemu')
-
- def node_lxc(self, node):
- return self.vms_by_type(node, 'lxc')
-
- def node_lxc_description(self, node, vm):
- return self.vm_description_by_type(node, vm, 'lxc')
-
- def pools(self):
- return ProxmoxPoolList(self.get('api2/json/pools'))
-
- def pool(self, poolid):
- return ProxmoxPool(self.get('api2/json/pools/{0}'.format(poolid)))
-
-
-def main_list(options):
- results = {
- 'all': {
- 'hosts': [],
- },
- '_meta': {
- 'hostvars': {},
- }
- }
-
- proxmox_api = ProxmoxAPI(options)
- proxmox_api.auth()
-
- for node in proxmox_api.nodes().get_names():
- qemu_list = proxmox_api.node_qemu(node)
- results['all']['hosts'] += qemu_list.get_names()
- results['_meta']['hostvars'].update(qemu_list.get_variables())
- lxc_list = proxmox_api.node_lxc(node)
- results['all']['hosts'] += lxc_list.get_names()
- results['_meta']['hostvars'].update(lxc_list.get_variables())
-
- for vm in results['_meta']['hostvars']:
- vmid = results['_meta']['hostvars'][vm]['proxmox_vmid']
- try:
- type = results['_meta']['hostvars'][vm]['proxmox_type']
- except KeyError:
- type = 'qemu'
- try:
- description = proxmox_api.vm_description_by_type(node, vmid, type)['description']
- except KeyError:
- description = None
-
- try:
- metadata = json.loads(description)
- except TypeError:
- metadata = {}
- except ValueError:
- metadata = {
- 'notes': description
- }
-
- if 'groups' in metadata:
- # print metadata
- for group in metadata['groups']:
- if group not in results:
- results[group] = {
- 'hosts': []
- }
- results[group]['hosts'] += [vm]
-
- results['_meta']['hostvars'][vm].update(metadata)
-
- # pools
- for pool in proxmox_api.pools().get_names():
- results[pool] = {
- 'hosts': proxmox_api.pool(pool).get_members_name(),
- }
-
- return results
-
-
-def main_host(options):
- proxmox_api = ProxmoxAPI(options)
- proxmox_api.auth()
-
- for node in proxmox_api.nodes().get_names():
- qemu_list = proxmox_api.node_qemu(node)
- qemu = qemu_list.get_by_name(options.host)
- if qemu:
- return qemu.get_variables()
-
- return {}
-
-
-def main():
- parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME')
- parser.add_option('--list', action="store_true", default=False, dest="list")
- parser.add_option('--host', dest="host")
- parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url')
- parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username')
- parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password')
- parser.add_option('--pretty', action="store_true", default=False, dest='pretty')
- (options, args) = parser.parse_args()
-
- if options.list:
- data = main_list(options)
- elif options.host:
- data = main_host(options)
- else:
- parser.print_help()
- sys.exit(1)
-
- indent = None
- if options.pretty:
- indent = 2
-
- print(json.dumps(data, indent=indent))
-
-
-if __name__ == '__main__':
- main()
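
The try/except ladder around json.loads() in main_list() is the entire Notes-field contract: a missing description yields empty metadata (TypeError on None), free-form text is preserved under a 'notes' key (ValueError), and well-formed JSON is merged into hostvars wholesale, with its optional 'groups' list driving group membership. A runnable sketch of just that dispatch:

    import json

    def parse_notes(description):
        # None -> {}, plain text -> {'notes': ...}, valid JSON -> parsed dict.
        try:
            return json.loads(description)
        except TypeError:    # description is None
            return {}
        except ValueError:   # description is not valid JSON
            return {'notes': description}

    print(parse_notes(None))                                   # {}
    print(parse_notes('just a human note'))                    # {'notes': 'just a human note'}
    print(parse_notes('{"groups": ["utility"], "a": false}'))  # {'groups': ['utility'], 'a': False}
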
diff --git a/contrib/inventory/rackhd.py b/contrib/inventory/rackhd.py
deleted file mode 100755
index e7a5cca5f5..0000000000
--- a/contrib/inventory/rackhd.py
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/env python
-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import json
-import os
-import requests
-import argparse
-
-RACKHD_URL = 'http://localhost:8080'
-
-
-class RackhdInventory(object):
- def __init__(self, nodeids):
- self._inventory = {}
- for nodeid in nodeids:
- self._load_inventory_data(nodeid)
- inventory = {}
- for (nodeid, info) in self._inventory.items():
- inventory[nodeid] = (self._format_output(nodeid, info))
- print(json.dumps(inventory))
-
- def _load_inventory_data(self, nodeid):
- info = {}
- info['ohai'] = RACKHD_URL + '/api/common/nodes/{0}/catalogs/ohai'.format(nodeid)
- info['lookup'] = RACKHD_URL + '/api/common/lookups/?q={0}'.format(nodeid)
-
- results = {}
- for (key, url) in info.items():
- r = requests.get(url, verify=False)
- results[key] = r.text
- self._inventory[nodeid] = results
-
-    def _format_output(self, nodeid, info):
-        # Start empty so the bare 'except KeyError' below cannot leave
-        # 'output' unbound when a lookup key is missing.
-        output = {}
-        try:
- node_info = json.loads(info['lookup'])
- ipaddress = ''
- if len(node_info) > 0:
- ipaddress = node_info[0]['ipAddress']
- output = {'hosts': [ipaddress], 'vars': {}}
- for (key, result) in info.items():
- output['vars'][key] = json.loads(result)
- output['vars']['ansible_ssh_user'] = 'monorail'
- except KeyError:
- pass
- return output
-
-
-def parse_args():
- parser = argparse.ArgumentParser()
- parser.add_argument('--host')
- parser.add_argument('--list', action='store_true')
- return parser.parse_args()
-
-
-try:
-    # Check whether the RackHD URL (e.g. 10.1.1.45:8080) is set in the environment
- RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL'])
-except Exception:
- # use default values
- pass
-
-# Use the node IDs passed via --host to limit the data returned,
-# or return data for all available compute nodes
-nodeids = []
-
-if (parse_args().host):
- try:
- nodeids += parse_args().host.split(',')
- RackhdInventory(nodeids)
- except Exception:
- pass
-if (parse_args().list):
- try:
- url = RACKHD_URL + '/api/common/nodes'
- r = requests.get(url, verify=False)
- data = json.loads(r.text)
- for entry in data:
- if entry['type'] == 'compute':
- nodeids.append(entry['id'])
- RackhdInventory(nodeids)
- except Exception:
- pass
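
Unlike most of the scripts above, rackhd.py keys its inventory by RackHD node ID rather than hostname, attaching the node's management IP under 'hosts' and the raw catalog lookups under 'vars'. A sketch of the rough output shape (node ID, address, and catalog contents invented):

    import json

    inventory = {
        '5847a6739ad7cb6a1a1a853b': {
            'hosts': ['192.0.2.21'],
            'vars': {
                'lookup': [{'ipAddress': '192.0.2.21'}],
                'ohai': {'platform': 'linux'},
                'ansible_ssh_user': 'monorail',
            },
        }
    }
    print(json.dumps(inventory, sort_keys=True))
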
diff --git a/contrib/inventory/rax.ini b/contrib/inventory/rax.ini
deleted file mode 100644
index 15948e7b2e..0000000000
--- a/contrib/inventory/rax.ini
+++ /dev/null
@@ -1,66 +0,0 @@
-# Ansible Rackspace external inventory script settings
-#
-
-[rax]
-
-# Environment Variable: RAX_CREDS_FILE
-#
-# An optional configuration that points to a pyrax-compatible credentials
-# file.
-#
-# If not supplied, rax.py will look for a credentials file
-# at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK,
-# and therefore requires a file formatted per the SDK's specifications.
-#
-# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
-# creds_file = ~/.rackspace_cloud_credentials
-
-# Environment Variable: RAX_REGION
-#
-# An optional environment variable to narrow inventory search
-# scope. If used, needs a value like ORD, DFW, SYD (a Rackspace
-# datacenter) and optionally accepts a comma-separated list.
-# regions = IAD,ORD,DFW
-
-# Environment Variable: RAX_ENV
-#
-# A configuration that will use an environment as configured in
-# ~/.pyrax.cfg, see
-# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
-# env = prod
-
-# Environment Variable: RAX_META_PREFIX
-# Default: meta
-#
-# A configuration that changes the prefix used for meta key/value groups.
-# For compatibility with ec2.py set to "tag"
-# meta_prefix = meta
-
-# Environment Variable: RAX_ACCESS_NETWORK
-# Default: public
-#
-# A configuration that will tell the inventory script to use a specific
-# server network to determine the ansible_ssh_host value. If no address
-# is found, ansible_ssh_host will not be set. Accepts a comma-separated
-# list of network names, the first found wins.
-# access_network = public
-
-# Environment Variable: RAX_ACCESS_IP_VERSION
-# Default: 4
-#
-# A configuration related to "access_network" that will attempt to
-# determine the ansible_ssh_host value for either IPv4 or IPv6. If no
-# address is found, ansible_ssh_host will not be set.
-# Acceptable values are: 4 or 6. Values other than 4 or 6
-# will be ignored, and 4 will be used. Accepts a comma separated list,
-# the first found wins.
-# access_ip_version = 4
-
-# Environment Variable: RAX_CACHE_MAX_AGE
-# Default: 600
-#
-# A configuration that changes the behavior of the inventory cache.
-# Inventory listings performed within this many seconds will be returned from
-# the cache instead of making a full request for all inventory. Setting
-# this value to 0 will force a full request.
-# cache_max_age = 600 \ No newline at end of file
diff --git a/contrib/inventory/rax.py b/contrib/inventory/rax.py
deleted file mode 100755
index c6d512cd12..0000000000
--- a/contrib/inventory/rax.py
+++ /dev/null
@@ -1,470 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2013, Jesse Keating <jesse.keating@rackspace.com,
-# Paul Durivage <paul.durivage@rackspace.com>,
-# Matt Martz <matt@sivel.net>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-Rackspace Cloud Inventory
-
-Authors:
- Jesse Keating <jesse.keating@rackspace.com,
- Paul Durivage <paul.durivage@rackspace.com>,
- Matt Martz <matt@sivel.net>
-
-
-Description:
-    Generates inventory that Ansible can understand by making API requests to
-    the Rackspace Public Cloud API
-
- When run against a specific host, this script returns variables similar to:
- rax_os-ext-sts_task_state
- rax_addresses
- rax_links
- rax_image
- rax_os-ext-sts_vm_state
- rax_flavor
- rax_id
- rax_rax-bandwidth_bandwidth
- rax_user_id
- rax_os-dcf_diskconfig
- rax_accessipv4
- rax_accessipv6
- rax_progress
- rax_os-ext-sts_power_state
- rax_metadata
- rax_status
- rax_updated
- rax_hostid
- rax_name
- rax_created
- rax_tenant_id
- rax_loaded
-
-Configuration:
- rax.py can be configured using a rax.ini file or via environment
- variables. The rax.ini file should live in the same directory along side
- this script.
-
- The section header for configuration values related to this
- inventory plugin is [rax]
-
- [rax]
- creds_file = ~/.rackspace_cloud_credentials
- regions = IAD,ORD,DFW
- env = prod
- meta_prefix = meta
- access_network = public
- access_ip_version = 4
-
- Each of these configurations also has a corresponding environment variable.
- An environment variable will override a configuration file value.
-
- creds_file:
- Environment Variable: RAX_CREDS_FILE
-
- An optional configuration that points to a pyrax-compatible credentials
- file.
-
- If not supplied, rax.py will look for a credentials file
- at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK,
- and therefore requires a file formatted per the SDK's specifications.
-
- https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
-
- regions:
- Environment Variable: RAX_REGION
-
- An optional environment variable to narrow inventory search
- scope. If used, needs a value like ORD, DFW, SYD (a Rackspace
- datacenter) and optionally accepts a comma-separated list.
-
- environment:
- Environment Variable: RAX_ENV
-
- A configuration that will use an environment as configured in
- ~/.pyrax.cfg, see
- https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
-
- meta_prefix:
- Environment Variable: RAX_META_PREFIX
- Default: meta
-
- A configuration that changes the prefix used for meta key/value groups.
- For compatibility with ec2.py set to "tag"
-
- access_network:
- Environment Variable: RAX_ACCESS_NETWORK
- Default: public
-
- A configuration that will tell the inventory script to use a specific
- server network to determine the ansible_ssh_host value. If no address
- is found, ansible_ssh_host will not be set. Accepts a comma-separated
- list of network names, the first found wins.
-
- access_ip_version:
- Environment Variable: RAX_ACCESS_IP_VERSION
- Default: 4
-
- A configuration related to "access_network" that will attempt to
- determine the ansible_ssh_host value for either IPv4 or IPv6. If no
- address is found, ansible_ssh_host will not be set.
- Acceptable values are: 4 or 6. Values other than 4 or 6
- will be ignored, and 4 will be used. Accepts a comma-separated list,
- the first found wins.
-
-Examples:
- List server instances
- $ RAX_CREDS_FILE=~/.raxpub rax.py --list
-
- List servers in ORD datacenter only
- $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list
-
- List servers in ORD and DFW datacenters
- $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list
-
- Get server details for server named "server.example.com"
- $ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com
-
- Use the instance private IP to connect (instead of public IP)
- $ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list
-"""
-
-import os
-import re
-import sys
-import argparse
-import warnings
-import collections
-
-from ansible.module_utils.six import iteritems
-from ansible.module_utils.six.moves import configparser as ConfigParser
-
-import json
-
-try:
- import pyrax
- from pyrax.utils import slugify
-except ImportError:
- sys.exit('pyrax is required for this module')
-
-from time import time
-
-from ansible.constants import get_config
-from ansible.module_utils.parsing.convert_bool import boolean
-from ansible.module_utils.six import text_type
-
-NON_CALLABLES = (text_type, str, bool, dict, int, list, type(None))
-
-
-def load_config_file():
- p = ConfigParser.ConfigParser()
- config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
- 'rax.ini')
- try:
- p.read(config_file)
- except ConfigParser.Error:
- return None
- else:
- return p
-
-
-def rax_slugify(value):
- return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
-
-
-def to_dict(obj):
- instance = {}
- for key in dir(obj):
- value = getattr(obj, key)
- if isinstance(value, NON_CALLABLES) and not key.startswith('_'):
- key = rax_slugify(key)
- instance[key] = value
-
- return instance
-
-
-def host(regions, hostname):
- hostvars = {}
-
- for region in regions:
- # Connect to the region
- cs = pyrax.connect_to_cloudservers(region=region)
- for server in cs.servers.list():
- if server.name == hostname:
- for key, value in to_dict(server).items():
- hostvars[key] = value
-
- # And finally, add an IP address
- hostvars['ansible_ssh_host'] = server.accessIPv4
- print(json.dumps(hostvars, sort_keys=True, indent=4))
-
-
-def _list_into_cache(regions):
- groups = collections.defaultdict(list)
- hostvars = collections.defaultdict(dict)
- images = {}
- cbs_attachments = collections.defaultdict(dict)
-
- prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta')
-
- try:
- # Ansible 2.3+
- networks = get_config(p, 'rax', 'access_network',
- 'RAX_ACCESS_NETWORK', 'public', value_type='list')
- except TypeError:
- # Ansible 2.2.x and below
- # pylint: disable=unexpected-keyword-arg
- networks = get_config(p, 'rax', 'access_network',
- 'RAX_ACCESS_NETWORK', 'public', islist=True)
- try:
- try:
- # Ansible 2.3+
- ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
- 'RAX_ACCESS_IP_VERSION', 4, value_type='list'))
- except TypeError:
- # Ansible 2.2.x and below
- # pylint: disable=unexpected-keyword-arg
- ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
- 'RAX_ACCESS_IP_VERSION', 4, islist=True))
- except Exception:
- ip_versions = [4]
- else:
- ip_versions = [v for v in ip_versions if v in [4, 6]]
- if not ip_versions:
- ip_versions = [4]
-
- # Go through all the regions looking for servers
- for region in regions:
- # Connect to the region
- cs = pyrax.connect_to_cloudservers(region=region)
- if cs is None:
- warnings.warn(
- 'Connecting to Rackspace region "%s" has caused Pyrax to '
- 'return None. Is this a valid region?' % region,
- RuntimeWarning)
- continue
- for server in cs.servers.list():
- # Create a group on region
- groups[region].append(server.name)
-
- # Check if group metadata key in servers' metadata
- group = server.metadata.get('group')
- if group:
- groups[group].append(server.name)
-
- for extra_group in server.metadata.get('groups', '').split(','):
- if extra_group:
- groups[extra_group].append(server.name)
-
- # Add host metadata
- for key, value in to_dict(server).items():
- hostvars[server.name][key] = value
-
- hostvars[server.name]['rax_region'] = region
-
- for key, value in iteritems(server.metadata):
- groups['%s_%s_%s' % (prefix, key, value)].append(server.name)
-
- groups['instance-%s' % server.id].append(server.name)
- groups['flavor-%s' % server.flavor['id']].append(server.name)
-
- # Handle boot from volume
- if not server.image:
- if not cbs_attachments[region]:
- cbs = pyrax.connect_to_cloud_blockstorage(region)
- for vol in cbs.list():
- if boolean(vol.bootable, strict=False):
- for attachment in vol.attachments:
- metadata = vol.volume_image_metadata
- server_id = attachment['server_id']
- cbs_attachments[region][server_id] = {
- 'id': metadata['image_id'],
- 'name': slugify(metadata['image_name'])
- }
- image = cbs_attachments[region].get(server.id)
- if image:
- server.image = {'id': image['id']}
- hostvars[server.name]['rax_image'] = server.image
- hostvars[server.name]['rax_boot_source'] = 'volume'
- images[image['id']] = image['name']
- else:
- hostvars[server.name]['rax_boot_source'] = 'local'
-
- try:
- imagegroup = 'image-%s' % images[server.image['id']]
- groups[imagegroup].append(server.name)
- groups['image-%s' % server.image['id']].append(server.name)
- except KeyError:
- try:
- image = cs.images.get(server.image['id'])
- except cs.exceptions.NotFound:
- groups['image-%s' % server.image['id']].append(server.name)
- else:
- images[image.id] = image.human_id
- groups['image-%s' % image.human_id].append(server.name)
- groups['image-%s' % server.image['id']].append(server.name)
-
- # And finally, add an IP address
- ansible_ssh_host = None
- # use accessIPv[46] instead of looping address for 'public'
- for network_name in networks:
- if ansible_ssh_host:
- break
- if network_name == 'public':
- for version_name in ip_versions:
- if ansible_ssh_host:
- break
- if version_name == 6 and server.accessIPv6:
- ansible_ssh_host = server.accessIPv6
- elif server.accessIPv4:
- ansible_ssh_host = server.accessIPv4
- if not ansible_ssh_host:
- addresses = server.addresses.get(network_name, [])
- for address in addresses:
- for version_name in ip_versions:
- if ansible_ssh_host:
- break
- if address.get('version') == version_name:
- ansible_ssh_host = address.get('addr')
- break
- if ansible_ssh_host:
- hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host
-
- if hostvars:
- groups['_meta'] = {'hostvars': hostvars}
-
- with open(get_cache_file_path(regions), 'w') as cache_file:
- json.dump(groups, cache_file)
-
-
-def get_cache_file_path(regions):
- regions_str = '.'.join([reg.strip().lower() for reg in regions])
- ansible_tmp_path = os.path.join(os.path.expanduser("~"), '.ansible', 'tmp')
- if not os.path.exists(ansible_tmp_path):
- os.makedirs(ansible_tmp_path)
- return os.path.join(ansible_tmp_path,
- 'ansible-rax-%s-%s.cache' % (
- pyrax.identity.username, regions_str))
-
-
-def _list(regions, refresh_cache=True):
- cache_max_age = int(get_config(p, 'rax', 'cache_max_age',
- 'RAX_CACHE_MAX_AGE', 600))
-
- if (not os.path.exists(get_cache_file_path(regions)) or
- refresh_cache or
- (time() - os.stat(get_cache_file_path(regions))[-1]) > cache_max_age):
-        # Cache file doesn't exist, refresh was requested, or cache is older than cache_max_age
- _list_into_cache(regions)
-
- with open(get_cache_file_path(regions), 'r') as cache_file:
- groups = json.load(cache_file)
- print(json.dumps(groups, sort_keys=True, indent=4))
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud '
- 'inventory module')
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument('--list', action='store_true',
- help='List active servers')
- group.add_argument('--host', help='List details about the specific host')
- parser.add_argument('--refresh-cache', action='store_true', default=False,
-                        help=('Force refresh of cache, making API requests to '
-                              'Rackspace (default: False - use cache files)'))
- return parser.parse_args()
-
-
-def setup():
- default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials')
-
- env = get_config(p, 'rax', 'environment', 'RAX_ENV', None)
- if env:
- pyrax.set_environment(env)
-
- keyring_username = pyrax.get_setting('keyring_username')
-
- # Attempt to grab credentials from environment first
- creds_file = get_config(p, 'rax', 'creds_file',
- 'RAX_CREDS_FILE', None)
- if creds_file is not None:
- creds_file = os.path.expanduser(creds_file)
- else:
- # But if that fails, use the default location of
- # ~/.rackspace_cloud_credentials
- if os.path.isfile(default_creds_file):
- creds_file = default_creds_file
- elif not keyring_username:
- sys.exit('No value in environment variable %s and/or no '
- 'credentials file at %s'
- % ('RAX_CREDS_FILE', default_creds_file))
-
- identity_type = pyrax.get_setting('identity_type')
- pyrax.set_setting('identity_type', identity_type or 'rackspace')
-
- region = pyrax.get_setting('region')
-
- try:
- if keyring_username:
- pyrax.keyring_auth(keyring_username, region=region)
- else:
- pyrax.set_credential_file(creds_file, region=region)
- except Exception as e:
- sys.exit("%s: %s" % (e, e.message))
-
- regions = []
- if region:
- regions.append(region)
- else:
- try:
- # Ansible 2.3+
- region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
- value_type='list')
- except TypeError:
- # Ansible 2.2.x and below
- # pylint: disable=unexpected-keyword-arg
- region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
- islist=True)
-
- for region in region_list:
- region = region.strip().upper()
- if region == 'ALL':
- regions = pyrax.regions
- break
- elif region not in pyrax.regions:
- sys.exit('Unsupported region %s' % region)
- elif region not in regions:
- regions.append(region)
-
- return regions
-
-
-def main():
- args = parse_args()
- regions = setup()
- if args.list:
- _list(regions, refresh_cache=args.refresh_cache)
- elif args.host:
- host(regions, args.host)
- sys.exit(0)
-
-
-p = load_config_file()
-if __name__ == '__main__':
- main()
diff --git a/contrib/inventory/rhv.py b/contrib/inventory/rhv.py
deleted file mode 120000
index e66635dd42..0000000000
--- a/contrib/inventory/rhv.py
+++ /dev/null
@@ -1 +0,0 @@
-ovirt4.py \ No newline at end of file
diff --git a/contrib/inventory/rudder.ini b/contrib/inventory/rudder.ini
deleted file mode 100644
index 748b3d2121..0000000000
--- a/contrib/inventory/rudder.ini
+++ /dev/null
@@ -1,35 +0,0 @@
-# Rudder external inventory script settings
-#
-
-[rudder]
-
-# Your Rudder server API URL, typically:
-# https://rudder.local/rudder/api
-uri = https://rudder.local/rudder/api
-
-# By default, Rudder uses a self-signed certificate. Set this to True
-# to disable certificate validation.
-disable_ssl_certificate_validation = True
-
-# Your Rudder API token, created in the Web interface.
-token = aaabbbccc
-
-# Rudder API version to use, use "latest" for latest available
-# version.
-version = latest
-
-# Property to use as group name in the output.
-# Can generally be "id" or "displayName".
-group_name = displayName
-
-# Fail if there are two groups with the same name or two hosts with the
-# same hostname in the output.
-fail_if_name_collision = True
-
-# We cache the results of Rudder API in a local file
-cache_path = /tmp/ansible-rudder.cache
-
-# The number of seconds a cache file is considered valid. After this many
-# seconds, a new API call will be made, and the cache file will be updated.
-# Set to 0 to disable cache.
-cache_max_age = 500
diff --git a/contrib/inventory/rudder.py b/contrib/inventory/rudder.py
deleted file mode 100755
index 4722fcf1e4..0000000000
--- a/contrib/inventory/rudder.py
+++ /dev/null
@@ -1,296 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2015, Normation SAS
-#
-# Inspired by the EC2 inventory plugin:
-# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-######################################################################
-
-'''
-Rudder external inventory script
-=================================
-
-Generates inventory that Ansible can understand by making API requests to
-a Rudder server. This script is compatible with Rudder 2.10 or later.
-
-The output JSON includes all your Rudder groups, containing the hostnames of
-their nodes. Groups carry a rudder_group_id variable and nodes a rudder_node_id
-variable, holding the Rudder internal id of the item so it can be identified
-uniquely. Host variables also include your node properties, which are
-key => value properties set by the API and specific to each node.
-
-This script assumes there is a rudder.ini file alongside it. To specify a
-different path to rudder.ini, define the RUDDER_INI_PATH environment variable:
-
- export RUDDER_INI_PATH=/path/to/my_rudder.ini
-
-You have to configure your Rudder server information, either in rudder.ini or
-by overriding it with environment variables:
-
- export RUDDER_API_VERSION='latest'
- export RUDDER_API_TOKEN='my_token'
- export RUDDER_API_URI='https://rudder.local/rudder/api'
-'''
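-
-# For illustration only, a --list response has roughly this shape (the group,
-# node and property names below are made-up placeholders):
-#
-# {
-#     "web_servers": {
-#         "hosts": ["node1.example.com"],
-#         "vars": {"rudder_group_id": "example-group-id"}
-#     },
-#     "_meta": {
-#         "hostvars": {
-#             "node1.example.com": {
-#                 "rudder_node_id": "example-node-id",
-#                 "my_property": "some_value"
-#             }
-#         }
-#     }
-# }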
-
-
-import sys
-import os
-import re
-import argparse
-import httplib2 as http
-from time import time
-from ansible.module_utils import six
-from ansible.module_utils.six.moves import configparser
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-
-import json
-
-
-class RudderInventory(object):
- def __init__(self):
- ''' Main execution path '''
-
- # Empty inventory by default
- self.inventory = {}
-
- # Read settings and parse CLI arguments
- self.read_settings()
- self.parse_cli_args()
-
- # Create connection
- self.conn = http.Http(disable_ssl_certificate_validation=self.disable_ssl_validation)
-
- # Cache
- if self.args.refresh_cache:
- self.update_cache()
- elif not self.is_cache_valid():
- self.update_cache()
- else:
- self.load_cache()
-
- data_to_print = {}
-
- if self.args.host:
- data_to_print = self.get_host_info(self.args.host)
- elif self.args.list:
- data_to_print = self.get_list_info()
-
- print(self.json_format_dict(data_to_print, True))
-
- def read_settings(self):
- ''' Reads the settings from the rudder.ini file '''
- if six.PY2:
- config = configparser.SafeConfigParser()
- else:
- config = configparser.ConfigParser()
- rudder_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rudder.ini')
- rudder_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('RUDDER_INI_PATH', rudder_default_ini_path)))
- config.read(rudder_ini_path)
-
- self.token = os.environ.get('RUDDER_API_TOKEN', config.get('rudder', 'token'))
- self.version = os.environ.get('RUDDER_API_VERSION', config.get('rudder', 'version'))
- self.uri = os.environ.get('RUDDER_API_URI', config.get('rudder', 'uri'))
-
- self.disable_ssl_validation = config.getboolean('rudder', 'disable_ssl_certificate_validation')
- self.group_name = config.get('rudder', 'group_name')
- self.fail_if_name_collision = config.getboolean('rudder', 'fail_if_name_collision')
-
- self.cache_path = config.get('rudder', 'cache_path')
- self.cache_max_age = config.getint('rudder', 'cache_max_age')
-
- def parse_cli_args(self):
- ''' Command line argument processing '''
-
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Rudder inventory')
- parser.add_argument('--list', action='store_true', default=True,
- help='List instances (default: True)')
- parser.add_argument('--host', action='store',
- help='Get all the variables about a specific instance')
- parser.add_argument('--refresh-cache', action='store_true', default=False,
- help='Force refresh of cache by making API requests to Rudder (default: False - use cache files)')
- self.args = parser.parse_args()
-
- def is_cache_valid(self):
- ''' Determines if the cache files have expired, or if it is still valid '''
-
- if os.path.isfile(self.cache_path):
- mod_time = os.path.getmtime(self.cache_path)
- current_time = time()
- if (mod_time + self.cache_max_age) > current_time:
- return True
-
- return False
-
- def load_cache(self):
-        ''' Reads the cache from the cache file and sets self.inventory '''
-
- cache = open(self.cache_path, 'r')
- json_cache = cache.read()
-
- try:
- self.inventory = json.loads(json_cache)
- except ValueError as e:
- self.fail_with_error('Could not parse JSON response from local cache', 'parsing local cache')
-
- def write_cache(self):
- ''' Writes data in JSON format to a file '''
-
- json_data = self.json_format_dict(self.inventory, True)
- cache = open(self.cache_path, 'w')
- cache.write(json_data)
- cache.close()
-
- def get_nodes(self):
- ''' Gets the nodes list from Rudder '''
-
- path = '/nodes?select=nodeAndPolicyServer'
- result = self.api_call(path)
-
- nodes = {}
-
- for node in result['data']['nodes']:
- nodes[node['id']] = {}
- nodes[node['id']]['hostname'] = node['hostname']
- if 'properties' in node:
- nodes[node['id']]['properties'] = node['properties']
- else:
- nodes[node['id']]['properties'] = []
-
- return nodes
-
- def get_groups(self):
- ''' Gets the groups list from Rudder '''
-
- path = '/groups'
- result = self.api_call(path)
-
- groups = {}
-
- for group in result['data']['groups']:
- groups[group['id']] = {'hosts': group['nodeIds'], 'name': self.to_safe(group[self.group_name])}
-
- return groups
-
- def update_cache(self):
- ''' Fetches the inventory information from Rudder and creates the inventory '''
-
- nodes = self.get_nodes()
- groups = self.get_groups()
-
- inventory = {}
-
- for group in groups:
- # Check for name collision
- if self.fail_if_name_collision:
- if groups[group]['name'] in inventory:
- self.fail_with_error('Name collision on groups: "%s" appears twice' % groups[group]['name'], 'creating groups')
- # Add group to inventory
- inventory[groups[group]['name']] = {}
- inventory[groups[group]['name']]['hosts'] = []
- inventory[groups[group]['name']]['vars'] = {}
- inventory[groups[group]['name']]['vars']['rudder_group_id'] = group
- for node in groups[group]['hosts']:
- # Add node to group
- inventory[groups[group]['name']]['hosts'].append(nodes[node]['hostname'])
-
- properties = {}
-
- for node in nodes:
- # Check for name collision
- if self.fail_if_name_collision:
- if nodes[node]['hostname'] in properties:
- self.fail_with_error('Name collision on hosts: "%s" appears twice' % nodes[node]['hostname'], 'creating hosts')
- # Add node properties to inventory
- properties[nodes[node]['hostname']] = {}
- properties[nodes[node]['hostname']]['rudder_node_id'] = node
- for node_property in nodes[node]['properties']:
- properties[nodes[node]['hostname']][self.to_safe(node_property['name'])] = node_property['value']
-
- inventory['_meta'] = {}
- inventory['_meta']['hostvars'] = properties
-
- self.inventory = inventory
-
- if self.cache_max_age > 0:
- self.write_cache()
-
- def get_list_info(self):
- ''' Gets inventory information from local cache '''
-
- return self.inventory
-
- def get_host_info(self, hostname):
- ''' Gets information about a specific host from local cache '''
-
- if hostname in self.inventory['_meta']['hostvars']:
- return self.inventory['_meta']['hostvars'][hostname]
- else:
- return {}
-
- def api_call(self, path):
- ''' Performs an API request '''
-
- headers = {
- 'X-API-Token': self.token,
- 'X-API-Version': self.version,
- 'Content-Type': 'application/json;charset=utf-8'
- }
-
- target = urlparse(self.uri + path)
- method = 'GET'
- body = ''
-
- try:
- response, content = self.conn.request(target.geturl(), method, body, headers)
- except Exception:
- self.fail_with_error('Error connecting to Rudder server')
-
- try:
- data = json.loads(content)
- except ValueError as e:
- self.fail_with_error('Could not parse JSON response from Rudder API', 'reading API response')
-
- return data
-
- def fail_with_error(self, err_msg, err_operation=None):
- ''' Logs an error to std err for ansible-playbook to consume and exit '''
- if err_operation:
- err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
- err_msg=err_msg, err_operation=err_operation)
- sys.stderr.write(err_msg)
- sys.exit(1)
-
- def json_format_dict(self, data, pretty=False):
- ''' Converts a dict to a JSON object and dumps it as a formatted
- string '''
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
- def to_safe(self, word):
- ''' Converts 'bad' characters in a string to underscores so they can be
- used as Ansible variable names '''
-
- return re.sub(r'[^A-Za-z0-9\_]', '_', word)
-
-
-# Run the script
-RudderInventory()
diff --git a/contrib/inventory/scaleway.ini b/contrib/inventory/scaleway.ini
deleted file mode 100644
index 99615a124c..0000000000
--- a/contrib/inventory/scaleway.ini
+++ /dev/null
@@ -1,37 +0,0 @@
-# Ansible dynamic inventory script for Scaleway cloud provider
-#
-
-[compute]
-# Fetch inventory for regions. If not defined will read the SCALEWAY_REGION environment variable
-#
-# regions = all
-# regions = ams1
-# regions = par1, ams1
-regions = par1
-
-
-# Define a Scaleway token to perform required queries on the API
-# in order to generate inventory output.
-#
-[auth]
-# Token to authenticate with Scaleway's API.
-# If not defined will read the SCALEWAY_TOKEN environment variable
-#
-api_token = mysecrettoken
-
-
-# To avoid performing excessive calls to Scaleway API you can define a
-# cache for the plugin output. Within the time defined in seconds, latest
-# output will be reused. After that time, the cache will be refreshed.
-#
-[cache]
-cache_max_age = 60
-cache_dir = ~/.ansible/tmp
-
-
-[defaults]
-# You may want to use only public IP addresses or private IP addresses.
-# You can set public_ip_only configuration to get public IPs only.
-# If not defined defaults to retrieving private IP addresses.
-#
-public_ip_only = false
diff --git a/contrib/inventory/scaleway.py b/contrib/inventory/scaleway.py
deleted file mode 100755
index 32999cc0e7..0000000000
--- a/contrib/inventory/scaleway.py
+++ /dev/null
@@ -1,230 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-'''
-External inventory script for Scaleway
-======================================
-
-Shamelessly copied from an existing inventory script.
-
-This script generates an inventory that Ansible can understand by making API
-requests to the Scaleway API.
-
-Requires the requests python library; make sure it is installed before using
-this script (pip install requests, https://pypi.org/project/requests/).
-
-Before using this script you may want to modify the scaleway.ini config file.
-
-This script generates an Ansible hosts file with these host groups:
-
-<hostname>: Defines the host itself, with Scaleway's hostname as group name.
-<tag>: Contains all hosts which have "<tag>" as a tag.
-<region>: Contains all hosts which are in the "<region>" region.
-all: Contains all hosts defined in Scaleway.
-'''
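-
-# For illustration only: with a single server "web1" tagged "prod" in region
-# par1, the generated inventory looks roughly like this (hostname, tag and IP
-# are made-up placeholders):
-#
-# {
-#     "scaleway": {"children": ["web1"], "hosts": []},
-#     "prod": {"children": ["web1"], "hosts": []},
-#     "par1": {"children": ["web1"], "hosts": []},
-#     "web1": ["10.1.2.3"]
-# }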
-
-# (c) 2017, Paul B. <paul@bonaud.fr>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import copy
-import os
-import requests
-from ansible.module_utils import six
-from ansible.module_utils.six.moves import configparser
-import sys
-import time
-import traceback
-
-import json
-
-EMPTY_GROUP = {
- 'children': [],
- 'hosts': []
-}
-
-
-class ScalewayAPI:
- REGIONS = ['par1', 'ams1']
-
- def __init__(self, auth_token, region):
- self.session = requests.session()
- self.session.headers.update({
- 'User-Agent': 'Ansible Python/%s' % (sys.version.split(' ')[0])
- })
- self.session.headers.update({
- 'X-Auth-Token': auth_token.encode('latin1')
- })
- self.base_url = 'https://cp-%s.scaleway.com' % (region)
-
- def servers(self):
- raw = self.session.get('/'.join([self.base_url, 'servers']))
-
- try:
- response = raw.json()
- return self.get_resource('servers', response, raw)
- except ValueError:
- return []
-
- def get_resource(self, resource, response, raw):
- raw.raise_for_status()
-
- if resource in response:
- return response[resource]
- else:
- raise ValueError(
- "Resource %s not found in Scaleway API response" % (resource))
-
-
-def env_or_param(env_key, param=None, fallback=None):
- env_value = os.environ.get(env_key)
-
- if (param, env_value) == (None, None):
- return fallback
- elif env_value is not None:
- return env_value
- else:
- return param
-
-
-def save_cache(data, config):
- ''' saves item to cache '''
-    dpath = os.path.expanduser(config.get('cache', 'cache_dir'))
- try:
- cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'w')
- cache.write(json.dumps(data))
- cache.close()
- except IOError as e:
- pass # not really sure what to do here
-
-
-def get_cache(cache_item, config):
- ''' returns cached item '''
-    dpath = os.path.expanduser(config.get('cache', 'cache_dir'))
- inv = {}
- try:
- cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'r')
- inv = cache.read()
- cache.close()
- except IOError as e:
- pass # not really sure what to do here
-
- return inv
-
-
-def cache_available(config):
- ''' checks if we have a 'fresh' cache available for item requested '''
-
- if config.has_option('cache', 'cache_dir'):
-        dpath = os.path.expanduser(config.get('cache', 'cache_dir'))
-
- try:
- existing = os.stat(
- '/'.join([dpath, 'scaleway_ansible_inventory.json']))
- except OSError:
- return False
-
- if config.has_option('cache', 'cache_max_age'):
- maxage = config.get('cache', 'cache_max_age')
- else:
- maxage = 60
- if (int(time.time()) - int(existing.st_mtime)) <= int(maxage):
- return True
-
- return False
-
-
-def generate_inv_from_api(config):
- try:
- inventory['scaleway'] = copy.deepcopy(EMPTY_GROUP)
-
- auth_token = None
- if config.has_option('auth', 'api_token'):
- auth_token = config.get('auth', 'api_token')
- auth_token = env_or_param('SCALEWAY_TOKEN', param=auth_token)
- if auth_token is None:
- sys.stderr.write('ERROR: missing authentication token for Scaleway API')
- sys.exit(1)
-
- if config.has_option('compute', 'regions'):
- regions = config.get('compute', 'regions')
- if regions == 'all':
- regions = ScalewayAPI.REGIONS
- else:
- regions = map(str.strip, regions.split(','))
- else:
- regions = [
- env_or_param('SCALEWAY_REGION', fallback='par1')
- ]
-
- for region in regions:
- api = ScalewayAPI(auth_token, region)
-
- for server in api.servers():
- hostname = server['hostname']
- if config.has_option('defaults', 'public_ip_only') and config.getboolean('defaults', 'public_ip_only'):
- ip = server['public_ip']['address']
- else:
- ip = server['private_ip']
- for server_tag in server['tags']:
- if server_tag not in inventory:
- inventory[server_tag] = copy.deepcopy(EMPTY_GROUP)
- inventory[server_tag]['children'].append(hostname)
- if region not in inventory:
- inventory[region] = copy.deepcopy(EMPTY_GROUP)
- inventory[region]['children'].append(hostname)
- inventory['scaleway']['children'].append(hostname)
- inventory[hostname] = []
- inventory[hostname].append(ip)
-
- return inventory
- except Exception:
- # Return empty hosts output
- traceback.print_exc()
- return {'scaleway': {'hosts': []}, '_meta': {'hostvars': {}}}
-
-
-def get_inventory(config):
- ''' Reads the inventory from cache or Scaleway api '''
-
- if cache_available(config):
- inv = get_cache('scaleway_ansible_inventory.json', config)
- else:
- inv = generate_inv_from_api(config)
-
- save_cache(inv, config)
- return json.dumps(inv)
-
-
-if __name__ == '__main__':
- inventory = {}
-
- # Read config
- if six.PY3:
- config = configparser.ConfigParser()
- else:
- config = configparser.SafeConfigParser()
- for configfilename in [os.path.abspath(sys.argv[0]).rsplit('.py')[0] + '.ini', 'scaleway.ini']:
- if os.path.exists(configfilename):
- config.read(configfilename)
- break
-
- if cache_available(config):
- inventory = get_cache('scaleway_ansible_inventory.json', config)
- else:
- inventory = get_inventory(config)
-
- # return to ansible
- sys.stdout.write(str(inventory))
- sys.stdout.flush()
diff --git a/contrib/inventory/serf.py b/contrib/inventory/serf.py
deleted file mode 100755
index 8a24027dd4..0000000000
--- a/contrib/inventory/serf.py
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2015, Marc Abramowitz <marca@surveymonkey.com>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Dynamic inventory script which lets you use nodes discovered by Serf
-# (https://serfdom.io/).
-#
-# Requires the `serfclient` Python module from
-# https://pypi.org/project/serfclient/
-#
-# Environment variables
-# ---------------------
-# - `SERF_RPC_ADDR`
-# - `SERF_RPC_AUTH`
-#
-# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr
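-#
-# Example invocations (the RPC address shown is serf's documented default;
-# adjust for your cluster):
-#   SERF_RPC_ADDR=127.0.0.1:7373 ./serf.py --list
-#   ./serf.py --host node1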
-
-import argparse
-import collections
-import os
-import sys
-
-# https://pypi.org/project/serfclient/
-from serfclient import SerfClient, EnvironmentConfig
-
-import json
-
-_key = 'serf'
-
-
-def _serf_client():
- env = EnvironmentConfig()
- return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key)
-
-
-def get_serf_members_data():
- return _serf_client().members().body['Members']
-
-
-def get_nodes(data):
- return [node['Name'] for node in data]
-
-
-def get_groups(data):
- groups = collections.defaultdict(list)
-
- for node in data:
- for key, value in node['Tags'].items():
- groups[value].append(node['Name'])
-
- return groups
-
-
-def get_meta(data):
- meta = {'hostvars': {}}
- for node in data:
- meta['hostvars'][node['Name']] = node['Tags']
- return meta
-
-
-def print_list():
- data = get_serf_members_data()
- nodes = get_nodes(data)
- groups = get_groups(data)
- meta = get_meta(data)
- inventory_data = {_key: nodes, '_meta': meta}
- inventory_data.update(groups)
- print(json.dumps(inventory_data))
-
-
-def print_host(host):
- data = get_serf_members_data()
- meta = get_meta(data)
- print(json.dumps(meta['hostvars'][host]))
-
-
-def get_args(args_list):
- parser = argparse.ArgumentParser(
- description='ansible inventory script reading from serf cluster')
- mutex_group = parser.add_mutually_exclusive_group(required=True)
- help_list = 'list all hosts from serf cluster'
- mutex_group.add_argument('--list', action='store_true', help=help_list)
- help_host = 'display variables for a host'
- mutex_group.add_argument('--host', help=help_host)
- return parser.parse_args(args_list)
-
-
-def main(args_list):
- args = get_args(args_list)
- if args.list:
- print_list()
- if args.host:
- print_host(args.host)
-
-
-if __name__ == '__main__':
- main(sys.argv[1:])
diff --git a/contrib/inventory/softlayer.py b/contrib/inventory/softlayer.py
deleted file mode 100755
index 016eb4c060..0000000000
--- a/contrib/inventory/softlayer.py
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/usr/bin/env python
-"""
-SoftLayer external inventory script.
-
-The SoftLayer Python API client is required. Use `pip install softlayer` to install it.
-You have a few different options for configuring your username and api_key. You can pass
-environment variables (SL_USERNAME and SL_API_KEY). You can also write an INI file to
-~/.softlayer or /etc/softlayer.conf. For more information see the SL API docs at:
-- https://softlayer-python.readthedocs.io/en/latest/config_file.html
-
-The SoftLayer Python client has a built in command for saving this configuration file
-via the command `sl config setup`.
-"""
-
-# Copyright (C) 2014 AJ Bourg <aj@ajbourg.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# I found the structure of the ec2.py script very helpful as an example
-# as I put this together. Thanks to whoever wrote that script!
-#
-
-import SoftLayer
-import re
-import argparse
-import itertools
-import json
-
-
-class SoftLayerInventory(object):
- common_items = [
- 'id',
- 'globalIdentifier',
- 'hostname',
- 'domain',
- 'fullyQualifiedDomainName',
- 'primaryBackendIpAddress',
- 'primaryIpAddress',
- 'datacenter',
- 'tagReferences',
- 'userData.value',
- ]
-
- vs_items = [
- 'lastKnownPowerState.name',
- 'powerState',
- 'maxCpu',
- 'maxMemory',
- 'activeTransaction.transactionStatus[friendlyName,name]',
- 'status',
- ]
-
- hw_items = [
- 'hardwareStatusId',
- 'processorPhysicalCoreAmount',
- 'memoryCapacity',
- ]
-
- def _empty_inventory(self):
- return {"_meta": {"hostvars": {}}}
-
- def __init__(self):
- '''Main path'''
-
- self.inventory = self._empty_inventory()
-
- self.parse_options()
-
- if self.args.list:
- self.get_all_servers()
- print(self.json_format_dict(self.inventory, True))
- elif self.args.host:
- self.get_all_servers()
- print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True))
-
- def to_safe(self, word):
- '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups'''
-
- return re.sub(r"[^A-Za-z0-9\-\.]", "_", word)
-
- def push(self, my_dict, key, element):
- '''Push an element onto an array that may not have been defined in the dict'''
-
- if key in my_dict:
- my_dict[key].append(element)
- else:
- my_dict[key] = [element]
-
- def parse_options(self):
- '''Parse all the arguments from the CLI'''
-
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on SoftLayer')
- parser.add_argument('--list', action='store_true', default=False,
- help='List instances (default: False)')
- parser.add_argument('--host', action='store',
- help='Get all the variables about a specific instance')
- self.args = parser.parse_args()
-
- def json_format_dict(self, data, pretty=False):
- '''Converts a dict to a JSON object and dumps it as a formatted string'''
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
- def process_instance(self, instance, instance_type="virtual"):
- '''Populate the inventory dictionary with any instance information'''
-
- # only want active instances
- if 'status' in instance and instance['status']['name'] != 'Active':
- return
-
- # and powered on instances
- if 'powerState' in instance and instance['powerState']['name'] != 'Running':
- return
-
- # 5 is active for hardware... see https://forums.softlayer.com/forum/softlayer-developer-network/general-discussion/2955-hardwarestatusid
- if 'hardwareStatusId' in instance and instance['hardwareStatusId'] != 5:
- return
-
- # if there's no IP address, we can't reach it
- if 'primaryIpAddress' not in instance:
- return
-
- instance['userData'] = instance['userData'][0]['value'] if instance['userData'] else ''
-
- dest = instance['primaryIpAddress']
-
- instance['tags'] = list()
- for tag in instance['tagReferences']:
- instance['tags'].append(tag['tag']['name'])
-
- del instance['tagReferences']
-
- self.inventory["_meta"]["hostvars"][dest] = instance
-
- # Inventory: group by memory
- if 'maxMemory' in instance:
- self.push(self.inventory, self.to_safe('memory_' + str(instance['maxMemory'])), dest)
- elif 'memoryCapacity' in instance:
- self.push(self.inventory, self.to_safe('memory_' + str(instance['memoryCapacity'])), dest)
-
- # Inventory: group by cpu count
- if 'maxCpu' in instance:
- self.push(self.inventory, self.to_safe('cpu_' + str(instance['maxCpu'])), dest)
- elif 'processorPhysicalCoreAmount' in instance:
- self.push(self.inventory, self.to_safe('cpu_' + str(instance['processorPhysicalCoreAmount'])), dest)
-
- # Inventory: group by datacenter
- self.push(self.inventory, self.to_safe('datacenter_' + instance['datacenter']['name']), dest)
-
- # Inventory: group by hostname
- self.push(self.inventory, self.to_safe(instance['hostname']), dest)
-
- # Inventory: group by FQDN
- self.push(self.inventory, self.to_safe(instance['fullyQualifiedDomainName']), dest)
-
- # Inventory: group by domain
- self.push(self.inventory, self.to_safe(instance['domain']), dest)
-
- # Inventory: group by type (hardware/virtual)
- self.push(self.inventory, instance_type, dest)
-
- for tag in instance['tags']:
- self.push(self.inventory, tag, dest)
-
- def get_virtual_servers(self):
- '''Get all the CCI instances'''
- vs = SoftLayer.VSManager(self.client)
- mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.vs_items))
- instances = vs.list_instances(mask=mask)
-
- for instance in instances:
- self.process_instance(instance)
-
- def get_physical_servers(self):
- '''Get all the hardware instances'''
- hw = SoftLayer.HardwareManager(self.client)
- mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.hw_items))
- instances = hw.list_hardware(mask=mask)
-
- for instance in instances:
- self.process_instance(instance, 'hardware')
-
- def get_all_servers(self):
- self.client = SoftLayer.Client()
- self.get_virtual_servers()
- self.get_physical_servers()
-
-
-SoftLayerInventory()
diff --git a/contrib/inventory/spacewalk.ini b/contrib/inventory/spacewalk.ini
deleted file mode 100644
index 5433c4221b..0000000000
--- a/contrib/inventory/spacewalk.ini
+++ /dev/null
@@ -1,16 +0,0 @@
-# Put this ini-file in the same directory as spacewalk.py
-# Command line options have precedence over options defined in here.
-
-[spacewalk]
-# To limit the script on one organization in spacewalk, uncomment org_number
-# and fill in the organization ID:
-# org_number=2
-
-# To prefix the group names with the organization ID set prefix_org_name=true.
-# This is convenient when org_number is not set and you have the same group names
-# in multiple organizations within spacewalk
-# The prefix is "org_number-"
-prefix_org_name=false
-
-# Default cache_age for files created with spacewalk-report is 300sec.
-cache_age=300
diff --git a/contrib/inventory/spacewalk.py b/contrib/inventory/spacewalk.py
deleted file mode 100755
index dc96b1fe3b..0000000000
--- a/contrib/inventory/spacewalk.py
+++ /dev/null
@@ -1,237 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Spacewalk external inventory script
-=================================
-
-Ansible has a feature where instead of reading from /etc/ansible/hosts
-as a text file, it can query external programs to obtain the list
-of hosts, groups the hosts are in, and even variables to assign to each host.
-
-To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
-This, more or less, allows you to keep one central database containing
-info about all of your managed instances.
-
-This script is dependent upon the spacewalk-reports package being installed
-on the same machine. It is basically a CSV-to-JSON converter from the
-output of "spacewalk-report system-groups-systems|inventory".
-
-Tested with Ansible 1.9.2 and spacewalk 2.3
-"""
-#
-# Author:: Jon Miller <jonEbird@gmail.com>
-# Copyright:: Copyright (c) 2013, Jon Miller
-#
-# Extended for support of multiple organizations and
-# adding the "_meta" dictionary to --list output by
-# Bernhard Lichtinger <bernhard.lichtinger@lrz.de> 2015
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or (at
-# your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-from __future__ import print_function
-
-import sys
-import os
-import time
-from optparse import OptionParser
-import subprocess
-import json
-
-from ansible.module_utils.six import iteritems
-from ansible.module_utils.six.moves import configparser as ConfigParser
-
-
-base_dir = os.path.dirname(os.path.realpath(__file__))
-default_ini_file = os.path.join(base_dir, "spacewalk.ini")
-
-SW_REPORT = '/usr/bin/spacewalk-report'
-CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports")
-CACHE_AGE = 300 # 5min
-INI_FILE = os.path.expanduser(os.path.expandvars(os.environ.get("SPACEWALK_INI_PATH", default_ini_file)))
-
-
-# Sanity check
-if not os.path.exists(SW_REPORT):
- print('Error: %s is required for operation.' % (SW_REPORT), file=sys.stderr)
- sys.exit(1)
-
-# Pre-startup work
-if not os.path.exists(CACHE_DIR):
- os.mkdir(CACHE_DIR)
- os.chmod(CACHE_DIR, 0o2775)
-
-# Helper functions
-# ------------------------------
-
-
-def spacewalk_report(name):
- """Yield a dictionary form of each CSV output produced by the specified
- spacewalk-report
- """
- cache_filename = os.path.join(CACHE_DIR, name)
- if not os.path.exists(cache_filename) or \
- (time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE:
- # Update the cache
- fh = open(cache_filename, 'w')
- p = subprocess.Popen([SW_REPORT, name], stdout=fh)
- p.wait()
- fh.close()
-
- with open(cache_filename, 'r') as f:
- lines = f.readlines()
- keys = lines[0].strip().split(',')
- # add 'spacewalk_' prefix to the keys
- keys = ['spacewalk_' + key for key in keys]
- for line in lines[1:]:
- values = line.strip().split(',')
- if len(keys) == len(values):
- yield dict(zip(keys, values))
-
-
-# Options
-# ------------------------------
-
-parser = OptionParser(usage="%prog [options] --list | --host <machine>")
-parser.add_option('--list', default=False, dest="list", action="store_true",
- help="Produce a JSON consumable grouping of servers for Ansible")
-parser.add_option('--host', default=None, dest="host",
- help="Generate additional host specific details for given host for Ansible")
-parser.add_option('-H', '--human', dest="human",
- default=False, action="store_true",
- help="Produce a friendlier version of either server list or host detail")
-parser.add_option('-o', '--org', default=None, dest="org_number",
- help="Limit to spacewalk organization number")
-parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true",
- help="Prefix the group name with the organization number")
-(options, args) = parser.parse_args()
-
-
-# read spacewalk.ini if present
-# ------------------------------
-if os.path.exists(INI_FILE):
- config = ConfigParser.SafeConfigParser()
- config.read(INI_FILE)
- if config.has_option('spacewalk', 'cache_age'):
-        CACHE_AGE = config.getint('spacewalk', 'cache_age')  # get() returns a string, which would break the age comparison
- if not options.org_number and config.has_option('spacewalk', 'org_number'):
- options.org_number = config.get('spacewalk', 'org_number')
- if not options.prefix_org_name and config.has_option('spacewalk', 'prefix_org_name'):
- options.prefix_org_name = config.getboolean('spacewalk', 'prefix_org_name')
-
-
-# Generate dictionary for mapping group_id to org_id
-# ------------------------------
-org_groups = {}
-try:
- for group in spacewalk_report('system-groups'):
- org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id']
-
-except (OSError) as e:
- print('Problem executing the command "%s system-groups": %s' %
- (SW_REPORT, str(e)), file=sys.stderr)
- sys.exit(2)
-
-
-# List out the known servers from Spacewalk
-# ------------------------------
-if options.list:
-
- # to build the "_meta"-Group with hostvars first create dictionary for later use
- host_vars = {}
- try:
- for item in spacewalk_report('inventory'):
- host_vars[item['spacewalk_profile_name']] = dict((key, (value.split(';') if ';' in value else value)) for key, value in item.items())
-
- except (OSError) as e:
- print('Problem executing the command "%s inventory": %s' %
- (SW_REPORT, str(e)), file=sys.stderr)
- sys.exit(2)
-
- groups = {}
- meta = {"hostvars": {}}
- try:
- for system in spacewalk_report('system-groups-systems'):
- # first get org_id of system
- org_id = org_groups[system['spacewalk_group_id']]
-
- # shall we add the org_id as prefix to the group name:
- if options.prefix_org_name:
- prefix = org_id + "-"
- group_name = prefix + system['spacewalk_group_name']
- else:
- group_name = system['spacewalk_group_name']
-
- # if we are limited to one organization:
- if options.org_number:
- if org_id == options.org_number:
- if group_name not in groups:
- groups[group_name] = set()
-
- groups[group_name].add(system['spacewalk_server_name'])
- if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]:
- meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']]
- # or we list all groups and systems:
- else:
- if group_name not in groups:
- groups[group_name] = set()
-
- groups[group_name].add(system['spacewalk_server_name'])
- if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]:
- meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']]
-
- except (OSError) as e:
- print('Problem executing the command "%s system-groups-systems": %s' %
- (SW_REPORT, str(e)), file=sys.stderr)
- sys.exit(2)
-
- if options.human:
- for group, systems in iteritems(groups):
- print('[%s]\n%s\n' % (group, '\n'.join(systems)))
- else:
- final = dict([(k, list(s)) for k, s in iteritems(groups)])
- final["_meta"] = meta
- print(json.dumps(final))
- # print(json.dumps(groups))
- sys.exit(0)
-
-
-# Return detailed information concerning the spacewalk server
-# ------------------------------
-elif options.host:
-
- host_details = {}
- try:
- for system in spacewalk_report('inventory'):
- if system['spacewalk_hostname'] == options.host:
- host_details = system
- break
-
- except (OSError) as e:
- print('Problem executing the command "%s inventory": %s' %
- (SW_REPORT, str(e)), file=sys.stderr)
- sys.exit(2)
-
- if options.human:
- print('Host: %s' % options.host)
- for k, v in iteritems(host_details):
- print(' %s: %s' % (k, '\n '.join(v.split(';'))))
- else:
- print(json.dumps(dict((key, (value.split(';') if ';' in value else value)) for key, value in host_details.items())))
- sys.exit(0)
-
-else:
-
- parser.print_help()
- sys.exit(1)
diff --git a/contrib/inventory/ssh_config.py b/contrib/inventory/ssh_config.py
deleted file mode 100755
index c7db6c7a88..0000000000
--- a/contrib/inventory/ssh_config.py
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2014, Tomas Karasek <tomas.karasek@digile.fi>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Dynamic inventory script which lets you use aliases from ~/.ssh/config.
-#
-# There were some issues with various Paramiko versions. I took a deeper look
-# and tested heavily. Now, ansible parses this alright with Paramiko versions
-# 1.7.2 to 1.15.2.
-#
-# It prints inventory based on parsed ~/.ssh/config. You can refer to hosts
-# with their alias, rather than with the IP or hostname. It takes advantage
-# of the ansible_ssh_{host,port,user,private_key_file} variables.
-#
-# If you have in your .ssh/config:
-# Host git
-# HostName git.domain.org
-# User tkarasek
-# IdentityFile /home/tomk/keys/thekey
-#
-# You can do
-# $ ansible git -m ping
-#
-# Example invocation:
-# ssh_config.py --list
-# ssh_config.py --host <alias>
-
-import argparse
-import os.path
-import sys
-
-import json
-
-import paramiko
-
-from ansible.module_utils.common._collections_compat import MutableSequence
-
-SSH_CONF = '~/.ssh/config'
-
-_key = 'ssh_config'
-
-_ssh_to_ansible = [('user', 'ansible_ssh_user'),
- ('hostname', 'ansible_ssh_host'),
- ('identityfile', 'ansible_ssh_private_key_file'),
- ('port', 'ansible_ssh_port')]
-
-
-def get_config():
- if not os.path.isfile(os.path.expanduser(SSH_CONF)):
- return {}
- with open(os.path.expanduser(SSH_CONF)) as f:
- cfg = paramiko.SSHConfig()
- cfg.parse(f)
- ret_dict = {}
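-    # paramiko offers no public API for enumerating every parsed Host block,
-    # so this walks its internal _config list; each entry is a dict holding a
-    # 'host' pattern (string or list) plus the options for that pattern.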
- for d in cfg._config:
- if isinstance(d['host'], MutableSequence):
- alias = d['host'][0]
- else:
- alias = d['host']
- if ('?' in alias) or ('*' in alias):
- continue
- _copy = dict(d)
- del _copy['host']
- if 'config' in _copy:
- ret_dict[alias] = _copy['config']
- else:
- ret_dict[alias] = _copy
- return ret_dict
-
-
-def print_list():
- cfg = get_config()
- meta = {'hostvars': {}}
- for alias, attributes in cfg.items():
- tmp_dict = {}
- for ssh_opt, ans_opt in _ssh_to_ansible:
- if ssh_opt in attributes:
- # If the attribute is a list, just take the first element.
- # Private key is returned in a list for some reason.
- attr = attributes[ssh_opt]
- if isinstance(attr, MutableSequence):
- attr = attr[0]
- tmp_dict[ans_opt] = attr
- if tmp_dict:
- meta['hostvars'][alias] = tmp_dict
-
- print(json.dumps({_key: list(set(meta['hostvars'].keys())), '_meta': meta}))
-
-
-def print_host(host):
- cfg = get_config()
- print(json.dumps(cfg[host]))
-
-
-def get_args(args_list):
- parser = argparse.ArgumentParser(
- description='ansible inventory script parsing .ssh/config')
- mutex_group = parser.add_mutually_exclusive_group(required=True)
- help_list = 'list all hosts from .ssh/config inventory'
- mutex_group.add_argument('--list', action='store_true', help=help_list)
- help_host = 'display variables for a host'
- mutex_group.add_argument('--host', help=help_host)
- return parser.parse_args(args_list)
-
-
-def main(args_list):
-
- args = get_args(args_list)
- if args.list:
- print_list()
- if args.host:
- print_host(args.host)
-
-
-if __name__ == '__main__':
- main(sys.argv[1:])
diff --git a/contrib/inventory/stacki.py b/contrib/inventory/stacki.py
deleted file mode 100755
index 98f35c4fb5..0000000000
--- a/contrib/inventory/stacki.py
+++ /dev/null
@@ -1,188 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2016, Hugh Ma <hugh.ma@flextronics.com>
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-
-# Stacki inventory script
-# Configure stacki.yml with proper auth information and place in the following:
-# - ../inventory/stacki.yml
-# - /etc/stacki/stacki.yml
-# - /etc/ansible/stacki.yml
-# The stacki.yml file can contain entries for authentication information
-# regarding the Stacki front-end node.
-#
-# use_hostnames uses the hostname rather than the interface IP for the connection
-#
-#
-
-"""
-Example Usage:
- List Stacki Nodes
-    $ ./stacki.py --list
-
-
-Example Configuration:
----
-stacki:
- auth:
- stacki_user: admin
- stacki_password: abc12345678910
- stacki_endpoint: http://192.168.200.50/stack
-use_hostnames: false
-"""
-
-import argparse
-import os
-import sys
-import yaml
-from distutils.version import StrictVersion
-
-import json
-
-try:
- import requests
-except Exception:
- sys.exit('requests package is required for this inventory script')
-
-
-CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml']
-
-
-def stack_auth(params):
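-    # Two-step login against the Stacki REST endpoint: an initial GET yields
-    # a CSRF cookie, the credentials are POSTed along with that token, and
-    # the resulting csrftoken/sessionid cookies are reused for later calls.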
- endpoint = params['stacki_endpoint']
- auth_creds = {'USERNAME': params['stacki_user'],
- 'PASSWORD': params['stacki_password']}
-
- client = requests.session()
- client.get(endpoint)
-
- init_csrf = client.cookies['csrftoken']
-
- header = {'csrftoken': init_csrf, 'X-CSRFToken': init_csrf,
- 'Content-type': 'application/x-www-form-urlencoded'}
-
- login_endpoint = endpoint + "/login"
-
- login_req = client.post(login_endpoint, data=auth_creds, headers=header)
-
- csrftoken = login_req.cookies['csrftoken']
- sessionid = login_req.cookies['sessionid']
-
- auth_creds.update(CSRFTOKEN=csrftoken, SESSIONID=sessionid)
-
- return client, auth_creds
-
-
-def stack_build_header(auth_creds):
- header = {'csrftoken': auth_creds['CSRFTOKEN'],
- 'X-CSRFToken': auth_creds['CSRFTOKEN'],
- 'sessionid': auth_creds['SESSIONID'],
- 'Content-type': 'application/json'}
-
- return header
-
-
-def stack_host_list(endpoint, header, client):
-
- stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host"}),
- headers=header)
- return json.loads(stack_r.json())
-
-
-def stack_net_list(endpoint, header, client):
-
- stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host interface"}),
- headers=header)
- return json.loads(stack_r.json())
-
-
-def format_meta(hostdata, intfdata, config):
- use_hostnames = config['use_hostnames']
- meta = dict(all=dict(hosts=list()),
- frontends=dict(hosts=list()),
- backends=dict(hosts=list()),
- _meta=dict(hostvars=dict()))
-
- # Iterate through list of dicts of hosts and remove
- # environment key as it causes conflicts
- for host in hostdata:
- del host['environment']
- meta['_meta']['hostvars'][host['host']] = host
- meta['_meta']['hostvars'][host['host']]['interfaces'] = list()
-
- # @bbyhuy to improve readability in next iteration
-
- for intf in intfdata:
- if intf['host'] in meta['_meta']['hostvars']:
- meta['_meta']['hostvars'][intf['host']]['interfaces'].append(intf)
- if intf['default'] is True:
- meta['_meta']['hostvars'][intf['host']]['ansible_host'] = intf['ip']
- if not use_hostnames:
- meta['all']['hosts'].append(intf['ip'])
- if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend':
- meta['backends']['hosts'].append(intf['ip'])
- else:
- meta['frontends']['hosts'].append(intf['ip'])
- else:
- meta['all']['hosts'].append(intf['host'])
- if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend':
- meta['backends']['hosts'].append(intf['host'])
- else:
- meta['frontends']['hosts'].append(intf['host'])
- return meta
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description='Stacki Inventory Module')
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument('--list', action='store_true',
- help='List active hosts')
- group.add_argument('--host', help='List details about the specific host')
-
- return parser.parse_args()
-
-
-def main():
- args = parse_args()
-
- if StrictVersion(requests.__version__) < StrictVersion("2.4.3"):
- sys.exit('requests>=2.4.3 is required for this inventory script')
-
- try:
- config_files = CONFIG_FILES
- config_files.append(os.path.dirname(os.path.realpath(__file__)) + '/stacki.yml')
- config = None
- for cfg_file in config_files:
- if os.path.isfile(cfg_file):
- stream = open(cfg_file, 'r')
- config = yaml.safe_load(stream)
- break
- if not config:
- sys.stderr.write("No config file found at {0}\n".format(config_files))
- sys.exit(1)
- client, auth_creds = stack_auth(config['stacki']['auth'])
- header = stack_build_header(auth_creds)
- host_list = stack_host_list(config['stacki']['auth']['stacki_endpoint'], header, client)
- intf_list = stack_net_list(config['stacki']['auth']['stacki_endpoint'], header, client)
- final_meta = format_meta(host_list, intf_list, config)
- print(json.dumps(final_meta, indent=4))
- except Exception as e:
-        sys.stderr.write('%s\n' % e)
- sys.exit(1)
- sys.exit(0)
-
-
-if __name__ == '__main__':
- main()
diff --git a/contrib/inventory/stacki.yml b/contrib/inventory/stacki.yml
deleted file mode 100644
index 2e31c72cbc..0000000000
--- a/contrib/inventory/stacki.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-stacki:
- auth:
- stacki_user: admin
- stacki_password: GhYgWut1hfGbbnstmbW3m-bJbeME-3EvC20rF1LHrDM
- stacki_endpoint: http://192.168.200.50/stack
-use_hostnames: false \ No newline at end of file
diff --git a/contrib/inventory/vagrant.py b/contrib/inventory/vagrant.py
deleted file mode 100755
index 96517c30d3..0000000000
--- a/contrib/inventory/vagrant.py
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/env python
-"""
-Vagrant external inventory script. Automatically finds the IP of the booted vagrant vm(s), and
-returns it under the host group 'vagrant'
-
-Example Vagrant configuration using this script:
-
- config.vm.provision :ansible do |ansible|
- ansible.playbook = "./provision/your_playbook.yml"
- ansible.inventory_path = "./provision/inventory/vagrant.py"
- ansible.verbose = true
- end
-"""
-
-# Copyright (C) 2013 Mark Mandel <mark@compoundtheory.com>
-# 2015 Igor Khomyakov <homyakov@gmail.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Thanks to the spacewalk.py inventory script for giving me the basic structure
-# of this.
-#
-
-import sys
-import os.path
-import subprocess
-import re
-from paramiko import SSHConfig
-from optparse import OptionParser
-from collections import defaultdict
-import json
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.six.moves import StringIO
-
-
-_group = 'vagrant' # a default group
-_ssh_to_ansible = [('user', 'ansible_user'),
- ('hostname', 'ansible_host'),
- ('identityfile', 'ansible_ssh_private_key_file'),
- ('port', 'ansible_port')]
-
-# Options
-# ------------------------------
-
-parser = OptionParser(usage="%prog [options] --list | --host <machine>")
-parser.add_option('--list', default=False, dest="list", action="store_true",
- help="Produce a JSON consumable grouping of Vagrant servers for Ansible")
-parser.add_option('--host', default=None, dest="host",
- help="Generate additional host specific details for given host for Ansible")
-(options, args) = parser.parse_args()
-
-#
-# helper functions
-#
-
-
-# get all the ssh configs for all boxes in an array of dictionaries.
-def get_ssh_config():
- return dict((k, get_a_ssh_config(k)) for k in list_running_boxes())
-
-
-# list all the running boxes
-def list_running_boxes():
-
- output = to_text(subprocess.check_output(["vagrant", "status"]), errors='surrogate_or_strict').split('\n')
-
- boxes = []
-
- for line in output:
- matcher = re.search(r"([^\s]+)[\s]+running \(.+", line)
- if matcher:
- boxes.append(matcher.group(1))
-
- return boxes
-
-
-# get the ssh config for a single box
-def get_a_ssh_config(box_name):
- """Gives back a map of all the machine's ssh configurations"""
-
- output = to_text(subprocess.check_output(["vagrant", "ssh-config", box_name]), errors='surrogate_or_strict')
- config = SSHConfig()
- config.parse(StringIO(output))
- host_config = config.lookup(box_name)
-
-    # man 5 ssh_config:
-    # > It is possible to have multiple identity files ...
-    # > all these identities will be tried in sequence.
-    # keep the first identity file that actually exists on disk
-    for identity_file in host_config['identityfile']:
-        if os.path.isfile(identity_file):
-            host_config['identityfile'] = identity_file
-            break
-
- return dict((v, host_config[k]) for k, v in _ssh_to_ansible)
-
-
-# List out servers that vagrant has running
-# ------------------------------
-if options.list:
- ssh_config = get_ssh_config()
- meta = defaultdict(dict)
-
- for host in ssh_config:
- meta['hostvars'][host] = ssh_config[host]
-
- print(json.dumps({_group: list(ssh_config.keys()), '_meta': meta}))
- sys.exit(0)
-
-# Get out the host details
-# ------------------------------
-elif options.host:
- print(json.dumps(get_a_ssh_config(options.host)))
- sys.exit(0)
-
-# Print out help
-# ------------------------------
-else:
- parser.print_help()
- sys.exit(0)
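For a single running box, `./vagrant.py --list` would emit JSON along these lines
(the box name, path, and connection details are illustrative, not fixed):

    {"vagrant": ["default"],
     "_meta": {"hostvars": {"default": {
         "ansible_user": "vagrant",
         "ansible_host": "127.0.0.1",
         "ansible_port": "2222",
         "ansible_ssh_private_key_file": "/home/user/vm/.vagrant/machines/default/virtualbox/private_key"}}}}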
diff --git a/contrib/inventory/vbox.py b/contrib/inventory/vbox.py
deleted file mode 100755
index 7a0ed702ae..0000000000
--- a/contrib/inventory/vbox.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env python
-
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-from subprocess import Popen, PIPE
-
-import json
-
-
-class SetEncoder(json.JSONEncoder):
-    """JSON encoder that serialises sets as lists, since JSON has no set type."""
-    def default(self, obj):
-        if isinstance(obj, set):
-            return list(obj)
-        return json.JSONEncoder.default(self, obj)
-
-
-VBOX = "VBoxManage"
-
-
-def get_hosts(host=None):
-
- returned = {}
-    try:
-        if host:
-            # universal_newlines=True so stdout is text on Python 3 as well
-            p = Popen([VBOX, 'showvminfo', host], stdout=PIPE, universal_newlines=True)
-        else:
-            returned = {'all': set(), '_metadata': {}}
-            p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE, universal_newlines=True)
-    except Exception as e:
-        sys.stderr.write('Failed to run %s: %s\n' % (VBOX, e))
-        sys.exit(1)
-
- hostvars = {}
- prevkey = pref_k = ''
-
- for line in p.stdout.readlines():
-
- try:
- k, v = line.split(':', 1)
- except Exception:
- continue
-
- if k == '':
- continue
-
- v = v.strip()
- if k.startswith('Name'):
- if v not in hostvars:
- curname = v
- hostvars[curname] = {}
-                try:    # try to get the guest's IP from VirtualBox guest properties
-                    x = Popen([VBOX, 'guestproperty', 'get', curname, "/VirtualBox/GuestInfo/Net/0/V4/IP"],
-                              stdout=PIPE, universal_newlines=True)
-                    ipinfo = x.stdout.read()
- if 'Value' in ipinfo:
- a, ip = ipinfo.split(':', 1)
- hostvars[curname]['ansible_ssh_host'] = ip.strip()
- except Exception:
- pass
-
- continue
-
- if not host:
- if k == 'Groups':
- for group in v.split('/'):
- if group:
- if group not in returned:
- returned[group] = set()
- returned[group].add(curname)
- returned['all'].add(curname)
- continue
-
- pref_k = 'vbox_' + k.strip().replace(' ', '_')
- if k.startswith(' '):
- if prevkey not in hostvars[curname]:
- hostvars[curname][prevkey] = {}
- hostvars[curname][prevkey][pref_k] = v
- else:
- if v != '':
- hostvars[curname][pref_k] = v
-
- prevkey = pref_k
-
- if not host:
- returned['_metadata']['hostvars'] = hostvars
- else:
- returned = hostvars[host]
- return returned
-
-
-if __name__ == '__main__':
-
- inventory = {}
- hostname = None
-
- if len(sys.argv) > 1:
- if sys.argv[1] == "--host":
- hostname = sys.argv[2]
-
- if hostname:
- inventory = get_hosts(hostname)
- else:
- inventory = get_hosts()
-
- sys.stdout.write(json.dumps(inventory, indent=2, cls=SetEncoder))
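Given a VM "vm1" in VirtualBox group "/staging", the default list invocation of
this script would print JSON shaped roughly like the sketch below; names and
values are illustrative. Note that the script emits a "_metadata" key rather
than Ansible's usual "_meta", exactly as the code above constructs it:

    {
      "all": ["vm1"],
      "staging": ["vm1"],
      "_metadata": {"hostvars": {"vm1": {"vbox_Guest_OS": "Ubuntu (64-bit)"}}}
    }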
diff --git a/contrib/inventory/zabbix.ini b/contrib/inventory/zabbix.ini
deleted file mode 100644
index ead19b62d5..0000000000
--- a/contrib/inventory/zabbix.ini
+++ /dev/null
@@ -1,20 +0,0 @@
-# Ansible Zabbix external inventory script settings
-#
-
-[zabbix]
-
-# Server location
-server = http://zabbix.example.com/zabbix
-
-# Login
-username = admin
-password = zabbix
-
-# Verify the server's SSL certificate
-validate_certs = True
-
-# Read zabbix inventory per host
-read_host_inventory = True
-
-# Set ansible_ssh_host based on first interface settings
-use_host_interface = True
\ No newline at end of file
diff --git a/contrib/inventory/zabbix.py b/contrib/inventory/zabbix.py
deleted file mode 100755
index acdf38e704..0000000000
--- a/contrib/inventory/zabbix.py
+++ /dev/null
@@ -1,196 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2013, Greg Buehler
-# (c) 2018, Filippo Ferrazini
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-######################################################################
-
-"""
-Zabbix Server external inventory script.
-========================================
-
-Returns hosts and hostgroups from Zabbix Server.
-If you want to run with --limit against a host group with a space in its
-name, use an asterisk, for example --limit="Linux*servers".
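-A full invocation might look like this (the module and pattern are illustrative):
-
-    ansible -i zabbix.py --limit="Linux*servers" all -m ping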
-
-Configuration is read from `zabbix.ini`.
-
-Tested with Zabbix Server 2.0.6, 3.2.3 and 3.4.
-"""
-
-from __future__ import print_function
-
-import os
-import sys
-import argparse
-from ansible.module_utils.six.moves import configparser
-
-try:
- from zabbix_api import ZabbixAPI
-except Exception:
- print("Error: Zabbix API library must be installed: pip install zabbix-api.",
- file=sys.stderr)
- sys.exit(1)
-
-import json
-
-
-class ZabbixInventory(object):
-
- def read_settings(self):
-        config = configparser.ConfigParser()  # works on Python 2 and 3; SafeConfigParser is deprecated
- conf_path = './zabbix.ini'
- if not os.path.exists(conf_path):
- conf_path = os.path.dirname(os.path.realpath(__file__)) + '/zabbix.ini'
- if os.path.exists(conf_path):
- config.read(conf_path)
- # server
- if config.has_option('zabbix', 'server'):
- self.zabbix_server = config.get('zabbix', 'server')
-
- # login
- if config.has_option('zabbix', 'username'):
- self.zabbix_username = config.get('zabbix', 'username')
- if config.has_option('zabbix', 'password'):
- self.zabbix_password = config.get('zabbix', 'password')
- # ssl certs
- if config.has_option('zabbix', 'validate_certs'):
- if config.get('zabbix', 'validate_certs') in ['false', 'False', False]:
- self.validate_certs = False
- # host inventory
- if config.has_option('zabbix', 'read_host_inventory'):
- if config.get('zabbix', 'read_host_inventory') in ['true', 'True', True]:
- self.read_host_inventory = True
- # host interface
- if config.has_option('zabbix', 'use_host_interface'):
- if config.get('zabbix', 'use_host_interface') in ['false', 'False', False]:
- self.use_host_interface = False
-
- def read_cli(self):
- parser = argparse.ArgumentParser()
- parser.add_argument('--host')
- parser.add_argument('--list', action='store_true')
- self.options = parser.parse_args()
-
- def hoststub(self):
- return {
- 'hosts': []
- }
-
- def get_host(self, api, name):
- api_query = {'output': 'extend', 'selectGroups': 'extend', "filter": {"host": [name]}}
- if self.use_host_interface:
- api_query['selectInterfaces'] = ['useip', 'ip', 'dns']
- if self.read_host_inventory:
- api_query['selectInventory'] = "extend"
-
- data = {'ansible_ssh_host': name}
- if self.use_host_interface or self.read_host_inventory:
- try:
- hosts_data = api.host.get(api_query)[0]
- if 'interfaces' in hosts_data:
-                    # use the first interface only; the Zabbix API returns useip as a string
-                    if int(hosts_data['interfaces'][0]['useip']) == 0:
-                        data['ansible_ssh_host'] = hosts_data['interfaces'][0]['dns']
-                    else:
-                        data['ansible_ssh_host'] = hosts_data['interfaces'][0]['ip']
- if ('inventory' in hosts_data) and (hosts_data['inventory']):
- data.update(hosts_data['inventory'])
- except IndexError:
- # Host not found in zabbix
- pass
- return data
-
- def get_list(self, api):
- api_query = {'output': 'extend', 'selectGroups': 'extend'}
- if self.use_host_interface:
- api_query['selectInterfaces'] = ['useip', 'ip', 'dns']
- if self.read_host_inventory:
- api_query['selectInventory'] = "extend"
-
- hosts_data = api.host.get(api_query)
- data = {'_meta': {'hostvars': {}}}
-
- data[self.defaultgroup] = self.hoststub()
- for host in hosts_data:
- hostname = host['name']
- hostvars = dict()
- data[self.defaultgroup]['hosts'].append(hostname)
-
- for group in host['groups']:
- groupname = group['name']
-
- if groupname not in data:
- data[groupname] = self.hoststub()
-
- data[groupname]['hosts'].append(hostname)
- if 'interfaces' in host:
-            # use the first interface only; the Zabbix API returns useip as a string
-            if int(host['interfaces'][0]['useip']) == 0:
-                hostvars['ansible_ssh_host'] = host['interfaces'][0]['dns']
-            else:
-                hostvars['ansible_ssh_host'] = host['interfaces'][0]['ip']
- if ('inventory' in host) and (host['inventory']):
- hostvars.update(host['inventory'])
- data['_meta']['hostvars'][hostname] = hostvars
-
- return data
-
- def __init__(self):
-
- self.defaultgroup = 'group_all'
- self.zabbix_server = None
- self.zabbix_username = None
- self.zabbix_password = None
- self.validate_certs = True
- self.read_host_inventory = False
- self.use_host_interface = True
-
- self.meta = {}
-
- self.read_settings()
- self.read_cli()
-
- if self.zabbix_server and self.zabbix_username:
- try:
- api = ZabbixAPI(server=self.zabbix_server, validate_certs=self.validate_certs)
- api.login(user=self.zabbix_username, password=self.zabbix_password)
- # zabbix_api tries to exit if it cannot parse what the zabbix server returned
- # so we have to use SystemExit here
-            except (Exception, SystemExit) as e:
-                print("Error: Could not login to Zabbix server: %s. Check your zabbix.ini." % e, file=sys.stderr)
- sys.exit(1)
-
- if self.options.host:
- data = self.get_host(api, self.options.host)
- print(json.dumps(data, indent=2))
-
- elif self.options.list:
- data = self.get_list(api)
- print(json.dumps(data, indent=2))
-
- else:
-                print("usage: --list | --host <hostname>", file=sys.stderr)
- sys.exit(1)
-
- else:
- print("Error: Configuration of server and credentials are required. See zabbix.ini.", file=sys.stderr)
- sys.exit(1)
-
-
-ZabbixInventory()
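Based on get_list() above, `./zabbix.py --list` produces the standard dynamic
inventory document; a sketch with illustrative group and host names:

    {
      "group_all": {"hosts": ["db01"]},
      "Linux servers": {"hosts": ["db01"]},
      "_meta": {"hostvars": {"db01": {"ansible_ssh_host": "10.1.2.3"}}}
    }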
diff --git a/contrib/inventory/zone.py b/contrib/inventory/zone.py
deleted file mode 100755
index 825e7499b0..0000000000
--- a/contrib/inventory/zone.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2015, Dagobert Michelsen <dam@baltic-online.de>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from subprocess import Popen, PIPE
-import sys
-import json
-
-result = {}
-result['all'] = {}
-
-pipe = Popen(['zoneadm', 'list', '-ip'], stdout=PIPE, universal_newlines=True)
-result['all']['hosts'] = []
-for line in pipe.stdout.readlines():
-    # example: 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
-    # the second colon-separated field is the zone name; skip the global zone itself
-    fields = line.split(':')
-    if fields[1] != 'global':
-        result['all']['hosts'].append(fields[1])
-
-result['all']['vars'] = {}
-result['all']['vars']['ansible_connection'] = 'zone'
-
-if len(sys.argv) == 2 and sys.argv[1] == '--list':
- print(json.dumps(result))
-elif len(sys.argv) == 3 and sys.argv[1] == '--host':
- print(json.dumps({'ansible_connection': 'zone'}))
-else:
-    sys.stderr.write("Need an argument, either --list or --host <host>\n")
-    sys.exit(1)
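For the sample `zoneadm list -ip` line shown in the comment above, the second
field is the zone name ("work"), so `--list` would print (whitespace added here
for readability):

    {"all": {"hosts": ["work"],
             "vars": {"ansible_connection": "zone"}}}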