author     Michael DeHaan <michael.dehaan@gmail.com>    2012-04-04 16:27:24 +0200
committer  Michael DeHaan <michael.dehaan@gmail.com>    2012-04-04 16:27:24 +0200
commit     8d06c074b1f6b35aca7a3969481c88229a9078e6 (patch)
tree       1c6fd8516b0a2943cbe5b2d9e797f9f53610f361
parent     Added tests for the guts of the program (Runner()) and modules. Idea is to run (diff)
* Added uptime.py as an example of somewhat minimal API usage & reporting
* Pattern in the API now has a default ('*'; see the sketch after these notes)
* Fixed bug in template module operation detected from running playbook (tests for that pending)
* Workaround for multiprocessing lib being harmlessly squeaky (feeder thread got sentinel)
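
With DEFAULT_PATTERN now set to '*', the pattern keyword can be left off entirely when driving the API. The following is a minimal, hypothetical sketch of such a call, assuming the Runner constructor's pattern argument defaults to that constant (examples/uptime.py below passes it explicitly):

import ansible.runner

# pattern is omitted here and falls back to DEFAULT_PATTERN ('*'),
# i.e. every host in the inventory at /etc/ansible/hosts
results = ansible.runner.Runner(
    forks=10,
    module_name='command',
    module_args=['/usr/bin/uptime'],
).run()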
-rwxr-xr-x  examples/uptime.py         31
-rw-r--r--  lib/ansible/constants.py    2
-rwxr-xr-x  lib/ansible/runner.py      19
3 files changed, 44 insertions, 8 deletions
diff --git a/examples/uptime.py b/examples/uptime.py
new file mode 100755
index 0000000000..dd11ed20e0
--- /dev/null
+++ b/examples/uptime.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# example of getting the uptime of all hosts, 10 at a time
+
+import ansible.runner
+import sys
+
+# construct the ansible runner and execute on all hosts
+results = ansible.runner.Runner(
+    pattern='*', forks=10,
+    module_name='command', module_args=['/usr/bin/uptime'],
+).run()
+
+if results is None:
+    print "No hosts found"
+    sys.exit(1)
+
+print "UP ***********"
+for (hostname, result) in results['contacted'].items():
+    if not 'failed' in result:
+        print "%s >>> %s" % (hostname, result['stdout'])
+
+print "FAILED *******"
+for (hostname, result) in results['contacted'].items():
+    if 'failed' in result:
+        print "%s >>> %s" % (hostname, result['msg'])
+
+print "DOWN *********"
+for (hostname, result) in results['dark'].items():
+    print "%s >>> %s" % (hostname, result)
+
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index becf008365..fde6a50363 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -20,7 +20,7 @@
 DEFAULT_HOST_LIST = '/etc/ansible/hosts'
 DEFAULT_MODULE_PATH = '/usr/share/ansible'
 DEFAULT_MODULE_NAME = 'command'
-DEFAULT_PATTERN = None
+DEFAULT_PATTERN = '*'
 DEFAULT_FORKS = 5
 DEFAULT_MODULE_ARGS = ''
 DEFAULT_TIMEOUT = 10
diff --git a/lib/ansible/runner.py b/lib/ansible/runner.py
index 08e7fcd7d6..c06e94232a 100755
--- a/lib/ansible/runner.py
+++ b/lib/ansible/runner.py
@@ -34,8 +34,12 @@
 import random

 ################################################

+def noop(*args, **kwargs):
+    pass
+
 def _executor_hook(job_queue, result_queue):
     ''' callback used by multiprocessing pool '''
+    signal.signal(signal.SIGINT, signal.SIG_IGN)
     while not job_queue.empty():
         try:
@@ -78,7 +82,7 @@
         self.host_list, self.groups = self.parse_hosts(host_list)
         self.module_path = module_path
         self.module_name = module_name
-        self.forks = forks
+        self.forks = int(forks)
         self.pattern = pattern
         self.module_args = module_args
         self.timeout = timeout
@@ -275,8 +279,7 @@
         self._transfer_file(conn, source, temppath)

         # install the template module
-        self.module_name = 'template'
-        module = self._transfer_module(conn, tmp)
+        module = self._transfer_module(conn, tmp, 'template')

         # run the template module
         args = [ "src=%s" % temppath, "dest=%s" % dest, "metadata=%s" % metadata ]
@@ -318,6 +321,7 @@
             raise Exception("???")
         self._delete_remote_files(conn, tmp)
         conn.close()
+
         return result

     def remote_log(self, conn, msg):
@@ -355,18 +359,19 @@
     def run(self):
         ''' xfer & run module on all matched hosts '''
-
+
         # find hosts that match the pattern
         hosts = self.match_hosts(self.pattern)
         if len(hosts) == 0:
             return None
-
+
         # attack pool of hosts in N forks
         # _executor_hook does all of the work
        hosts = [ (self,x) for x in hosts ]
+
         if self.forks > 1:
-            job_queue = multiprocessing.Queue()
-            result_queue = multiprocessing.Queue()
+            job_queue = multiprocessing.Manager().Queue()
+            result_queue = multiprocessing.Manager().Queue()

             for i in hosts:
                 job_queue.put(i)
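
The runner.py hunks above swap multiprocessing.Queue() for multiprocessing.Manager().Queue() and make the worker hook ignore SIGINT. Below is a standalone Python 2 sketch of that pattern; the worker function, host names, and payload are hypothetical and not part of ansible:

import multiprocessing
import signal
import Queue

def worker(job_queue, result_queue):
    # workers ignore SIGINT so a Ctrl-C is handled once, by the parent process
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    while not job_queue.empty():
        try:
            host = job_queue.get(block=False)
            result_queue.put((host, 'ok'))  # stand-in for the real module run
        except Queue.Empty:
            pass

if __name__ == '__main__':
    # Manager().Queue() proxies have no feeder thread, so interpreter shutdown
    # no longer logs the harmless "feeder thread got sentinel" debug message
    # that a plain multiprocessing.Queue() could emit
    manager = multiprocessing.Manager()
    job_queue = manager.Queue()
    result_queue = manager.Queue()

    for host in ['alpha', 'beta', 'gamma']:
        job_queue.put(host)

    workers = [
        multiprocessing.Process(target=worker, args=(job_queue, result_queue))
        for n in range(2)
    ]
    for w in workers:
        w.start()
    for w in workers:
        w.join()

    while not result_queue.empty():
        print result_queue.get()

The SIGINT line mirrors the new statement in _executor_hook: without it, an interrupt would be delivered to every fork as well as to the parent that is supposed to handle it.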