import ansible.inventory
import ansible.constants as C
import ansible.runner
from ansible.utils.template import template
from ansible import utils
from ansible import errors
from ansible.module_utils.splitter import split_args, unquote
import ansible.callbacks
import ansible.cache
import os
import shlex
import collections
from play import Play
import StringIO
import pipes
# the setup cache stores all variables about a host
# gathered during the setup step, while the vars cache
# holds all other variables about a host
SETUP_CACHE = ansible.cache.FactCache()
VARS_CACHE  = collections.defaultdict(dict)
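
# Illustrative cache shapes (hostname and keys invented for the example):
#   SETUP_CACHE['web1.example.com'] -> {'module_setup': True, 'ansible_os_family': 'Debian', ...}
#   VARS_CACHE['web1.example.com']  -> {'my_registered_result': {...}, 'a_set_fact_var': ...}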
class PlayBook(object):
    '''
    Runs an Ansible playbook, given as a data structure or YAML filename.
    A playbook is a deployment, config-management, or automation-based
    set of commands to run in series.

    Multiple plays/tasks do not execute simultaneously, but tasks in each
    pattern do execute in parallel (according to the number of forks
    requested) among the hosts they address.
    '''
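
    # A minimal usage sketch. The callback classes below exist in the 1.x
    # ansible.callbacks module, though exact constructor arguments may vary:
    #
    #   from ansible import callbacks, utils
    #   import ansible.playbook
    #
    #   stats       = callbacks.AggregateStats()
    #   playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
    #   runner_cb   = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)
    #   pb = ansible.playbook.PlayBook(playbook='site.yml',
    #                                  callbacks=playbook_cb,
    #                                  runner_callbacks=runner_cb,
    #                                  stats=stats)
    #   results = pb.run()   # dict mapping each host to its summarized stats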
# *****************************************************
def __init__(self,
playbook = None,
host_list = C.DEFAULT_HOST_LIST,
module_path = None,
forks = C.DEFAULT_FORKS,
timeout = C.DEFAULT_TIMEOUT,
remote_user = C.DEFAULT_REMOTE_USER,
remote_pass = C.DEFAULT_REMOTE_PASS,
sudo_pass = C.DEFAULT_SUDO_PASS,
remote_port = None,
transport = C.DEFAULT_TRANSPORT,
private_key_file = C.DEFAULT_PRIVATE_KEY_FILE,
callbacks = None,
runner_callbacks = None,
stats = None,
sudo = False,
sudo_user = C.DEFAULT_SUDO_USER,
extra_vars = None,
only_tags = None,
skip_tags = None,
subset = C.DEFAULT_SUBSET,
inventory = None,
check = False,
diff = False,
any_errors_fatal = False,
su = False,
su_user = False,
su_pass = False,
vault_password = False,
force_handlers = False,
):
"""
playbook: path to a playbook file
host_list: path to a file like /etc/ansible/hosts
module_path: path to ansible modules, like /usr/share/ansible/
forks: desired level of parallelism
timeout: connection timeout
remote_user: run as this user if not specified in a particular play
remote_pass: use this remote password (for all plays) vs using SSH
keys
sudo_pass: if sudo==True, and a password is required, this is the
sudo password
remote_port: default remote port to use if not specified with the host
or play
transport: how to connect to hosts that don't specify a transport
(local, paramiko, etc)
callbacks output callbacks for the playbook
runner_callbacks: more callbacks, this time for the runner API
stats: holds aggregrate data about events occurring to each host
sudo: if not specified per play, requests all plays use sudo
mode
inventory: can be specified instead of host_list to use a pre-
existing inventory object
check: don't change anything, just try to detect some potential
changes
any_errors_fatal: terminate the entire execution immediately when one of
the hosts has failed
force_handlers: continue to notify and run handlers even if a task fails
"""
self.SETUP_CACHE = SETUP_CACHE
self.VARS_CACHE = VARS_CACHE
        arguments = []
        if playbook is None:
            arguments.append('playbook')
        if callbacks is None:
            arguments.append('callbacks')
        if runner_callbacks is None:
            arguments.append('runner_callbacks')
        if stats is None:
            arguments.append('stats')
        if arguments:
            raise Exception('PlayBook missing required arguments: %s' % ', '.join(arguments))
if extra_vars is None:
extra_vars = {}
if only_tags is None:
only_tags = [ 'all' ]
if skip_tags is None:
skip_tags = []
        self.check            = check
        self.diff             = diff
        self.module_path      = module_path
        self.forks            = forks
        self.timeout          = timeout
        self.remote_user      = remote_user
        self.remote_pass      = remote_pass
        self.remote_port      = remote_port
        self.transport        = transport
        self.callbacks        = callbacks
        self.runner_callbacks = runner_callbacks
        self.stats            = stats
        self.sudo             = sudo
        self.sudo_pass        = sudo_pass
        self.sudo_user        = sudo_user
        self.extra_vars       = extra_vars
        self.global_vars      = {}
        self.private_key_file = private_key_file
        self.only_tags        = only_tags
        self.skip_tags        = skip_tags
        self.any_errors_fatal = any_errors_fatal
        self.su               = su
        self.su_user          = su_user
        self.su_pass          = su_pass
        self.vault_password   = vault_password
        self.force_handlers   = force_handlers

        self.callbacks.playbook = self
        self.runner_callbacks.playbook = self

        if inventory is None:
            self.inventory = ansible.inventory.Inventory(host_list)
            self.inventory.subset(subset)
        else:
            self.inventory = inventory

        if self.module_path is not None:
            utils.plugins.module_finder.add_directory(self.module_path)

        self.basedir = os.path.dirname(playbook) or '.'
        utils.plugins.push_basedir(self.basedir)

        # let inventory know the playbook basedir so it can load more vars
        self.inventory.set_playbook_basedir(self.basedir)

        vars = extra_vars.copy()
        vars['playbook_dir'] = os.path.abspath(self.basedir)
        if self.inventory.basedir() is not None:
            vars['inventory_dir'] = self.inventory.basedir()

        if self.inventory.src() is not None:
            vars['inventory_file'] = self.inventory.src()

        self.filename = playbook
        (self.playbook, self.play_basedirs) = self._load_playbook_from_file(playbook, vars)
        ansible.callbacks.load_callback_plugins()
        ansible.callbacks.set_playbook(self.callbacks, self)
self._ansible_version = utils.version_info(gitinfo=True)
# *****************************************************
def _get_playbook_vars(self, play_ds, existing_vars):
'''
Gets the vars specified with the play and blends them
with any existing vars that have already been read in
'''
new_vars = existing_vars.copy()
if 'vars' in play_ds:
if isinstance(play_ds['vars'], dict):
new_vars.update(play_ds['vars'])
elif isinstance(play_ds['vars'], list):
for v in play_ds['vars']:
new_vars.update(v)
return new_vars
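
    # Illustrative: both accepted play-level forms below merge identically
    # here (variable names invented):
    #
    #   vars:
    #     http_port: 80
    #
    #   vars:
    #     - http_port: 80
    #     - max_clients: 200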
# *****************************************************
def _get_include_info(self, play_ds, basedir, existing_vars={}):
'''
Gets any key=value pairs specified with the included file
name and returns the merged vars along with the path
'''
new_vars = existing_vars.copy()
tokens = split_args(play_ds.get('include', ''))
for t in tokens[1:]:
try:
(k,v) = unquote(t).split("=", 1)
new_vars[k] = template(basedir, v, new_vars)
except ValueError:
                raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t)
return (new_vars, unquote(tokens[0]))
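
    # Illustrative parameterized include (filename and variable invented):
    #
    #   - include: webservers.yml package=httpd
    #
    # tokens[0] is 'webservers.yml'; each k=v token is templated and merged
    # into the returned vars.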
# *****************************************************
def _get_playbook_vars_files(self, play_ds, existing_vars_files):
new_vars_files = list(existing_vars_files)
if 'vars_files' in play_ds:
            new_vars_files = utils.list_union(new_vars_files, play_ds['vars_files'])
return new_vars_files
# *****************************************************
def _extend_play_vars(self, play, vars={}):
'''
Extends the given play's variables with the additional specified vars.
'''
if 'vars' not in play or not play['vars']:
# someone left out or put an empty "vars:" entry in their playbook
            return vars.copy()
play_vars = None
if isinstance(play['vars'], dict):
play_vars = play['vars'].copy()
play_vars.update(vars)
elif isinstance(play['vars'], list):
# nobody should really do this, but handle vars: a=1 b=2
play_vars = play['vars'][:]
            play_vars.extend([{k:v} for k,v in vars.iteritems()])
return play_vars
# *****************************************************
def _load_playbook_from_file(self, path, vars={}, vars_files=[]):
'''
run top level error checking on playbooks and allow them to include other
playbooks.
'''
        playbook_data = utils.parse_yaml_from_file(path, vault_password=self.vault_password)
        accumulated_plays = []
        play_basedirs = []
        if type(playbook_data) != list:
            raise errors.AnsibleError("parse error: playbooks must be formatted as a YAML list, got %s" % type(playbook_data))

        basedir = os.path.dirname(path) or '.'
        utils.plugins.push_basedir(basedir)
for play in playbook_data:
            if type(play) != dict:
                raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), received: %s" % play)

            if 'include' in play:
                # a playbook (list of plays) decided to include some other list
                # of plays from another file.  The result is a flat list of
                # plays in the end.
play_vars = self._get_playbook_vars(play, vars)
play_vars_files = self._get_playbook_vars_files(play, vars_files)
                inc_vars, inc_path = self._get_include_info(play, basedir, play_vars)
                play_vars.update(inc_vars)

                included_path = utils.path_dwim(basedir, template(basedir, inc_path, play_vars))
                (plays, basedirs) = self._load_playbook_from_file(included_path, vars=play_vars, vars_files=play_vars_files)
for p in plays:
# support for parameterized play includes works by passing
# those variables along to the subservient play
p['vars'] = self._extend_play_vars(p, play_vars)
                    # now add in the vars_files
                    p['vars_files'] = utils.list_union(p.get('vars_files', []), play_vars_files)
accumulated_plays.extend(plays)
play_basedirs.extend(basedirs)
else:
# this is a normal (non-included play)
accumulated_plays.append(play)
play_basedirs.append(basedir)
return (accumulated_plays, play_basedirs)
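
    # Illustrative flattening (filenames invented): a top-level playbook of
    #
    #   - include: dbservers.yml
    #   - hosts: webservers
    #     tasks: [ ... ]
    #
    # yields accumulated_plays containing the plays from dbservers.yml followed
    # by the webservers play, with a matching entry in play_basedirs for each.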
# *****************************************************
def run(self):
''' run all patterns in the playbook '''
plays = []
matched_tags_all = set()
unmatched_tags_all = set()
# loop through all patterns and run them
        self.callbacks.on_start()
        for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs):
            play = Play(self, play_ds, play_basedir, vault_password=self.vault_password)
            assert play is not None

            matched_tags, unmatched_tags = play.compare_tags(self.only_tags)

            matched_tags_all = matched_tags_all | matched_tags
            unmatched_tags_all = unmatched_tags_all | unmatched_tags

            # Remove tasks we wish to skip
            matched_tags = matched_tags - set(self.skip_tags)

            # if we have matched_tags, the play must be run.
            # if the play contains no tasks, assume we just want to gather facts
            # in this case there are actually 3 meta tasks (handler flushes) not 0
            # tasks, so that's why there's a check against 3
            if (len(matched_tags) > 0 or len(play.tasks()) == 3):
                plays.append(play)
# if the playbook is invoked with --tags or --skip-tags that don't
# exist at all in the playbooks then we need to raise an error so that
# the user can correct the arguments.
        unknown_tags = ((set(self.only_tags) | set(self.skip_tags)) - (matched_tags_all | unmatched_tags_all))
unknown_tags.discard('all')
if len(unknown_tags) > 0:
unmatched_tags_all.discard('all')
msg = 'tag(s) not found in playbook: %s. possible values: %s'
unknown = ','.join(sorted(unknown_tags))
unmatched = ','.join(sorted(unmatched_tags_all))
            raise errors.AnsibleError(msg % (unknown, unmatched))
for play in plays:
            ansible.callbacks.set_play(self.callbacks, play)
            ansible.callbacks.set_play(self.runner_callbacks, play)
            if not self._run_play(play):
                break
            ansible.callbacks.set_play(self.callbacks, None)
            ansible.callbacks.set_play(self.runner_callbacks, None)
# summarize the results
results = {}
        for host in self.stats.processed.keys():
            results[host] = self.stats.summarize(host)
return results
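
    # Illustrative shape of the return value (hostname invented); each
    # per-host dict is whatever self.stats.summarize() returns, typically
    # counters like ok/changed/failures/unreachable/skipped:
    #
    #   {'web1.example.com': {'ok': 10, 'changed': 2, 'failures': 0,
    #                         'unreachable': 0, 'skipped': 1}}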
# *****************************************************
def _async_poll(self, poller, async_seconds, async_poll_interval):
''' launch an async job, if poll_interval is set, wait for completion '''
        results = poller.wait(async_seconds, async_poll_interval)
# mark any hosts that are still listed as started as failed
# since these likely got killed by async_wrapper
for host in poller.hosts_to_poll:
reason = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' }
            self.runner_callbacks.on_async_failed(host, reason, poller.runner.vars_cache[host]['ansible_job_id'])
results['contacted'][host] = reason
return results
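
    # Illustrative play snippet that exercises this path (values invented):
    # async: 300 becomes async_seconds, poll: 10 becomes async_poll_interval,
    # and poll: 0 means fire-and-forget (no polling at all):
    #
    #   - command: /usr/bin/long_running_job
    #     async: 300
    #     poll: 10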
# *****************************************************
def _trim_unavailable_hosts(self, hostlist=[]):
''' returns a list of hosts that haven't failed and aren't dark '''
        return [ h for h in hostlist if (h not in self.stats.failures) and (h not in self.stats.dark) ]
# *****************************************************
def _run_task_internal(self, task):
''' run a particular module step in a playbook '''
        hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts))
        self.inventory.restrict_to(hosts)

        runner = ansible.runner.Runner(
            pattern=task.play.hosts,
            inventory=self.inventory,
            module_name=task.module_name,
            module_args=task.module_args,
            forks=self.forks,
            remote_pass=self.remote_pass,
            module_path=self.module_path,
            timeout=self.timeout,
            remote_user=task.remote_user,
            remote_port=task.play.remote_port,
            module_vars=task.module_vars,
            play_vars=task.play_vars,
            play_file_vars=task.play_file_vars,
            role_vars=task.role_vars,
            role_params=task.role_params,
            default_vars=task.default_vars,
            extra_vars=self.extra_vars,
            private_key_file=self.private_key_file,
            setup_cache=self.SETUP_CACHE,
            vars_cache=self.VARS_CACHE,
            basedir=task.play.basedir,
            conditional=task.when,
            callbacks=self.runner_callbacks,
            sudo=task.sudo,
            sudo_user=task.sudo_user,
            transport=task.transport,
            sudo_pass=task.sudo_pass,
            is_playbook=True,
            check=self.check,
            diff=self.diff,
            environment=task.environment,
            complex_args=task.args,
            accelerate=task.play.accelerate,
            accelerate_port=task.play.accelerate_port,
            accelerate_ipv6=task.play.accelerate_ipv6,
            error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR,
            su=task.su,
            su_user=task.su_user,
            su_pass=task.su_pass,
            vault_pass=self.vault_password,
            run_hosts=hosts,
            no_log=task.no_log,
            run_once=task.run_once,
        )
runner.module_vars.update({'play_hosts': hosts})
runner.module_vars.update({'ansible_version': self._ansible_version})
        if task.async_seconds == 0:
            results = runner.run()
        else:
            results, poller = runner.run_async(task.async_seconds)
            self.stats.compute(results)
            if task.async_poll_interval > 0:
                # poll for completion; a poll interval of 0 means the playbook
                # requested fire-and-forget, so we don't poll at all
                results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)
            else:
                for (host, res) in results.get('contacted', {}).iteritems():
                    self.runner_callbacks.on_async_ok(host, res, poller.runner.vars_cache[host]['ansible_job_id'])

        contacted = results.get('contacted', {})
        dark = results.get('dark', {})

        self.inventory.lift_restriction()

        if len(contacted.keys()) == 0 and len(dark.keys()) == 0:
            return None

        return results
# *****************************************************
def _run_task(self, play, task, is_handler):
''' run a single task in the playbook and recursively run any subtasks.
'''
        ansible.callbacks.set_task(self.callbacks, task)
        ansible.callbacks.set_task(self.runner_callbacks, task)

        if task.role_name:
            name = '%s | %s' % (task.role_name, task.name)
        else:
            name = task.name

        self.callbacks.on_task_start(template(play.basedir, name, task.module_vars,
            lookup_fatal=False, filter_fatal=False), is_handler)
        if hasattr(self.callbacks, 'skip_task') and self.callbacks.skip_task:
            ansible.callbacks.set_task(self.callbacks, None)
            ansible.callbacks.set_task(self.runner_callbacks, None)
            return True

        # template ignore_errors
        cond = template(play.basedir, task.ignore_errors, task.module_vars, expand_lists=False)
        task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars,
            fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)
# load up an appropriate ansible runner to run the task in parallel
results = self._run_task_internal(task)
# if no hosts are matched, carry on
hosts_remaining = True
if results is None:
hosts_remaining = False
results = {}
        contacted = results.get('contacted', {})
        self.stats.compute(results, ignore_errors=task.ignore_errors)
def _register_play_vars(host, result):
# when 'register' is used, persist the result in the vars cache
# rather than the setup cache - vars should be transient between
# playbook executions
if 'stdout' in result and 'stdout_lines' not in result:
result['stdout_lines'] = result['stdout'].splitlines()
            utils.update_hash(self.VARS_CACHE, host, {task.register: result})
def _save_play_facts(host, facts):
# saves play facts in SETUP_CACHE, unless the module executed was
# set_fact, in which case we add them to the VARS_CACHE
if task.module_name in ('set_fact', 'include_vars'):
utils.update_hash(self.VARS_CACHE, host, facts)
else:
utils.update_hash(self.SETUP_CACHE, host, facts)
# add facts to the global setup cache
        for host, result in contacted.iteritems():
if 'results' in result:
# task ran with_ lookup plugin, so facts are encapsulated in
# multiple list items in the results key
for res in result['results']:
if type(res) == dict:
                        facts = res.get('ansible_facts', {})
_save_play_facts(host, facts)
else:
# when facts are returned, persist them in the setup cache
                facts = result.get('ansible_facts', {})
_save_play_facts(host, facts)
# if requested, save the result into the registered variable name
            if task.register:
_register_play_vars(host, result)
# also have to register some failed, but ignored, tasks
        if task.ignore_errors and task.register:
            failed = results.get('failed', {})
            for host, result in failed.iteritems():
_register_play_vars(host, result)
# flag which notify handlers need to be run
        if len(task.notify) > 0:
            # avoid shadowing the outer 'results' dict inside the loop
            for host, host_result in results.get('contacted', {}).iteritems():
                if host_result.get('changed', False):
                    for handler_name in task.notify:
                        self._flag_handler(play, template(play.basedir, handler_name, task.module_vars), host)

        ansible.callbacks.set_task(self.callbacks, None)
        ansible.callbacks.set_task(self.runner_callbacks, None)
return hosts_remaining
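
    # Illustrative task exercising both the register and notify paths handled
    # above (names invented):
    #
    #   - command: /usr/sbin/nginx -t
    #     register: config_check     # result persisted into VARS_CACHE[host]
    #     notify: restart nginx      # flags the 'restart nginx' handler on change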
# *****************************************************
def _flag_handler(self, play, handler_name, host):
'''
if a task has any notify elements, flag handlers for run
at end of execution cycle for hosts that have indicated
changes have been made
'''
found = False
        for x in play.handlers():
            if handler_name == template(play.basedir, x.name, x.module_vars):
                found = True
                self.callbacks.on_notify(host, x.name)
                x.notified_by.append(host)
        if not found:
            raise errors.AnsibleError("change handler (%s) is not defined" % handler_name)
# *****************************************************
def _do_setup_step(self, play):
''' get facts from the remote system '''
host_list = self._trim_unavailable_hosts(play._play_hosts)
        if play.gather_facts is None and C.DEFAULT_GATHERING == 'smart':
            host_list = [h for h in host_list if h not in self.SETUP_CACHE or 'module_setup' not in self.SETUP_CACHE[h]]
            if len(host_list) == 0:
                return {}
        elif play.gather_facts is False or (play.gather_facts is None and C.DEFAULT_GATHERING == 'explicit'):
            return {}

        self.callbacks.on_setup()
        self.inventory.restrict_to(host_list)

        ansible.callbacks.set_task(self.callbacks, None)
        ansible.callbacks.set_task(self.runner_callbacks, None)
# push any variables down to the system
        setup_results = ansible.runner.Runner(
            basedir=self.basedir,
            pattern=play.hosts,
            module_name='setup',
            module_args={},
            inventory=self.inventory,
            forks=self.forks,
            module_path=self.module_path,
            timeout=self.timeout,
            remote_user=play.remote_user,
            remote_pass=self.remote_pass,
            remote_port=play.remote_port,
            private_key_file=self.private_key_file,
            setup_cache=self.SETUP_CACHE,
            vars_cache=self.VARS_CACHE,
            callbacks=self.runner_callbacks,
            sudo=play.sudo,
            sudo_user=play.sudo_user,
            sudo_pass=self.sudo_pass,
            su=play.su,
            su_user=play.su_user,
            su_pass=self.su_pass,
            vault_pass=self.vault_password,
            transport=play.transport,
            is_playbook=True,
            module_vars=play.vars,
            play_vars=play.vars,
            play_file_vars=play.vars_file_vars,
            role_vars=play.role_vars,
            default_vars=play.default_vars,
            check=self.check,
            diff=self.diff,
            accelerate=play.accelerate,
            accelerate_port=play.accelerate_port,
        ).run()
        self.stats.compute(setup_results, setup=True)

        self.inventory.lift_restriction()
# now for each result, load into the setup cache so we can
# let runner template out future commands
setup_ok = setup_results.get('contacted', {})
for (host, result) in setup_ok.iteritems():
utils.update_hash(self.SETUP_CACHE, host, {'module_setup': True})
            utils.update_hash(self.SETUP_CACHE, host, result.get('ansible_facts', {}))
return setup_results
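
    # Illustrative knobs that drive this method (the stock choices for
    # C.DEFAULT_GATHERING are implicit/explicit/smart):
    #
    #   - hosts: webservers
    #     gather_facts: no    # skip the setup step for this play
    #
    #   ; ansible.cfg
    #   [defaults]
    #   gathering = smart     # only gather for hosts with no cached facts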
# *****************************************************
    def generate_retry_inventory(self, replay_hosts):
        '''
        called by /usr/bin/ansible when a playbook run fails.  It generates an
        inventory that allows re-running on ONLY the failed hosts.  This may
        duplicate some variable information in group_vars/host_vars but that
        is ok, and expected.
        '''

        buf = StringIO.StringIO()
        for x in replay_hosts:
            buf.write("%s\n" % x)
        basedir = self.inventory.basedir()
        filename = "%s.retry" % os.path.basename(self.filename)
        filename = filename.replace(".yml", "")
        filename = os.path.join(os.path.expandvars('$HOME/'), filename)

        try:
            fd = open(filename, 'w')
            fd.write(buf.getvalue())
            fd.close()
            return filename
        except:
            pass

        return None
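
    # Illustrative follow-up run using the generated file (assumes the $HOME
    # location above and a playbook named site.yml):
    #
    #   ansible-playbook site.yml --limit @$HOME/site.retry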
# *****************************************************
def _run_play(self, play):
''' run a list of tasks for a given pattern, in order '''
        self.callbacks.on_play_start(play.name)

        # Get the hosts for this play
        play._play_hosts = self.inventory.list_hosts(play.hosts)

        # if no hosts match this play, drop out
        if not play._play_hosts:
            self.callbacks.on_no_hosts_matched()
return True
# get facts from system
self._do_setup_step(play)
        # now with that data, handle conditional variable file imports!
all_hosts = self._trim_unavailable_hosts(play._play_hosts)
play.update_vars_files(all_hosts, vault_password=self.vault_password)
hosts_count = len(all_hosts)
if [Link]("%"):
# This is a percentage, so calculate it based on the
# number of hosts
serial_pct = int([Link]("%",""))
serial = int((serial_pct/100.0) * len(all_hosts))
# Ensure that no matter how small the percentage, serial
# can never fall below 1, so that things actually happen
serial = max(serial, 1)
else:
serial = int([Link])
serialized_batch = []
if serial <= 0:
serialized_batch = [all_hosts]
else:
# do N forks all the way through before moving to next
while len(all_hosts) > 0:
play_hosts = []
for x in range(serial):
if len(all_hosts) > 0:
play_hosts.append(all_hosts.pop(0))
serialized_batch.append(play_hosts)
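
        # Illustrative: with serial=2 and all_hosts of ['h1', 'h2', 'h3', 'h4',
        # 'h5'] (names invented), serialized_batch is
        # [['h1', 'h2'], ['h3', 'h4'], ['h5']].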
task_errors = False
for on_hosts in serialized_batch:
            # restrict the play to just the hosts we have in our on_hosts
            # block that are available.
            play._play_hosts = self._trim_unavailable_hosts(on_hosts)
            self.inventory.also_restrict_to(on_hosts)

            for task in play.tasks():
                if task.meta is not None:
                    # meta tasks can force handlers to run mid-play
                    if task.meta == 'flush_handlers':
self.run_handlers(play)
# skip calling the handler till the play is finished
continue
# only run the task if the requested tags match
should_run = False
for x in self.only_tags:
                    for y in task.tags:
if x == y:
should_run = True
break
# Check for tags that we need to skip
if should_run:
                    if any(x in task.tags for x in self.skip_tags):
should_run = False
if should_run:
if not self._run_task(play, task, False):
# whether no hosts matched is fatal or not depends if it
was on the initial step.
# if we got exactly no hosts on the first step (setup!)
then the host group
# just didn't match anything and that's ok
return False
# Get a new list of what hosts are left as available, the ones that
# did not go fail/dark during the task
host_list = self._trim_unavailable_hosts(play._play_hosts)
# Set max_fail_pct to 0, So if any hosts fails, bail out
if task.any_errors_fatal and len(host_list) < hosts_count:
play.max_fail_pct = 0
# If threshold for max nodes failed is exceeded, bail out.
                if serial > 0:
                    # if serial is set, we need to shorten the size of host_count
                    play_count = len(play._play_hosts)
                    if (play_count - len(host_list)) > int((play.max_fail_pct) / 100.0 * play_count):
                        host_list = None
                else:
                    if (hosts_count - len(host_list)) > int((play.max_fail_pct) / 100.0 * hosts_count):
                        host_list = None
# if no hosts remain, drop out
if not host_list:
if self.force_handlers:
task_errors = True
break
else:
                        self.callbacks.on_no_hosts_remaining()
return False
# lift restrictions after each play finishes
            self.inventory.lift_also_restriction()
if task_errors and not self.force_handlers:
# if there were failed tasks and handler execution
# is not forced, quit the play with an error
return False
else:
# no errors, go ahead and execute all handlers
if not self.run_handlers(play):
return False
return True
    # *****************************************************

    def run_handlers(self, play):
        on_hosts = play._play_hosts
        hosts_count = len(on_hosts)
        for task in play.tasks():
            if task.meta is not None:

                fired_names = {}
                for handler in play.handlers():
                    if len(handler.notified_by) > 0:
                        self.inventory.restrict_to(handler.notified_by)

                        # Resolve the variables first
                        handler_name = template(play.basedir, handler.name, handler.module_vars)
if handler_name not in fired_names:
self._run_task(play, handler, True)
                        # prevent duplicate handler includes from running more
                        # than once
                        fired_names[handler_name] = 1
host_list = self._trim_unavailable_hosts(play._play_hosts)
                        if handler.any_errors_fatal and len(host_list) < hosts_count:
                            play.max_fail_pct = 0
                        if (hosts_count - len(host_list)) > int((play.max_fail_pct) / 100.0 * hosts_count):
                            host_list = None
if not host_list and not self.force_handlers:
                            self.callbacks.on_no_hosts_remaining()
return False
                        self.inventory.lift_restriction()
new_list = handler.notified_by[:]
for host in handler.notified_by:
if host in on_hosts:
while host in new_list:
new_list.remove(host)
handler.notified_by = new_list
continue
return True