Merge pull request ansible#37 from ansible/devel
Rebase
sean-m-sullivan authored Feb 10, 2021
2 parents 012189c + 2ef08b1 commit 44d7915
Showing 176 changed files with 6,352 additions and 3,269 deletions.
2 changes: 2 additions & 0 deletions README.md
@@ -1,5 +1,7 @@
[![Gated by Zuul](https://zuul-ci.org/gated.svg)](https://ansible.softwarefactory-project.io/zuul/status)

<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />

AWX provides a web-based user interface, REST API, and task engine built on top of [Ansible](https://github.com/ansible/ansible). It is the upstream project for [Tower](https://www.ansible.com/tower), a commercial derivative of AWX.

To install AWX, please view the [Install guide](./INSTALL.md).
2 changes: 2 additions & 0 deletions awx/main/models/jobs.py
@@ -828,6 +828,7 @@ def _get_inventory_hosts(
return self.inventory.hosts.only(*only)

def start_job_fact_cache(self, destination, modification_times, timeout=None):
self.log_lifecycle("start_job_fact_cache")
os.makedirs(destination, mode=0o700)
hosts = self._get_inventory_hosts()
if timeout is None:
@@ -852,6 +853,7 @@ def start_job_fact_cache(self, destination, modification_times, timeout=None):
modification_times[filepath] = os.path.getmtime(filepath)

def finish_job_fact_cache(self, destination, modification_times):
self.log_lifecycle("finish_job_fact_cache")
for host in self._get_inventory_hosts():
filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination):
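
The fact-cache hunks above mostly add `log_lifecycle` calls, but the surrounding context also shows the path-safety guard in `finish_job_fact_cache`. A small sketch of that guard in isolation, using hypothetical paths (the `destination` and host name below are illustrative, not AWX defaults):

```python
import os

# Hypothetical values for illustration only.
destination = "/tmp/awx_fact_cache"
host_name = "../../etc/passwd"  # a hostile host name attempting path traversal

# Same construction used in finish_job_fact_cache above.
filepath = os.sep.join(map(str, [destination, host_name]))

# realpath() collapses the "..", so the startswith() prefix check fails
# and such a host's fact file is never read or written.
print(os.path.realpath(filepath))                          # /etc/passwd
print(os.path.realpath(filepath).startswith(destination))  # False
```
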
5 changes: 5 additions & 0 deletions awx/main/models/notifications.py
@@ -280,6 +280,7 @@ class JobNotificationMixin(object):
{'unified_job_template': ['id', 'name', 'description', 'unified_job_type']},
{'instance_group': ['name', 'id']},
{'created_by': ['id', 'username', 'first_name', 'last_name']},
{'schedule': ['id', 'name', 'description', 'next_run']},
{'labels': ['count', 'results']}]}]

@classmethod
@@ -344,6 +345,10 @@ def context_stub(cls):
'name': 'Stub project',
'scm_type': 'git',
'status': 'successful'},
'schedule': {'description': 'Sample schedule',
'id': 42,
'name': 'Stub schedule',
'next_run': datetime.datetime(2038, 1, 1, 0, 0, 0, 0, tzinfo=datetime.timezone.utc)},
'unified_job_template': {'description': 'Sample unified job template description',
'id': 39,
'name': 'Stub Job Template',
25 changes: 22 additions & 3 deletions awx/main/models/unified_jobs.py
@@ -55,7 +55,7 @@
__all__ = ['UnifiedJobTemplate', 'UnifiedJob', 'StdoutMaxBytesExceeded']

logger = logging.getLogger('awx.main.models.unified_jobs')

logger_job_lifecycle = logging.getLogger('awx.analytics.job_lifecycle')
# NOTE: ACTIVE_STATES moved to constants because it is used by parent modules


@@ -420,7 +420,7 @@ def create_unified_job(self, **kwargs):
# have been associated to the UJ
if unified_job.__class__ in activity_stream_registrar.models:
activity_stream_create(None, unified_job, True)

unified_job.log_lifecycle("created")
return unified_job

@classmethod
@@ -862,7 +862,7 @@ def save(self, *args, **kwargs):
self.unified_job_template = self._get_parent_instance()
if 'unified_job_template' not in update_fields:
update_fields.append('unified_job_template')

if self.cancel_flag and not self.canceled_on:
# Record the 'canceled' time.
self.canceled_on = now()
@@ -1010,6 +1010,7 @@ def event_processing_finished(self):
event_qs = self.get_event_queryset()
except NotImplementedError:
return True # Model without events, such as WFJT
self.log_lifecycle("event_processing_finished")
return self.emitted_events == event_qs.count()

def result_stdout_raw_handle(self, enforce_max_bytes=True):
@@ -1318,6 +1319,10 @@ def pre_start(self, **kwargs):
if 'extra_vars' in kwargs:
self.handle_extra_data(kwargs['extra_vars'])

# remove any job_explanations that may have been set while job was in pending
if self.job_explanation != "":
self.job_explanation = ""

return (True, opts)

def signal_start(self, **kwargs):
@@ -1484,3 +1489,17 @@ def is_isolated(self):
@property
def is_containerized(self):
return False

def log_lifecycle(self, state, blocked_by=None):
extra={'type': self._meta.model_name,
'task_id': self.id,
'state': state}
if self.unified_job_template:
extra["template_name"] = self.unified_job_template.name
if state == "blocked" and blocked_by:
blocked_by_msg = f"{blocked_by._meta.model_name}-{blocked_by.id}"
msg = f"{self._meta.model_name}-{self.id} blocked by {blocked_by_msg}"
extra["blocked_by"] = blocked_by_msg
else:
msg = f"{self._meta.model_name}-{self.id} {state.replace('_', ' ')}"
logger_job_lifecycle.debug(msg, extra=extra)
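
`log_lifecycle` above writes to the new `awx.analytics.job_lifecycle` logger with the task's type, id, state, and (when known) template name attached as `extra` fields. A minimal standard-library sketch of what such a record carries once a handler is attached; the handler and format string here are assumptions for illustration, not AWX's actual logging settings:

```python
import logging

# Hypothetical handler/formatter; AWX wires this logger up in its own settings.
logger = logging.getLogger('awx.analytics.job_lifecycle')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(message)s type=%(type)s task_id=%(task_id)s state=%(state)s'))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)

# Mirrors the call log_lifecycle makes when a job enters the "waiting" state.
extra = {'type': 'job', 'task_id': 123, 'state': 'waiting', 'template_name': 'Demo Job Template'}
logger.debug('job-123 waiting', extra=extra)
# -> job-123 waiting type=job task_id=123 state=waiting
```
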
135 changes: 62 additions & 73 deletions awx/main/scheduler/dependency_graph.py
@@ -1,5 +1,3 @@
from django.utils.timezone import now as tz_now

from awx.main.models import (
Job,
ProjectUpdate,
@@ -20,119 +18,110 @@ class DependencyGraph(object):
INVENTORY_SOURCE_UPDATES = 'inventory_source_updates'
WORKFLOW_JOB_TEMPLATES_JOBS = 'workflow_job_template_jobs'

LATEST_PROJECT_UPDATES = 'latest_project_updates'
LATEST_INVENTORY_UPDATES = 'latest_inventory_updates'

INVENTORY_SOURCES = 'inventory_source_ids'

def __init__(self, queue):
self.queue = queue
def __init__(self):
self.data = {}
# project_id -> True / False
self.data[self.PROJECT_UPDATES] = {}
# inventory_id -> True / False
# The reason for tracking both inventory and inventory sources:
# Consider InvA, which has two sources, InvSource1, InvSource2.
# JobB might depend on InvA, which launches two updates, one for each source.
# To determine if JobB can run, we can just check InvA, which is marked in
# INVENTORY_UPDATES, instead of having to check for both entries in
# INVENTORY_SOURCE_UPDATES.
self.data[self.INVENTORY_UPDATES] = {}
# job_template_id -> True / False
self.data[self.JOB_TEMPLATE_JOBS] = {}

'''
Track runnable job related project and inventory to ensure updates
don't run while a job needing those resources is running.
'''

# inventory_source_id -> True / False
self.data[self.INVENTORY_SOURCE_UPDATES] = {}
# True / False
self.data[self.SYSTEM_JOB] = True
# workflow_job_template_id -> True / False
self.data[self.JOB_TEMPLATE_JOBS] = {}
self.data[self.SYSTEM_JOB] = {}
self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS] = {}

# project_id -> latest ProjectUpdateLatestDict'
self.data[self.LATEST_PROJECT_UPDATES] = {}
# inventory_source_id -> latest InventoryUpdateLatestDict
self.data[self.LATEST_INVENTORY_UPDATES] = {}

# inventory_id -> [inventory_source_ids]
self.data[self.INVENTORY_SOURCES] = {}

def add_latest_project_update(self, job):
self.data[self.LATEST_PROJECT_UPDATES][job.project_id] = job
def mark_if_no_key(self, job_type, id, job):
# only mark first occurrence of a task. If 10 of JobA are launched
# (concurrent disabled), the dependency graph should return that jobs
# 2 through 10 are blocked by job1
if id not in self.data[job_type]:
self.data[job_type][id] = job

def get_now(self):
return tz_now()
def get_item(self, job_type, id):
return self.data[job_type].get(id, None)

def mark_system_job(self):
self.data[self.SYSTEM_JOB] = False
def mark_system_job(self, job):
# Don't track different types of system jobs, so that only one can run
# at a time. Therefore id in this case is just 'system_job'.
self.mark_if_no_key(self.SYSTEM_JOB, 'system_job', job)

def mark_project_update(self, job):
self.data[self.PROJECT_UPDATES][job.project_id] = False
self.mark_if_no_key(self.PROJECT_UPDATES, job.project_id, job)

def mark_inventory_update(self, inventory_id):
self.data[self.INVENTORY_UPDATES][inventory_id] = False
def mark_inventory_update(self, job):
if type(job) is AdHocCommand:
self.mark_if_no_key(self.INVENTORY_UPDATES, job.inventory_id, job)
else:
self.mark_if_no_key(self.INVENTORY_UPDATES, job.inventory_source.inventory_id, job)

def mark_inventory_source_update(self, inventory_source_id):
self.data[self.INVENTORY_SOURCE_UPDATES][inventory_source_id] = False
def mark_inventory_source_update(self, job):
self.mark_if_no_key(self.INVENTORY_SOURCE_UPDATES, job.inventory_source_id, job)

def mark_job_template_job(self, job):
self.data[self.JOB_TEMPLATE_JOBS][job.job_template_id] = False
self.mark_if_no_key(self.JOB_TEMPLATE_JOBS, job.job_template_id, job)

def mark_workflow_job(self, job):
self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS][job.workflow_job_template_id] = False
self.mark_if_no_key(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.workflow_job_template_id, job)

def can_project_update_run(self, job):
return self.data[self.PROJECT_UPDATES].get(job.project_id, True)
def project_update_blocked_by(self, job):
return self.get_item(self.PROJECT_UPDATES, job.project_id)

def can_inventory_update_run(self, job):
return self.data[self.INVENTORY_SOURCE_UPDATES].get(job.inventory_source_id, True)
def inventory_update_blocked_by(self, job):
return self.get_item(self.INVENTORY_SOURCE_UPDATES, job.inventory_source_id)

def can_job_run(self, job):
if self.data[self.PROJECT_UPDATES].get(job.project_id, True) is True and \
self.data[self.INVENTORY_UPDATES].get(job.inventory_id, True) is True:
if job.allow_simultaneous is False:
return self.data[self.JOB_TEMPLATE_JOBS].get(job.job_template_id, True)
else:
return True
return False
def job_blocked_by(self, job):
project_block = self.get_item(self.PROJECT_UPDATES, job.project_id)
inventory_block = self.get_item(self.INVENTORY_UPDATES, job.inventory_id)
if job.allow_simultaneous is False:
job_block = self.get_item(self.JOB_TEMPLATE_JOBS, job.job_template_id)
else:
job_block = None
return project_block or inventory_block or job_block

def can_workflow_job_run(self, job):
if job.allow_simultaneous:
return True
return self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS].get(job.workflow_job_template_id, True)
def workflow_job_blocked_by(self, job):
if job.allow_simultaneous is False:
return self.get_item(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.workflow_job_template_id)
return None

def can_system_job_run(self):
return self.data[self.SYSTEM_JOB]
def system_job_blocked_by(self, job):
return self.get_item(self.SYSTEM_JOB, 'system_job')

def can_ad_hoc_command_run(self, job):
return self.data[self.INVENTORY_UPDATES].get(job.inventory_id, True)
def ad_hoc_command_blocked_by(self, job):
return self.get_item(self.INVENTORY_UPDATES, job.inventory_id)

def is_job_blocked(self, job):
def task_blocked_by(self, job):
if type(job) is ProjectUpdate:
return not self.can_project_update_run(job)
return self.project_update_blocked_by(job)
elif type(job) is InventoryUpdate:
return not self.can_inventory_update_run(job)
return self.inventory_update_blocked_by(job)
elif type(job) is Job:
return not self.can_job_run(job)
return self.job_blocked_by(job)
elif type(job) is SystemJob:
return not self.can_system_job_run()
return self.system_job_blocked_by(job)
elif type(job) is AdHocCommand:
return not self.can_ad_hoc_command_run(job)
return self.ad_hoc_command_blocked_by(job)
elif type(job) is WorkflowJob:
return not self.can_workflow_job_run(job)
return self.workflow_job_blocked_by(job)

def add_job(self, job):
if type(job) is ProjectUpdate:
self.mark_project_update(job)
elif type(job) is InventoryUpdate:
self.mark_inventory_update(job.inventory_source.inventory_id)
self.mark_inventory_source_update(job.inventory_source_id)
self.mark_inventory_update(job)
self.mark_inventory_source_update(job)
elif type(job) is Job:
self.mark_job_template_job(job)
elif type(job) is WorkflowJob:
self.mark_workflow_job(job)
elif type(job) is SystemJob:
self.mark_system_job()
self.mark_system_job(job)
elif type(job) is AdHocCommand:
self.mark_inventory_update(job.inventory_id)
self.mark_inventory_update(job)

def add_jobs(self, jobs):
for j in jobs:
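
The refactor above replaces the boolean `can_*_run()` checks with `*_blocked_by()` lookups that return the task currently holding a slot, built on the `mark_if_no_key` / `get_item` pair. A distilled, self-contained sketch of that pattern, with a plain dataclass standing in for a real job model (nothing here imports AWX):

```python
from dataclasses import dataclass


@dataclass
class FakeJob:
    id: int
    job_template_id: int


class MiniGraph:
    JOB_TEMPLATE_JOBS = 'job_template_jobs'

    def __init__(self):
        self.data = {self.JOB_TEMPLATE_JOBS: {}}

    def mark_if_no_key(self, job_type, key, job):
        # Only the first task recorded for a key claims the slot; later
        # tasks with the same key are reported as blocked by it.
        if key not in self.data[job_type]:
            self.data[job_type][key] = job

    def get_item(self, job_type, key):
        return self.data[job_type].get(key, None)

    def add_job(self, job):
        self.mark_if_no_key(self.JOB_TEMPLATE_JOBS, job.job_template_id, job)

    def job_blocked_by(self, job):
        return self.get_item(self.JOB_TEMPLATE_JOBS, job.job_template_id)


graph = MiniGraph()
running = FakeJob(id=1, job_template_id=42)
pending = FakeJob(id=2, job_template_id=42)
graph.add_job(running)                   # the running job claims the template slot
print(graph.job_blocked_by(pending).id)  # 1 -- the pending job is blocked by it
```

Returning the blocking task instead of a bare boolean is what lets the task manager, later in this commit, log a `blocked` lifecycle event and write a "waiting for <task> to finish" job explanation.
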
40 changes: 32 additions & 8 deletions awx/main/scheduler/task_manager.py
@@ -64,6 +64,8 @@ def __init__(self):
# will no longer be started and will be started on the next task manager cycle.
self.start_task_limit = settings.START_TASK_LIMIT

self.time_delta_job_explanation = timedelta(seconds=30)

def after_lock_init(self):
'''
Init AFTER we know this instance of the task manager will run because the lock is acquired.
@@ -80,26 +82,29 @@ def after_lock_init(self):
instances_by_hostname = {i.hostname: i for i in instances_partial}

for rampart_group in InstanceGroup.objects.prefetch_related('instances'):
self.graph[rampart_group.name] = dict(graph=DependencyGraph(rampart_group.name),
self.graph[rampart_group.name] = dict(graph=DependencyGraph(),
capacity_total=rampart_group.capacity,
consumed_capacity=0,
instances=[])
for instance in rampart_group.instances.filter(capacity__gt=0, enabled=True).order_by('hostname'):
if instance.hostname in instances_by_hostname:
self.graph[rampart_group.name]['instances'].append(instances_by_hostname[instance.hostname])

def is_job_blocked(self, task):
def job_blocked_by(self, task):
# TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
# in the old task manager this was handled as a method on each task object outside of the graph and
# probably has the side effect of cutting down *a lot* of the logic from this task manager class
for g in self.graph:
if self.graph[g]['graph'].is_job_blocked(task):
return True
blocked_by = self.graph[g]['graph'].task_blocked_by(task)
if blocked_by:
return blocked_by

if not task.dependent_jobs_finished():
return True
blocked_by = task.dependent_jobs.first()
if blocked_by:
return blocked_by

return False
return None

def get_tasks(self, status_list=('pending', 'waiting', 'running')):
jobs = [j for j in Job.objects.filter(status__in=status_list).prefetch_related('instance_group')]
@@ -312,6 +317,7 @@ def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
with disable_activity_stream():
task.celery_task_id = str(uuid.uuid4())
task.save()
task.log_lifecycle("waiting")

if rampart_group is not None:
self.consume_capacity(task, rampart_group.name)
@@ -450,6 +456,7 @@ def should_update_related_project(self, job, latest_project_update):
def generate_dependencies(self, undeped_tasks):
created_dependencies = []
for task in undeped_tasks:
task.log_lifecycle("acknowledged")
dependencies = []
if not type(task) is Job:
continue
@@ -489,11 +496,18 @@ def generate_dependencies(self, undeped_tasks):

def process_pending_tasks(self, pending_tasks):
running_workflow_templates = set([wf.unified_job_template_id for wf in self.get_running_workflow_jobs()])
tasks_to_update_job_explanation = []
for task in pending_tasks:
if self.start_task_limit <= 0:
break
if self.is_job_blocked(task):
logger.debug("{} is blocked from running".format(task.log_format))
blocked_by = self.job_blocked_by(task)
if blocked_by:
task.log_lifecycle("blocked", blocked_by=blocked_by)
job_explanation = gettext_noop(f"waiting for {blocked_by._meta.model_name}-{blocked_by.id} to finish")
if task.job_explanation != job_explanation:
if task.created < (tz_now() - self.time_delta_job_explanation):
task.job_explanation = job_explanation
tasks_to_update_job_explanation.append(task)
continue
preferred_instance_groups = task.preferred_instance_groups
found_acceptable_queue = False
@@ -539,7 +553,17 @@ def process_pending_tasks(self, pending_tasks):
logger.debug("No instance available in group {} to run job {} w/ capacity requirement {}".format(
rampart_group.name, task.log_format, task.task_impact))
if not found_acceptable_queue:
task.log_lifecycle("needs_capacity")
job_explanation = gettext_noop("This job is not ready to start because there is not enough available capacity.")
if task.job_explanation != job_explanation:
if task.created < (tz_now() - self.time_delta_job_explanation):
# Many launched jobs are immediately blocked, but most blocks will resolve in a few seconds.
# Therefore we should only update the job_explanation after some time has elapsed to
# prevent excessive task saves.
task.job_explanation = job_explanation
tasks_to_update_job_explanation.append(task)
logger.debug("{} couldn't be scheduled on graph, waiting for next cycle".format(task.log_format))
UnifiedJob.objects.bulk_update(tasks_to_update_job_explanation, ['job_explanation'])

def timeout_approval_node(self):
workflow_approvals = WorkflowApproval.objects.filter(status='pending')
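
One detail worth noting from `process_pending_tasks` above: `job_explanation` is only rewritten for tasks older than `time_delta_job_explanation` (30 seconds), and the modified rows are saved with a single `bulk_update`. A minimal sketch of that throttling rule with hypothetical stand-in tasks (no Django involved):

```python
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone

time_delta_job_explanation = timedelta(seconds=30)


@dataclass
class FakeTask:
    id: int
    created: datetime
    job_explanation: str = ""


def collect_explanation_updates(tasks, new_explanation):
    """Return only the tasks whose job_explanation should be persisted."""
    to_update = []
    now = datetime.now(timezone.utc)
    for task in tasks:
        if task.job_explanation != new_explanation:
            # Most blocks clear within a few seconds, so skip freshly created
            # tasks to avoid an extra save on every scheduler cycle.
            if task.created < (now - time_delta_job_explanation):
                task.job_explanation = new_explanation
                to_update.append(task)
    return to_update  # the real code hands these to UnifiedJob.objects.bulk_update(...)


now = datetime.now(timezone.utc)
fresh = FakeTask(id=1, created=now)
stale = FakeTask(id=2, created=now - timedelta(minutes=5))
changed = collect_explanation_updates([fresh, stale], "waiting for job-7 to finish")
print([t.id for t in changed])  # [2] -- the just-launched task is left alone
```
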