diff --git a/awx/api/serializers.py b/awx/api/serializers.py index a50065688f43..992805d1806c 100644 --- a/awx/api/serializers.py +++ b/awx/api/serializers.py @@ -98,21 +98,14 @@ 'total_hosts', 'hosts_with_active_failures', 'total_groups', - 'groups_with_active_failures', 'has_inventory_sources', 'total_inventory_sources', 'inventory_sources_with_failures', 'organization_id', 'kind', 'insights_credential_id',), - 'host': DEFAULT_SUMMARY_FIELDS + ('has_active_failures', - 'has_inventory_sources'), - 'group': DEFAULT_SUMMARY_FIELDS + ('has_active_failures', - 'total_hosts', - 'hosts_with_active_failures', - 'total_groups', - 'groups_with_active_failures', - 'has_inventory_sources'), + 'host': DEFAULT_SUMMARY_FIELDS, + 'group': DEFAULT_SUMMARY_FIELDS, 'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'), 'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'), 'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',), @@ -1549,20 +1542,15 @@ class InventorySerializer(BaseSerializerWithVariables): 'admin', 'adhoc', {'copy': 'organization.inventory_admin'} ] - groups_with_active_failures = serializers.IntegerField( - read_only=True, - min_value=0, - help_text=_('This field has been deprecated and will be removed in a future release') - ) class Meta: model = Inventory fields = ('*', 'organization', 'kind', 'host_filter', 'variables', 'has_active_failures', 'total_hosts', 'hosts_with_active_failures', 'total_groups', - 'groups_with_active_failures', 'has_inventory_sources', - 'total_inventory_sources', 'inventory_sources_with_failures', - 'insights_credential', 'pending_deletion',) + 'has_inventory_sources', 'total_inventory_sources', + 'inventory_sources_with_failures', 'insights_credential', + 'pending_deletion',) def get_related(self, obj): res = super(InventorySerializer, self).get_related(obj) @@ -1644,6 +1632,9 @@ class HostSerializer(BaseSerializerWithVariables): show_capabilities = ['edit', 'delete'] capabilities_prefetch = 
['inventory.admin'] + has_active_failures = serializers.SerializerMethodField() + has_inventory_sources = serializers.SerializerMethodField() + class Meta: model = Host fields = ('*', 'inventory', 'enabled', 'instance_id', 'variables', @@ -1757,6 +1748,14 @@ def to_representation(self, obj): ret['last_job_host_summary'] = None return ret + def get_has_active_failures(self, obj): + return bool( + obj.last_job_host_summary and obj.last_job_host_summary.failed + ) + + def get_has_inventory_sources(self, obj): + return obj.inventory_sources.exists() + class AnsibleFactsSerializer(BaseSerializer): class Meta: @@ -1769,17 +1768,10 @@ def to_representation(self, obj): class GroupSerializer(BaseSerializerWithVariables): show_capabilities = ['copy', 'edit', 'delete'] capabilities_prefetch = ['inventory.admin', 'inventory.adhoc'] - groups_with_active_failures = serializers.IntegerField( - read_only=True, - min_value=0, - help_text=_('This field has been deprecated and will be removed in a future release') - ) class Meta: model = Group - fields = ('*', 'inventory', 'variables', 'has_active_failures', - 'total_hosts', 'hosts_with_active_failures', 'total_groups', - 'groups_with_active_failures', 'has_inventory_sources') + fields = ('*', 'inventory', 'variables') def build_relational_field(self, field_name, relation_info): field_class, field_kwargs = super(GroupSerializer, self).build_relational_field(field_name, relation_info) diff --git a/awx/api/views/__init__.py b/awx/api/views/__init__.py index e791173ceb3f..ddb5d5533ee9 100644 --- a/awx/api/views/__init__.py +++ b/awx/api/views/__init__.py @@ -204,20 +204,15 @@ def get(self, request, format=None): 'failed': ec2_inventory_failed.count()} user_groups = get_user_queryset(request.user, models.Group) - groups_job_failed = ( - models.Group.objects.filter(hosts_with_active_failures__gt=0) | models.Group.objects.filter(groups_with_active_failures__gt=0) - ).count() groups_inventory_failed = 
models.Group.objects.filter(inventory_sources__last_job_failed=True).count() data['groups'] = {'url': reverse('api:group_list', request=request), - 'failures_url': reverse('api:group_list', request=request) + "?has_active_failures=True", 'total': user_groups.count(), - 'job_failed': groups_job_failed, 'inventory_failed': groups_inventory_failed} user_hosts = get_user_queryset(request.user, models.Host) - user_hosts_failed = user_hosts.filter(has_active_failures=True) + user_hosts_failed = user_hosts.filter(last_job_host_summary__failed=True) data['hosts'] = {'url': reverse('api:host_list', request=request), - 'failures_url': reverse('api:host_list', request=request) + "?has_active_failures=True", + 'failures_url': reverse('api:host_list', request=request) + "?last_job_host_summary__failed=True", 'total': user_hosts.count(), 'failed': user_hosts_failed.count()} diff --git a/awx/main/access.py b/awx/main/access.py index 7f9be6533352..5dd8d7a92536 100644 --- a/awx/main/access.py +++ b/awx/main/access.py @@ -907,7 +907,7 @@ class HostAccess(BaseAccess): model = Host select_related = ('created_by', 'modified_by', 'inventory', 'last_job__job_template', 'last_job_host_summary__job',) - prefetch_related = ('groups',) + prefetch_related = ('groups', 'inventory_sources') def filtered_queryset(self): return self.model.objects.filter(inventory__in=Inventory.accessible_pk_qs(self.user, 'read_role')) diff --git a/awx/main/migrations/0103_v370_remove_computed_fields.py b/awx/main/migrations/0103_v370_remove_computed_fields.py new file mode 100644 index 000000000000..ca81402df9ab --- /dev/null +++ b/awx/main/migrations/0103_v370_remove_computed_fields.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.16 on 2019-02-21 17:35 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('main', '0102_v370_unifiedjob_canceled'), + ] + + operations = [ + 
migrations.RemoveField( + model_name='group', + name='groups_with_active_failures', + ), + migrations.RemoveField( + model_name='group', + name='has_active_failures', + ), + migrations.RemoveField( + model_name='group', + name='has_inventory_sources', + ), + migrations.RemoveField( + model_name='group', + name='hosts_with_active_failures', + ), + migrations.RemoveField( + model_name='group', + name='total_groups', + ), + migrations.RemoveField( + model_name='group', + name='total_hosts', + ), + migrations.RemoveField( + model_name='host', + name='has_active_failures', + ), + migrations.RemoveField( + model_name='host', + name='has_inventory_sources', + ), + migrations.AlterField( + model_name='jobhostsummary', + name='failed', + field=models.BooleanField(db_index=True, default=False, editable=False), + ), + ] diff --git a/awx/main/models/inventory.py b/awx/main/models/inventory.py index c8bc88eb0b3e..9a5eafedb2b5 100644 --- a/awx/main/models/inventory.py +++ b/awx/main/models/inventory.py @@ -4,7 +4,6 @@ # Python import datetime import time -import itertools import logging import re import copy @@ -339,139 +338,17 @@ def get_script_data(self, hostvars=False, towervars=False, show_all=False, slice return data - def update_host_computed_fields(self): - ''' - Update computed fields for all hosts in this inventory. - ''' - hosts_to_update = {} - hosts_qs = self.hosts - # Define queryset of all hosts with active failures. - hosts_with_active_failures = hosts_qs.filter(last_job_host_summary__isnull=False, last_job_host_summary__failed=True).values_list('pk', flat=True) - # Find all hosts that need the has_active_failures flag set. - hosts_to_set = hosts_qs.filter(has_active_failures=False, pk__in=hosts_with_active_failures) - for host_pk in hosts_to_set.values_list('pk', flat=True): - host_updates = hosts_to_update.setdefault(host_pk, {}) - host_updates['has_active_failures'] = True - # Find all hosts that need the has_active_failures flag cleared. 
- hosts_to_clear = hosts_qs.filter(has_active_failures=True).exclude(pk__in=hosts_with_active_failures) - for host_pk in hosts_to_clear.values_list('pk', flat=True): - host_updates = hosts_to_update.setdefault(host_pk, {}) - host_updates['has_active_failures'] = False - # Define queryset of all hosts with cloud inventory sources. - hosts_with_cloud_inventory = hosts_qs.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True) - # Find all hosts that need the has_inventory_sources flag set. - hosts_to_set = hosts_qs.filter(has_inventory_sources=False, pk__in=hosts_with_cloud_inventory) - for host_pk in hosts_to_set.values_list('pk', flat=True): - host_updates = hosts_to_update.setdefault(host_pk, {}) - host_updates['has_inventory_sources'] = True - # Find all hosts that need the has_inventory_sources flag cleared. - hosts_to_clear = hosts_qs.filter(has_inventory_sources=True).exclude(pk__in=hosts_with_cloud_inventory) - for host_pk in hosts_to_clear.values_list('pk', flat=True): - host_updates = hosts_to_update.setdefault(host_pk, {}) - host_updates['has_inventory_sources'] = False - # Now apply updates to hosts where needed (in batches). - all_update_pks = list(hosts_to_update.keys()) - - def _chunk(items, chunk_size): - for i, group in itertools.groupby(enumerate(items), lambda x: x[0] // chunk_size): - yield (g[1] for g in group) - - for update_pks in _chunk(all_update_pks, 500): - for host in hosts_qs.filter(pk__in=update_pks): - host_updates = hosts_to_update[host.pk] - for field, value in host_updates.items(): - setattr(host, field, value) - host.save(update_fields=host_updates.keys()) - - def update_group_computed_fields(self): - ''' - Update computed fields for all active groups in this inventory. 
- ''' - group_children_map = self.get_group_children_map() - group_hosts_map = self.get_group_hosts_map() - active_host_pks = set(self.hosts.values_list('pk', flat=True)) - failed_host_pks = set(self.hosts.filter(last_job_host_summary__failed=True).values_list('pk', flat=True)) - # active_group_pks = set(self.groups.values_list('pk', flat=True)) - failed_group_pks = set() # Update below as we check each group. - groups_with_cloud_pks = set(self.groups.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True)) - groups_to_update = {} - - # Build list of group pks to check, starting with the groups at the - # deepest level within the tree. - root_group_pks = set(self.root_groups.values_list('pk', flat=True)) - group_depths = {} # pk: max_depth - - def update_group_depths(group_pk, current_depth=0): - max_depth = group_depths.get(group_pk, -1) - # Arbitrarily limit depth to avoid hitting Python recursion limit (which defaults to 1000). - if current_depth > 100: - return - if current_depth > max_depth: - group_depths[group_pk] = current_depth - for child_pk in group_children_map.get(group_pk, set()): - update_group_depths(child_pk, current_depth + 1) - for group_pk in root_group_pks: - update_group_depths(group_pk) - group_pks_to_check = [x[1] for x in sorted([(v,k) for k,v in group_depths.items()], reverse=True)] - - for group_pk in group_pks_to_check: - # Get all children and host pks for this group. - parent_pks_to_check = set([group_pk]) - parent_pks_checked = set() - child_pks = set() - host_pks = set() - while parent_pks_to_check: - for parent_pk in list(parent_pks_to_check): - c_ids = group_children_map.get(parent_pk, set()) - child_pks.update(c_ids) - parent_pks_to_check.remove(parent_pk) - parent_pks_checked.add(parent_pk) - parent_pks_to_check.update(c_ids - parent_pks_checked) - h_ids = group_hosts_map.get(parent_pk, set()) - host_pks.update(h_ids) - # Define updates needed for this group. 
- group_updates = groups_to_update.setdefault(group_pk, {}) - group_updates.update({ - 'total_hosts': len(active_host_pks & host_pks), - 'has_active_failures': bool(failed_host_pks & host_pks), - 'hosts_with_active_failures': len(failed_host_pks & host_pks), - 'total_groups': len(child_pks), - 'groups_with_active_failures': len(failed_group_pks & child_pks), - 'has_inventory_sources': bool(group_pk in groups_with_cloud_pks), - }) - if group_updates['has_active_failures']: - failed_group_pks.add(group_pk) - - # Now apply updates to each group as needed (in batches). - all_update_pks = list(groups_to_update.keys()) - for offset in range(0, len(all_update_pks), 500): - update_pks = all_update_pks[offset:(offset + 500)] - for group in self.groups.filter(pk__in=update_pks): - group_updates = groups_to_update[group.pk] - for field, value in list(group_updates.items()): - if getattr(group, field) != value: - setattr(group, field, value) - else: - group_updates.pop(field) - if group_updates: - group.save(update_fields=group_updates.keys()) - - def update_computed_fields(self, update_groups=True, update_hosts=True): + def update_computed_fields(self): ''' Update model fields that are computed from database relationships. 
''' logger.debug("Going to update inventory computed fields, pk={0}".format(self.pk)) start_time = time.time() - if update_hosts: - self.update_host_computed_fields() - if update_groups: - self.update_group_computed_fields() active_hosts = self.hosts - failed_hosts = active_hosts.filter(has_active_failures=True) + failed_hosts = active_hosts.filter(last_job_host_summary__failed=True) active_groups = self.groups if self.kind == 'smart': active_groups = active_groups.none() - failed_groups = active_groups.filter(has_active_failures=True) if self.kind == 'smart': active_inventory_sources = self.inventory_sources.none() else: @@ -482,7 +359,6 @@ def update_computed_fields(self, update_groups=True, update_hosts=True): 'total_hosts': active_hosts.count(), 'hosts_with_active_failures': failed_hosts.count(), 'total_groups': active_groups.count(), - 'groups_with_active_failures': failed_groups.count(), 'has_inventory_sources': bool(active_inventory_sources.count()), 'total_inventory_sources': active_inventory_sources.count(), 'inventory_sources_with_failures': failed_inventory_sources.count(), @@ -545,7 +421,7 @@ def save(self, *args, **kwargs): if (self.kind == 'smart' and 'host_filter' in kwargs.get('update_fields', ['host_filter']) and connection.vendor != 'sqlite'): # Minimal update of host_count for smart inventory host filter changes - self.update_computed_fields(update_groups=False, update_hosts=False) + self.update_computed_fields() def delete(self, *args, **kwargs): self._update_host_smart_inventory_memeberships() @@ -631,18 +507,6 @@ class Meta: editable=False, on_delete=models.SET_NULL, ) - has_active_failures = models.BooleanField( - default=False, - editable=False, - help_text=_('This field is deprecated and will be removed in a future release. 
' - 'Flag indicating whether the last job failed for this host.'), - ) - has_inventory_sources = models.BooleanField( - default=False, - editable=False, - help_text=_('This field is deprecated and will be removed in a future release. ' - 'Flag indicating whether this host was created/updated from any external inventory sources.'), - ) inventory_sources = models.ManyToManyField( 'InventorySource', related_name='hosts', @@ -673,34 +537,6 @@ class Meta: def get_absolute_url(self, request=None): return reverse('api:host_detail', kwargs={'pk': self.pk}, request=request) - def update_computed_fields(self, update_inventory=True, update_groups=True): - ''' - Update model fields that are computed from database relationships. - ''' - has_active_failures = bool(self.last_job_host_summary and - self.last_job_host_summary.failed) - active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES) - computed_fields = { - 'has_active_failures': has_active_failures, - 'has_inventory_sources': bool(active_inventory_sources.count()), - } - for field, value in computed_fields.items(): - if getattr(self, field) != value: - setattr(self, field, value) - else: - computed_fields.pop(field) - if computed_fields: - self.save(update_fields=computed_fields.keys()) - # Groups and inventory may also need to be updated when host fields - # change. - # NOTE: I think this is no longer needed - # if update_groups: - # for group in self.all_groups: - # group.update_computed_fields() - # if update_inventory: - # self.inventory.update_computed_fields(update_groups=False, - # update_hosts=False) - # Rebuild summary fields cache variables_dict = VarsDictProperty('variables') @property @@ -815,42 +651,6 @@ class Meta: blank=True, help_text=_('Hosts associated directly with this group.'), ) - total_hosts = models.PositiveIntegerField( - default=0, - editable=False, - help_text=_('This field is deprecated and will be removed in a future release. 
' - 'Total number of hosts directly or indirectly in this group.'), - ) - has_active_failures = models.BooleanField( - default=False, - editable=False, - help_text=_('This field is deprecated and will be removed in a future release. ' - 'Flag indicating whether this group has any hosts with active failures.'), - ) - hosts_with_active_failures = models.PositiveIntegerField( - default=0, - editable=False, - help_text=_('This field is deprecated and will be removed in a future release. ' - 'Number of hosts in this group with active failures.'), - ) - total_groups = models.PositiveIntegerField( - default=0, - editable=False, - help_text=_('This field is deprecated and will be removed in a future release. ' - 'Total number of child groups contained within this group.'), - ) - groups_with_active_failures = models.PositiveIntegerField( - default=0, - editable=False, - help_text=_('This field is deprecated and will be removed in a future release. ' - 'Number of child groups within this group that have active failures.'), - ) - has_inventory_sources = models.BooleanField( - default=False, - editable=False, - help_text=_('This field is deprecated and will be removed in a future release. ' - 'Flag indicating whether this group was created/updated from any external inventory sources.'), - ) inventory_sources = models.ManyToManyField( 'InventorySource', related_name='groups', @@ -925,32 +725,6 @@ def mark_actual(): mark_actual() activity_stream_delete(None, self) - def update_computed_fields(self): - ''' - Update model fields that are computed from database relationships. - ''' - active_hosts = self.all_hosts - failed_hosts = active_hosts.filter(last_job_host_summary__failed=True) - active_groups = self.all_children - # FIXME: May not be accurate unless we always update groups depth-first. 
- failed_groups = active_groups.filter(has_active_failures=True) - active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES) - computed_fields = { - 'total_hosts': active_hosts.count(), - 'has_active_failures': bool(failed_hosts.count()), - 'hosts_with_active_failures': failed_hosts.count(), - 'total_groups': active_groups.count(), - 'groups_with_active_failures': failed_groups.count(), - 'has_inventory_sources': bool(active_inventory_sources.count()), - } - for field, value in computed_fields.items(): - if getattr(self, field) != value: - setattr(self, field, value) - else: - computed_fields.pop(field) - if computed_fields: - self.save(update_fields=computed_fields.keys()) - variables_dict = VarsDictProperty('variables') def get_all_parents(self, except_pks=None): @@ -1556,7 +1330,7 @@ def save(self, *args, **kwargs): self.update() if not getattr(_inventory_updates, 'is_updating', False): if self.inventory is not None: - self.inventory.update_computed_fields(update_groups=False, update_hosts=False) + self.inventory.update_computed_fields() def _get_current_status(self): if self.source: diff --git a/awx/main/models/jobs.py b/awx/main/models/jobs.py index 4048cb135859..6fdd01a608d8 100644 --- a/awx/main/models/jobs.py +++ b/awx/main/models/jobs.py @@ -1060,7 +1060,7 @@ class Meta: processed = models.PositiveIntegerField(default=0, editable=False) rescued = models.PositiveIntegerField(default=0, editable=False) skipped = models.PositiveIntegerField(default=0, editable=False) - failed = models.BooleanField(default=False, editable=False) + failed = models.BooleanField(default=False, editable=False, db_index=True) def __str__(self): host = getattr_dne(self, 'host') @@ -1095,7 +1095,6 @@ def update_host_last_job_summary(self): update_fields.append('last_job_host_summary_id') if update_fields: self.host.save(update_fields=update_fields) - #self.host.update_computed_fields() class SystemJobOptions(BaseModel): diff --git 
a/awx/main/signals.py b/awx/main/signals.py index 8e2c6fe030e0..185033649fe2 100644 --- a/awx/main/signals.py +++ b/awx/main/signals.py @@ -10,6 +10,7 @@ import sys # Django +from django.db import connection from django.conf import settings from django.db.models.signals import ( pre_save, @@ -103,7 +104,7 @@ def emit_update_inventory_computed_fields(sender, **kwargs): except Inventory.DoesNotExist: pass else: - update_inventory_computed_fields.delay(inventory.id, True) + update_inventory_computed_fields.delay(inventory.id) def emit_update_inventory_on_created_or_deleted(sender, **kwargs): @@ -124,7 +125,9 @@ def emit_update_inventory_on_created_or_deleted(sender, **kwargs): pass else: if inventory is not None: - update_inventory_computed_fields.delay(inventory.id, True) + connection.on_commit( + lambda: update_inventory_computed_fields.delay(inventory.id) + ) def rebuild_role_ancestor_list(reverse, model, instance, pk_set, action, **kwargs): diff --git a/awx/main/tasks.py b/awx/main/tasks.py index 09dc866d3063..8b1ac33e5775 100644 --- a/awx/main/tasks.py +++ b/awx/main/tasks.py @@ -588,7 +588,7 @@ def handle_work_error(task_id, *args, **kwargs): @task() -def update_inventory_computed_fields(inventory_id, should_update_hosts=True): +def update_inventory_computed_fields(inventory_id): ''' Signal handler and wrapper around inventory.update_computed_fields to prevent unnecessary recursive calls. 
@@ -599,7 +599,7 @@ def update_inventory_computed_fields(inventory_id, should_update_hosts=True): return i = i[0] try: - i.update_computed_fields(update_hosts=should_update_hosts) + i.update_computed_fields() except DatabaseError as e: if 'did not affect any rows' in str(e): logger.debug('Exiting duplicate update_inventory_computed_fields task.') @@ -642,7 +642,7 @@ def update_host_smart_inventory_memberships(): logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk)) # Update computed fields for changed inventories outside atomic action for smart_inventory in changed_inventories: - smart_inventory.update_computed_fields(update_groups=False, update_hosts=False) + smart_inventory.update_computed_fields() @task() @@ -1868,7 +1868,7 @@ def final_run_hook(self, job, status, private_data_dir, fact_modification_times, except Inventory.DoesNotExist: pass else: - update_inventory_computed_fields.delay(inventory.id, True) + update_inventory_computed_fields.delay(inventory.id) @task() @@ -2851,4 +2851,4 @@ def deep_copy_model_obj( ), permission_check_func[2]) permission_check_func(creater, copy_mapping.values()) if isinstance(new_obj, Inventory): - update_inventory_computed_fields.delay(new_obj.id, True) + update_inventory_computed_fields.delay(new_obj.id) diff --git a/awx/main/tests/functional/__init__.py b/awx/main/tests/functional/__init__.py index 262a142790ac..15c299128125 100644 --- a/awx/main/tests/functional/__init__.py +++ b/awx/main/tests/functional/__init__.py @@ -2,6 +2,9 @@ from django.db.models.signals import post_migrate from django.apps import apps from django.conf import settings +from unittest import mock + +import contextlib def app_post_migration(sender, app_config, **kwargs): @@ -23,3 +26,13 @@ def app_post_migration(sender, app_config, **kwargs): +@contextlib.contextmanager +def immediate_on_commit(): + """ + Context manager executing transaction.on_commit() hooks immediately as + if the connection was in 
auto-commit mode. + """ + def on_commit(func): + func() + with mock.patch('django.db.connection.on_commit', side_effect=on_commit) as patch: + yield patch diff --git a/awx/main/tests/functional/api/test_oauth.py b/awx/main/tests/functional/api/test_oauth.py index 3973ebeac373..6ea1fd717a96 100644 --- a/awx/main/tests/functional/api/test_oauth.py +++ b/awx/main/tests/functional/api/test_oauth.py @@ -1,8 +1,6 @@ import pytest import base64 -import contextlib import json -from unittest import mock from django.db import connection from django.test.utils import override_settings @@ -12,22 +10,11 @@ from awx.api.versioning import reverse, drf_reverse from awx.main.models.oauth import (OAuth2Application as Application, OAuth2AccessToken as AccessToken) +from awx.main.tests.functional import immediate_on_commit from awx.sso.models import UserEnterpriseAuth from oauth2_provider.models import RefreshToken -@contextlib.contextmanager -def immediate_on_commit(): - """ - Context manager executing transaction.on_commit() hooks immediately as - if the connection was in auto-commit mode. 
- """ - def on_commit(func): - func() - with mock.patch('django.db.connection.on_commit', side_effect=on_commit) as patch: - yield patch - - @pytest.mark.django_db def test_personal_access_token_creation(oauth_application, post, alice): url = drf_reverse('api:oauth_authorization_root_view') + 'token/' diff --git a/awx/main/tests/functional/conftest.py b/awx/main/tests/functional/conftest.py index 5b545d8e912b..1b680eee8d02 100644 --- a/awx/main/tests/functional/conftest.py +++ b/awx/main/tests/functional/conftest.py @@ -125,9 +125,9 @@ def __get__(self, obj, obj_type): @pytest.fixture def run_computed_fields_right_away(request): - def run_me(inventory_id, should_update_hosts=True): + def run_me(inventory_id): i = Inventory.objects.get(id=inventory_id) - i.update_computed_fields(update_hosts=should_update_hosts) + i.update_computed_fields() mocked = mock.patch( 'awx.main.signals.update_inventory_computed_fields.delay', diff --git a/awx/main/tests/functional/models/test_context_managers.py b/awx/main/tests/functional/models/test_context_managers.py index 61aad54ad4e1..0e1fe024f21c 100644 --- a/awx/main/tests/functional/models/test_context_managers.py +++ b/awx/main/tests/functional/models/test_context_managers.py @@ -11,6 +11,7 @@ # AWX models from awx.main.models.organization import Organization from awx.main.models import ActivityStream, Job +from awx.main.tests.functional import immediate_on_commit @pytest.mark.django_db @@ -34,9 +35,10 @@ class TestComputedFields: def test_computed_fields_normal_use(self, mocker, inventory): job = Job.objects.create(name='fake-job', inventory=inventory) - with mocker.patch.object(update_inventory_computed_fields, 'delay'): - job.delete() - update_inventory_computed_fields.delay.assert_called_once_with(inventory.id, True) + with immediate_on_commit(): + with mocker.patch.object(update_inventory_computed_fields, 'delay'): + job.delete() + update_inventory_computed_fields.delay.assert_called_once_with(inventory.id) def 
test_disable_computed_fields(self, mocker, inventory): job = Job.objects.create(name='fake-job', inventory=inventory) diff --git a/awx/main/tests/functional/models/test_unified_job.py b/awx/main/tests/functional/models/test_unified_job.py index c1d0967583eb..90fc4ce37ed7 100644 --- a/awx/main/tests/functional/models/test_unified_job.py +++ b/awx/main/tests/functional/models/test_unified_job.py @@ -283,13 +283,13 @@ def r(hosts, forks): def test_limit_task_impact(self, job_host_limit, run_computed_fields_right_away): job = job_host_limit(5, 2) - job.inventory.refresh_from_db() # FIXME: computed fields operates on reloaded inventory + job.inventory.update_computed_fields() assert job.inventory.total_hosts == 5 assert job.task_impact == 2 + 1 # forks becomes constraint def test_host_task_impact(self, job_host_limit, run_computed_fields_right_away): job = job_host_limit(3, 5) - job.inventory.refresh_from_db() # FIXME: computed fields operates on reloaded inventory + job.inventory.update_computed_fields() assert job.task_impact == 3 + 1 # hosts becomes constraint def test_shard_task_impact(self, slice_job_factory, run_computed_fields_right_away): @@ -304,6 +304,7 @@ def test_shard_task_impact(self, slice_job_factory, run_computed_fields_right_aw len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts']) for i in range(3) ] == [1, 1, 1] + jobs[0].inventory.update_computed_fields() assert [job.task_impact for job in jobs] == [2, 2, 2] # plus one base task impact # Uneven distribution - first job takes the extra host jobs[0].inventory.hosts.create(name='remainder_foo') @@ -311,5 +312,5 @@ def test_shard_task_impact(self, slice_job_factory, run_computed_fields_right_aw len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts']) for i in range(3) ] == [2, 1, 1] - jobs[0].inventory.refresh_from_db() # FIXME: computed fields operates on reloaded inventory + jobs[0].inventory.update_computed_fields() assert 
[job.task_impact for job in jobs] == [3, 2, 2] diff --git a/awx/ui/client/legacy/styles/lists.less b/awx/ui/client/legacy/styles/lists.less index 3e004461afe9..6a4922fd2bdd 100644 --- a/awx/ui/client/legacy/styles/lists.less +++ b/awx/ui/client/legacy/styles/lists.less @@ -598,6 +598,11 @@ table, tbody { } .List-staticColumnLayout--groups { + display: grid; + grid-template-columns: @at-space @at-space-5x auto; +} + +.List-staticColumnLayout--hostNestedGroups { display: grid; grid-template-columns: @at-space @at-space-5x @at-space-5x auto; } diff --git a/awx/ui/client/src/home/dashboard/counts/dashboard-counts.directive.js b/awx/ui/client/src/home/dashboard/counts/dashboard-counts.directive.js index e605c21c86c3..4b4e8c16909c 100644 --- a/awx/ui/client/src/home/dashboard/counts/dashboard-counts.directive.js +++ b/awx/ui/client/src/home/dashboard/counts/dashboard-counts.directive.js @@ -39,7 +39,7 @@ export default label: i18n._("Hosts") }, { - url: "/#/hosts?host_search=has_active_failures:true", + url: "/#/hosts?host_search=last_job_host_summary__failed:true", number: scope.data.hosts.failed, label: i18n._("Failed Hosts"), isFailureCount: true diff --git a/awx/ui/client/src/inventories-hosts/hosts/related/groups/hosts-related-groups.controller.js b/awx/ui/client/src/inventories-hosts/hosts/related/groups/hosts-related-groups.controller.js index 81ae0ff8822a..f25a961cebb0 100644 --- a/awx/ui/client/src/inventories-hosts/hosts/related/groups/hosts-related-groups.controller.js +++ b/awx/ui/client/src/inventories-hosts/hosts/related/groups/hosts-related-groups.controller.js @@ -5,11 +5,9 @@ *************************************************/ export default ['$scope', '$rootScope', '$state', '$stateParams', 'HostsRelatedGroupsList', 'InventoryUpdate', - 'CancelSourceUpdate', 'rbacUiControlService', 'GetBasePath', - 'GetHostsStatusMsg', 'Dataset', 'Find', 'QuerySet', 'inventoryData', 'host', 'GroupsService', + 'CancelSourceUpdate', 'rbacUiControlService', 'GetBasePath', 
'Dataset', 'Find', 'QuerySet', 'inventoryData', 'host', 'GroupsService', function($scope, $rootScope, $state, $stateParams, HostsRelatedGroupsList, InventoryUpdate, - CancelSourceUpdate, rbacUiControlService, GetBasePath, - GetHostsStatusMsg, Dataset, Find, qs, inventoryData, host, GroupsService){ + CancelSourceUpdate, rbacUiControlService, GetBasePath, Dataset, Find, qs, inventoryData, host, GroupsService){ let list = HostsRelatedGroupsList; @@ -29,27 +27,6 @@ $scope[`${list.iterator}_dataset`] = Dataset.data; $scope[list.name] = $scope[`${list.iterator}_dataset`].results; - $scope.$watchCollection(list.name, function(){ - _.forEach($scope[list.name], buildStatusIndicators); - }); - } - - function buildStatusIndicators(group){ - if (group === undefined || group === null) { - group = {}; - } - - let hosts_status; - - hosts_status = GetHostsStatusMsg({ - active_failures: group.hosts_with_active_failures, - total_hosts: group.total_hosts, - inventory_id: $scope.inventory_id, - group_id: group.id - }); - _.assign(group, - {hosts_status_tip: hosts_status.tooltip}, - {hosts_status_class: hosts_status.class}); } $scope.editGroup = function(id){ diff --git a/awx/ui/client/src/inventories-hosts/inventories/related/groups/factories/get-hosts-status-msg.factory.js b/awx/ui/client/src/inventories-hosts/inventories/related/groups/factories/get-hosts-status-msg.factory.js deleted file mode 100644 index 43ddedb58494..000000000000 --- a/awx/ui/client/src/inventories-hosts/inventories/related/groups/factories/get-hosts-status-msg.factory.js +++ /dev/null @@ -1,33 +0,0 @@ -export default - ['i18n', function(i18n) { - return function(params) { - var active_failures = params.active_failures, - total_hosts = params.total_hosts, - tip, failures, html_class; - - // Return values for use on host status indicator - - if (active_failures > 0) { - tip = total_hosts + ((total_hosts === 1) ? ' host' : ' hosts') + '. 
' + active_failures + i18n._(' with failed jobs.'); - html_class = 'error'; - failures = true; - } else { - failures = false; - if (total_hosts === 0) { - // no hosts - tip = i18n._("Contains 0 hosts."); - html_class = 'none'; - } else { - // many hosts with 0 failures - tip = total_hosts + ((total_hosts === 1) ? ' host' : ' hosts') + '. ' + i18n._('No job failures'); - html_class = 'success'; - } - } - - return { - tooltip: tip, - failures: failures, - 'class': html_class - }; - }; - }]; diff --git a/awx/ui/client/src/inventories-hosts/inventories/related/groups/groups.list.js b/awx/ui/client/src/inventories-hosts/inventories/related/groups/groups.list.js index 14b46e671577..ffd048de78f2 100644 --- a/awx/ui/client/src/inventories-hosts/inventories/related/groups/groups.list.js +++ b/awx/ui/client/src/inventories-hosts/inventories/related/groups/groups.list.js @@ -18,22 +18,6 @@ basePath: 'api/v2/inventories/{{$stateParams.inventory_id}}/groups/', layoutClass: 'List-staticColumnLayout--groups', actionHolderClass: 'List-actionHolder List-actionHolder--rootGroups', - staticColumns: [ - { - field: 'failed_hosts', - content: { - label: '', - nosort: true, - mode: 'all', - iconOnly: true, - awToolTip: "{{ group.hosts_status_tip }}", - dataPlacement: "top", - icon: "{{ 'fa icon-job-' + group.hosts_status_class }}", - columnClass: 'status-column' - } - } - ], - fields: { name: { label: i18n._('Groups'), diff --git a/awx/ui/client/src/inventories-hosts/inventories/related/groups/list/groups-list.controller.js b/awx/ui/client/src/inventories-hosts/inventories/related/groups/list/groups-list.controller.js index f375e757eb91..6d36cd617811 100644 --- a/awx/ui/client/src/inventories-hosts/inventories/related/groups/list/groups-list.controller.js +++ b/awx/ui/client/src/inventories-hosts/inventories/related/groups/list/groups-list.controller.js @@ -5,13 +5,11 @@ *************************************************/ export default ['$scope', '$state', '$stateParams', 
'listDefinition', 'InventoryUpdate', - 'GroupsService', 'CancelSourceUpdate', - 'GetHostsStatusMsg', 'Dataset', 'inventoryData', 'canAdd', - 'InventoryHostsStrings', '$transitions', + 'GroupsService', 'CancelSourceUpdate', 'Dataset', 'inventoryData', 'canAdd', + 'InventoryHostsStrings', '$transitions', 'GetBasePath', 'Rest', function($scope, $state, $stateParams, listDefinition, InventoryUpdate, - GroupsService, CancelSourceUpdate, - GetHostsStatusMsg, Dataset, inventoryData, canAdd, - InventoryHostsStrings, $transitions){ + GroupsService, CancelSourceUpdate, Dataset, inventoryData, canAdd, + InventoryHostsStrings, $transitions, GetBasePath, Rest){ let list = listDefinition; @@ -70,18 +68,6 @@ group.isSelected = true; } }); - - let hosts_status; - - hosts_status = GetHostsStatusMsg({ - active_failures: group.hosts_with_active_failures, - total_hosts: group.total_hosts, - inventory_id: $scope.inventory_id, - group_id: group.id - }); - _.assign(group, - {hosts_status_tip: hosts_status.tooltip}, - {hosts_status_class: hosts_status.class}); } $scope.createGroup = function(){ @@ -102,35 +88,51 @@ $state.go('inventories.edit.groups.edit.nested_groups', {group_id: id}); }; $scope.deleteGroup = function(group){ - $scope.toDelete = {}; - $scope.strings.deleteModal = {}; - angular.extend($scope.toDelete, group); - if($scope.toDelete.total_groups === 0 && $scope.toDelete.total_hosts === 0) { - // This group doesn't have any child groups or hosts - the user is just trying to delete - // the group - $scope.deleteOption = "delete"; - } - else { - $scope.strings.deleteModal.group = InventoryHostsStrings.get('deletegroup.GROUP', $scope.toDelete.total_groups); - $scope.strings.deleteModal.host = InventoryHostsStrings.get('deletegroup.HOST', $scope.toDelete.total_hosts); - - if($scope.toDelete.total_groups === 0 || $scope.toDelete.total_hosts === 0) { - if($scope.toDelete.total_groups === 0) { - $scope.strings.deleteModal.deleteGroupsHosts = 
InventoryHostsStrings.get('deletegroup.DELETE_HOST', $scope.toDelete.total_hosts); - $scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_HOST', $scope.toDelete.total_hosts); + const promises = []; + + Rest.setUrl(group.related.hosts); + promises.push(Rest.get()); + + Rest.setUrl(group.related.children); + promises.push(Rest.get()); + + Promise.all(promises) + .then(([hostResponse, groupResponse]) => { + $scope.toDelete = {}; + $scope.strings.deleteModal = {}; + $scope.toDelete.hostCount = _.get(hostResponse, ['data', 'count'], 0); + $scope.toDelete.groupCount = _.get(groupResponse, ['data', 'count'], 0); + angular.extend($scope.toDelete, group); + + if($scope.toDelete.groupCount === 0 && $scope.toDelete.hostCount === 0) { + // This group doesn't have any child groups or hosts - the user is just trying to delete + // the group + $scope.deleteOption = "delete"; } - else if($scope.toDelete.total_hosts === 0) { - $scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_GROUP', $scope.toDelete.total_groups); - $scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_GROUP', $scope.toDelete.total_groups); + else { + $scope.strings.deleteModal.group = InventoryHostsStrings.get('deletegroup.GROUP', $scope.toDelete.groupCount); + $scope.strings.deleteModal.host = InventoryHostsStrings.get('deletegroup.HOST', $scope.toDelete.hostCount); + + if($scope.toDelete.groupCount === 0 || $scope.toDelete.hostCount === 0) { + if($scope.toDelete.groupCount === 0) { + $scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_HOST', $scope.toDelete.hostCount); + $scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_HOST', $scope.toDelete.hostCount); + } + else if($scope.toDelete.hostCount === 0) { + $scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_GROUP', 
$scope.toDelete.groupCount); + $scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_GROUP', $scope.toDelete.groupCount); + } + } + else { + $scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_GROUPS_AND_HOSTS', {groups: $scope.toDelete.groupCount, hosts: $scope.toDelete.hostCount}); + $scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_GROUPS_AND_HOSTS', {groups: $scope.toDelete.groupCount, hosts: $scope.toDelete.hostCount}); + } } - } - else { - $scope.strings.deleteModal.deleteGroupsHosts = InventoryHostsStrings.get('deletegroup.DELETE_GROUPS_AND_HOSTS', {groups: $scope.toDelete.total_groups, hosts: $scope.toDelete.total_hosts}); - $scope.strings.deleteModal.promoteGroupsHosts = InventoryHostsStrings.get('deletegroup.PROMOTE_GROUPS_AND_HOSTS', {groups: $scope.toDelete.total_groups, hosts: $scope.toDelete.total_hosts}); - } - } - $('#group-delete-modal').modal('show'); + $('#group-delete-modal').modal('show'); + }); + + }; $scope.confirmDelete = function(){ let reloadListStateParams = null; diff --git a/awx/ui/client/src/inventories-hosts/inventories/related/groups/list/groups-list.partial.html b/awx/ui/client/src/inventories-hosts/inventories/related/groups/list/groups-list.partial.html index 23579b228595..f9d3da3925a6 100644 --- a/awx/ui/client/src/inventories-hosts/inventories/related/groups/list/groups-list.partial.html +++ b/awx/ui/client/src/inventories-hosts/inventories/related/groups/list/groups-list.partial.html @@ -18,12 +18,10 @@