Commit 2d37b341 authored by Rémi Duraffort's avatar Rémi Duraffort Committed by Stevan Radakovic
Browse files

Remove the is_pipeline field from the Device and TestJob models. Every remaining device and job is a pipeline (V2) one — call sites that read the flag now assume True — so the field is redundant and is dropped, along with the checks and test paths that exercised non-pipeline (V1) behaviour.

Change-Id: I6c753e93b6e3e51baf5c3602c499ef9699309cfc
parent 1ae6dda0
......@@ -66,7 +66,7 @@ The **callback** section supports following options:
of the data and possible options are as following:
* **minimal** This will provide basic job info such as job id, status,
submit_time, start_time, end_time, submitter_username, is_pipeline,
submit_time, start_time, end_time, submitter_username,
failure_comment, priority, description, actual_device_id, definition and
metadata.
* **logs** In addition to minimal data this will also attach the job log
......
......@@ -1181,7 +1181,7 @@ class QueryCondition(models.Model):
TestJob: [
"submitter", "start_time", "end_time", "state", "health", "actual_device",
"requested_device_type", "health_check", "user", "group",
"priority", "is_pipeline", "description"],
"priority", "description"],
TestSuite: ["name"],
TestCase: ["name", "result", "measurement"],
NamedTestAttribute: []
......
......@@ -70,7 +70,7 @@ class ModelFactory(object):
hostname = self.getUniqueString()
if tags and type(tags) != list:
tags = []
device = Device(device_type=device_type, is_public=is_public, hostname=hostname, is_pipeline=True, **kw)
device = Device(device_type=device_type, is_public=is_public, hostname=hostname, **kw)
if tags:
device.tags = tags
logging.debug("making a device of type %s %s %s with tags '%s'"
......
......@@ -206,7 +206,7 @@ class DeviceAdmin(admin.ModelAdmin):
('Properties', {
'fields': (('device_type', 'hostname'), 'worker_host', 'device_version')}),
('Device owner', {
'fields': (('user', 'group'), ('physical_owner', 'physical_group'), 'is_public', 'is_pipeline')}),
'fields': (('user', 'group'), ('physical_owner', 'physical_group'), 'is_public')}),
('Status', {
'fields': (('state', 'health'), ('last_health_report_job', 'current_job'))}),
('Advanced properties', {
......@@ -217,7 +217,7 @@ class DeviceAdmin(admin.ModelAdmin):
readonly_fields = ('device_dictionary_jinja', 'state', 'current_job')
list_display = ('hostname', 'device_type', 'current_job', 'worker_host',
'state', 'health', 'has_health_check',
'health_check_enabled', 'is_public', 'is_pipeline',
'health_check_enabled', 'is_public',
'valid_device', 'exclusive_device')
search_fields = ('hostname', 'device_type__name')
ordering = ['hostname']
......
......@@ -310,7 +310,7 @@ class SchedulerAPI(ExposedAPI):
continue
devices_list.append(dev)
return [[dev.hostname, dev.device_type.name, build_device_status_display(dev.state, dev.health), dev.current_job().pk if dev.current_job() else None, dev.is_pipeline]
return [[dev.hostname, dev.device_type.name, build_device_status_display(dev.state, dev.health), dev.current_job().pk if dev.current_job() else None, True]
for dev in devices_list]
def all_device_types(self):
......@@ -605,7 +605,7 @@ class SchedulerAPI(ExposedAPI):
device_dict["job"] = None
device_dict["offline_since"] = None
device_dict["offline_by"] = None
device_dict["is_pipeline"] = device.is_pipeline
device_dict["is_pipeline"] = True
current_job = device.current_job()
if current_job is not None:
......@@ -801,6 +801,7 @@ class SchedulerAPI(ExposedAPI):
job.health = job.get_health_display()
job.submitter_username = job.submitter.username
job.absolute_url = job.get_absolute_url()
job.is_pipeline = True
except PermissionDenied:
raise xmlrpclib.Fault(
401, "Permission denied for user to job %s" % job_id)
......@@ -858,26 +859,10 @@ class SchedulerAPI(ExposedAPI):
'sub_id': job.sub_id
})
if job.is_pipeline:
job_status.update({
'job_status': job.get_legacy_status_display(),
'bundle_sha1': ""
})
return job_status
# DEPRECATED
bundle_sha1 = ""
if job.results_link:
try:
bundle_sha1 = job.results_link.split('/')[-2]
except IndexError:
pass
job_status.update({
'job_status': job.get_legacy_status_display(),
'bundle_sha1': bundle_sha1
'bundle_sha1': ""
})
return job_status
def job_list_status(self, job_id_list):
......@@ -1213,11 +1198,6 @@ class SchedulerAPI(ExposedAPI):
raise xmlrpclib.Fault(
404, "Device '%s' was not found." % hostname
)
if not device.is_pipeline:
raise xmlrpclib.Fault(
400, "Device '%s' is not a pipeline device" % hostname
)
device_dict = device.load_configuration(output_format="raw")
if not device_dict:
raise xmlrpclib.Fault(
......@@ -1263,11 +1243,11 @@ class SchedulerAPI(ExposedAPI):
"""
if not name:
devices = Device.objects.filter(is_pipeline=True).exclude(health=Device.HEALTH_RETIRED)
devices = Device.objects.exclude(health=Device.HEALTH_RETIRED)
else:
devices = Device.objects.filter(is_pipeline=True).exclude(health=Device.HEALTH_RETIRED).filter(device_type__name=name)
devices = Device.objects.exclude(health=Device.HEALTH_RETIRED).filter(device_type__name=name)
if not devices:
devices = Device.objects.filter(is_pipeline=True).exclude(health=Device.HEALTH_RETIRED).filter(hostname=name)
devices = Device.objects.exclude(health=Device.HEALTH_RETIRED).filter(hostname=name)
if not devices and name:
raise xmlrpclib.Fault(
404,
......
......@@ -117,9 +117,8 @@ class SchedulerDevicesAPI(ExposedV2API):
try:
Device.objects.create(hostname=hostname, device_type=device_type,
user=user, group=group, is_public=public,
worker_host=worker, is_pipeline=True,
state=Device.STATE_IDLE, health=health_val,
description=description)
worker_host=worker, description=description)
except (IntegrityError, ValidationError) as exc:
raise xmlrpclib.Fault(
......@@ -247,8 +246,7 @@ class SchedulerDevicesAPI(ExposedV2API):
"type": device.device_type.name,
"health": device.get_health_display(),
"state": device.get_state_display(),
"current_job": current_job.pk if current_job else None,
"pipeline": device.is_pipeline}
"current_job": current_job.pk if current_job else None}
ret.append(device_dict)
return ret
......@@ -293,7 +291,6 @@ class SchedulerDevicesAPI(ExposedV2API):
"health_job": bool(device.get_health_check()),
"description": device.description,
"public": device.is_public,
"pipeline": device.is_pipeline,
"has_device_dict": bool(device.load_configuration(output_format="raw")),
"worker": None,
"user": device.user.username if device.user else None,
......
......@@ -235,7 +235,6 @@ class SchedulerJobsAPI(ExposedV2API):
"device": device_hostname,
"device_type": device_type,
"health_check": job.health_check,
"pipeline": job.is_pipeline,
"health": job.get_health_display(),
"state": job.get_state_display(),
"submitter": job.submitter.username,
......
......@@ -32,7 +32,7 @@ from lava_scheduler_app.schema import SubmissionException
def check_health_checks(app_configs, **kwargs):
errors = []
for device in Device.objects.filter(is_pipeline=True):
for device in Device.objects.all():
ht = device.get_health_check()
ht_disabled = device.device_type.disable_health_check
......@@ -65,7 +65,7 @@ def check_health_checks(app_configs, **kwargs):
def check_device_configuration(app_configs, **kwargs):
errors = []
for device in Device.objects.filter(Q(is_pipeline=True), ~Q(health=Device.HEALTH_RETIRED)):
for device in Device.objects.exclude(health=Device.HEALTH_RETIRED):
if not device.is_valid():
errors.append(Error('Invalid configuration', obj=device.hostname))
......
......@@ -30,13 +30,12 @@ class RestrictedTestJobQuerySet(RestrictedResourceQuerySet):
from lava_scheduler_app.models import TestJob
# Pipeline jobs.
conditions = Q(is_pipeline=True)
if not user or user.is_anonymous():
conditions &= Q(is_public=True)
conditions = Q(is_public=True)
elif not user.is_superuser and not user.has_perm('lava_scheduler_app.cancel_resubmit_testjob') and not user.has_perm('lava_scheduler_app.change_device'):
# continue adding conditions only if user is not superuser and
# does not have admin permission for jobs or devices.
conditions &= (
conditions = (
Q(is_public=True) |
Q(submitter=user) |
(~Q(actual_device=None) & Q(actual_device__user=user)) |
......
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-08 17:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete ``is_pipeline`` boolean from Device and TestJob.

    The rest of this commit hard-codes ``is_pipeline`` to True at every
    call site, so the column no longer carries information and can be
    removed from both models.
    """

    # Must be applied after the migration that dropped TestJob._results_link.
    dependencies = [
        ('lava_scheduler_app', '0035_remove_testjob__results_link'),
    ]

    operations = [
        # Device.is_pipeline: every remaining device is a pipeline device.
        migrations.RemoveField(
            model_name='device',
            name='is_pipeline',
        ),
        # TestJob.is_pipeline: every remaining job is a pipeline job.
        migrations.RemoveField(
            model_name='testjob',
            name='is_pipeline',
        ),
    ]
......@@ -649,12 +649,6 @@ class Device(RestrictedResource):
on_delete=models.SET_NULL,
)
is_pipeline = models.BooleanField(
verbose_name="Pipeline device?",
default=False,
editable=True
)
def clean(self):
"""
Complies with the RestrictedResource constraints
......@@ -750,8 +744,6 @@ class Device(RestrictedResource):
return self.is_owned_by(user)
def is_valid(self, system=True):
if not self.is_pipeline:
return False # V1 config cannot be checked
rendered = self.load_configuration()
try:
validate_device(rendered)
......@@ -938,10 +930,6 @@ class Device(RestrictedResource):
return False
def get_health_check(self):
# Do not submit any new v1 job
if not self.is_pipeline:
return None
# Get the device dictionary
extends = self.get_extends()
if not extends:
......@@ -1158,8 +1146,7 @@ def _create_pipeline_job(job_data, user, taglist, device=None,
health_check=False,
user=user, is_public=public_state,
visibility=visibility,
priority=priority,
is_pipeline=True)
priority=priority)
job.save()
# need a valid job before the tags can be assigned, then it needs to be saved again.
for tag in Tag.objects.filter(name__in=taglist):
......@@ -1238,7 +1225,7 @@ def _pipeline_protocols(job_data, user, yaml_data=None): # pylint: disable=too-
allowed_devices = []
device_list = Device.objects.filter(
Q(device_type=device_type), Q(is_pipeline=True), ~Q(health=Device.HEALTH_RETIRED))
Q(device_type=device_type), ~Q(health=Device.HEALTH_RETIRED))
allowed_devices.extend(_check_submit_to_device(list(device_list), user))
if len(allowed_devices) < params['count']:
......@@ -1381,7 +1368,7 @@ class TestJob(RestrictedResource):
(Enhanced version of vmgroups.)
A Primary connection needs a real device (persistence).
"""
if not self.is_pipeline or not self.is_multinode or not self.definition:
if not self.is_multinode or not self.definition:
return False
job_data = yaml.load(self.definition)
return 'connection' in job_data
......@@ -1626,12 +1613,6 @@ class TestJob(RestrictedResource):
blank=True
)
is_pipeline = models.BooleanField(
verbose_name="Pipeline job?",
default=False,
editable=False
)
# calculated by the master validation process.
pipeline_compatibility = models.IntegerField(
default=0,
......@@ -1663,8 +1644,7 @@ class TestJob(RestrictedResource):
str(self.id))
def output_file(self):
filename = 'output.yaml' if self.is_pipeline else 'output.txt'
output_path = os.path.join(self.output_dir, filename)
output_path = os.path.join(self.output_dir, "output.yaml")
if os.path.exists(output_path):
return open(output_path, encoding='utf-8', errors='replace')
else:
......@@ -1690,10 +1670,7 @@ class TestJob(RestrictedResource):
@property
def results_link(self):
if self.is_pipeline:
return reverse("lava.results.testjob", args=[self.id])
else:
return None
return reverse("lava.results.testjob", args=[self.id])
@property
def essential_role(self): # pylint: disable=too-many-return-statements
......@@ -1770,14 +1747,14 @@ class TestJob(RestrictedResource):
# singlenode only
device_type = _get_device_type(user, job_data['device_type'])
allow = _check_submit_to_device(list(Device.objects.filter(
device_type=device_type, is_pipeline=True)), user)
device_type=device_type)), user)
if not allow:
raise DevicesUnavailableException("No devices of type %s have pipeline support." % device_type)
taglist = _get_tag_list(job_data.get('tags', []), True)
if taglist:
supported = _check_tags(taglist, device_type=device_type)
_check_tags_support(supported, allow)
if original_job and original_job.is_pipeline:
if original_job:
# Add old job absolute url to metadata for pipeline jobs.
job_url = str(original_job.get_absolute_url())
try:
......@@ -1796,16 +1773,11 @@ class TestJob(RestrictedResource):
Implement the schema constraints for visibility for pipeline jobs so that
admins cannot set a job into a logically inconsistent state.
"""
if self.is_pipeline:
# public settings must match
if self.is_public and self.visibility != TestJob.VISIBLE_PUBLIC:
raise ValidationError("is_public is set but visibility is not public.")
elif not self.is_public and self.visibility == TestJob.VISIBLE_PUBLIC:
raise ValidationError("is_public is not set but visibility is public.")
else:
if self.visibility != TestJob.VISIBLE_PUBLIC:
raise ValidationError("Only pipeline jobs support any value of visibility except the default "
"PUBLIC, even if the job and bundle are private.")
# public settings must match
if self.is_public and self.visibility != TestJob.VISIBLE_PUBLIC:
raise ValidationError("is_public is set but visibility is not public.")
elif not self.is_public and self.visibility == TestJob.VISIBLE_PUBLIC:
raise ValidationError("is_public is not set but visibility is public.")
return super(TestJob, self).clean()
def can_view(self, user):
......@@ -1826,9 +1798,6 @@ class TestJob(RestrictedResource):
return False
if self.is_public:
return True
if not self.is_pipeline:
# old jobs will be private, only pipeline extends beyond this level
return self.is_accessible_by(user)
logger = logging.getLogger('lava_scheduler_app')
if self.visibility == self.VISIBLE_PUBLIC:
# logical error
......@@ -1883,9 +1852,8 @@ class TestJob(RestrictedResource):
return self._can_admin(user) and self.state in states
def can_resubmit(self, user):
return self.is_pipeline and \
(user.is_superuser or
user.has_perm('lava_scheduler_app.cancel_resubmit_testjob'))
return (user.is_superuser or
user.has_perm('lava_scheduler_app.cancel_resubmit_testjob'))
def _generate_summary_mail(self):
domain = '???'
......@@ -2718,7 +2686,6 @@ class Notification(models.Model):
"start_time": str(self.test_job.start_time),
"end_time": str(self.test_job.end_time),
"submitter_username": self.test_job.submitter.username,
"is_pipeline": self.test_job.is_pipeline,
"failure_comment": self.test_job.failure_comment,
"priority": self.test_job.priority,
"description": self.test_job.description,
......@@ -2872,7 +2839,7 @@ def process_notifications(sender, **kwargs):
notification_state = [TestJob.STATE_RUNNING, TestJob.STATE_FINISHED]
# Send only for pipeline jobs.
# If it's a new TestJob, no need to send notifications.
if new_job.is_pipeline and new_job.id:
if new_job.id:
old_job = TestJob.objects.get(pk=new_job.id)
if new_job.state in notification_state and \
old_job.state != new_job.state:
......
......@@ -70,7 +70,6 @@ def device_post_handler(sender, **kwargs):
"state": instance.get_state_display(),
"device": instance.hostname,
"device_type": instance.device_type.name,
"pipeline": instance.is_pipeline,
}
current_job = instance.current_job()
if current_job is not None:
......@@ -110,7 +109,6 @@ def testjob_post_handler(sender, **kwargs):
"submit_time": instance.submit_time.isoformat(),
"submitter": str(instance.submitter),
"visibility": instance.get_visibility_display(),
"pipeline": instance.is_pipeline,
}
if instance.is_multinode:
data['sub_id'] = instance.sub_id
......
......@@ -552,7 +552,7 @@ class DeviceTable(LavaTable):
exclude = [
'user', 'group', 'is_public', 'device_version',
'physical_owner', 'physical_group', 'description',
'current_job', 'last_health_report_job', 'is_pipeline'
'current_job', 'last_health_report_job'
]
sequence = [
'hostname', 'worker_host', 'device_type', 'state',
......
......@@ -95,7 +95,7 @@ class TestSchedulerAPI(TestCaseWithFactory): # pylint: disable=too-many-ancesto
server = self.server_proxy('test', 'test')
self.assertEqual(
{'status': 'idle', 'job': None, 'offline_since': None, 'hostname': 'black01',
'offline_by': None, 'is_pipeline': False},
'offline_by': None, 'is_pipeline': True},
server.scheduler.get_device_status('black01'))
def test_type_aliases(self):
......
......@@ -74,7 +74,7 @@ class YamlFactory(ModelFactory):
if tags and not isinstance(tags, list):
tags = []
# a hidden device type will override is_public
device = Device(device_type=device_type, is_public=is_public, hostname=hostname, is_pipeline=True, **kw)
device = Device(device_type=device_type, is_public=is_public, hostname=hostname, **kw)
if tags:
device.tags = tags
if DEBUG:
......@@ -253,10 +253,8 @@ class TestPipelineSubmit(TestCaseWithFactory):
def test_exclusivity(self):
device = Device.objects.get(hostname="fakeqemu1")
self.assertTrue(device.is_pipeline)
self.assertFalse(device.is_exclusive)
device = Device.objects.get(hostname="fakeqemu3")
self.assertTrue(device.is_pipeline)
self.assertTrue(device.is_exclusive)
def test_context(self):
......@@ -293,7 +291,6 @@ class TestPipelineSubmit(TestCaseWithFactory):
self.factory.make_device(device_type=mustang_type, hostname=hostname)
device = Device.objects.get(hostname="fakemustang1")
self.assertEqual('mustang-uefi', device.device_type.name)
self.assertTrue(device.is_pipeline)
job_ctx = {
'tftp_mac': 'FF:01:00:69:AA:CC',
'extra_nfsroot_args': ',nolock',
......@@ -943,37 +940,6 @@ class TestYamlMultinode(TestCaseWithFactory):
job = TestJob.objects.get(id=job.id)
self.assertNotEqual(job.sub_id, '')
def test_mixed_multinode(self):
user = self.factory.make_user()
device_type = self.factory.make_device_type()
self.factory.make_device(device_type, 'fakeqemu1')
self.factory.make_device(device_type, 'fakeqemu2')
self.factory.make_device(device_type, 'fakeqemu3')
self.factory.make_device(device_type, 'fakeqemu4')
submission = yaml.load(open(
os.path.join(os.path.dirname(__file__), 'sample_jobs', 'kvm-multinode.yaml'), 'r'))
role_list = submission['protocols'][MultinodeProtocol.name]['roles']
for role in role_list:
if 'tags' in role_list[role]:
del role_list[role]['tags']
job_list = TestJob.from_yaml_and_user(yaml.dump(submission), user)
self.assertEqual(len(job_list), 2)
# make the list mixed
fakeqemu1 = Device.objects.get(hostname='fakeqemu1')
fakeqemu1.is_pipeline = False
fakeqemu1.save(update_fields=['is_pipeline'])
fakeqemu3 = Device.objects.get(hostname='fakeqemu3')
fakeqemu3.is_pipeline = False
fakeqemu3.save(update_fields=['is_pipeline'])
device_list = Device.objects.filter(device_type=device_type, is_pipeline=True)
self.assertEqual(len(device_list), 2)
self.assertIsInstance(device_list, RestrictedResourceQuerySet)
self.assertIsInstance(list(device_list), list)
job_list = TestJob.from_yaml_and_user(yaml.dump(submission), user)
self.assertEqual(len(job_list), 2)
for job in job_list:
self.assertEqual(job.requested_device_type, device_type)
def test_multinode_with_retired(self): # pylint: disable=too-many-statements
"""
check handling with retired devices in device_list
......@@ -992,64 +958,6 @@ class TestYamlMultinode(TestCaseWithFactory):
del role_list[role]['tags']
job_list = TestJob.from_yaml_and_user(yaml.dump(submission), user)
self.assertEqual(len(job_list), 2)
# make the list mixed
fakeqemu1 = Device.objects.get(hostname='fakeqemu1')
fakeqemu1.is_pipeline = False
fakeqemu1.save(update_fields=['is_pipeline'])
fakeqemu2 = Device.objects.get(hostname='fakeqemu2')
fakeqemu3 = Device.objects.get(hostname='fakeqemu3')
fakeqemu4 = Device.objects.get(hostname='fakeqemu4')
device_list = Device.objects.filter(device_type=device_type, is_pipeline=True)
self.assertEqual(len(device_list), 3)
self.assertIsInstance(device_list, RestrictedResourceQuerySet)
self.assertIsInstance(list(device_list), list)
allowed_devices = []
for device in list(device_list):
if _check_submit_to_device([device], user):
allowed_devices.append(device)
self.assertEqual(len(allowed_devices), 3)
self.assertIn(fakeqemu3, allowed_devices)
self.assertIn(fakeqemu4, allowed_devices)
self.assertIn(fakeqemu2, allowed_devices)
self.assertNotIn(fakeqemu1, allowed_devices)
# set one candidate device to RETIRED to force the bug
fakeqemu4.health = Device.HEALTH_RETIRED
fakeqemu4.save(update_fields=['health'])
# refresh the device_list
device_list = Device.objects.filter(device_type=device_type, is_pipeline=True).order_by('hostname')
allowed_devices = []
# test the old code to force the exception
try:
# by looping through in the test *and* in _check_submit_to_device
# the retired device in device_list triggers the exception.
for device in list(device_list):
if _check_submit_to_device([device], user):
allowed_devices.append(device)
except DevicesUnavailableException:
self.assertEqual(len(allowed_devices), 2)
self.assertIn(fakeqemu4, device_list)
self.assertEqual(fakeqemu4.health, Device.HEALTH_RETIRED)
else:
self.fail("Missed DevicesUnavailableException")
allowed_devices = []
allowed_devices.extend(_check_submit_to_device(list(device_list), user))
self.assertEqual(len(allowed_devices), 2)
self.assertIn(fakeqemu3, allowed_devices)
self.assertIn(fakeqemu2, allowed_devices)
self.assertNotIn(fakeqemu4, allowed_devices)
self.assertNotIn(fakeqemu1, allowed_devices)
allowed_devices = []
# test improvement as there is no point wasting memory with a Query containing Retired.
device_list = Device.objects.filter(
Q(device_type=device_type), Q(is_pipeline=True), ~Q(health=Device.HEALTH_RETIRED))
allowed_devices.extend(_check_submit_to_device(list(device_list), user))
self.assertEqual(len(allowed_devices), 2)
self.assertIn(fakeqemu3, allowed_devices)
self.assertIn(fakeqemu2, allowed_devices)
self.assertNotIn(fakeqemu4, allowed_devices)
self.assertNotIn(fakeqemu1, allowed_devices)
def test_multinode_v2_metadata(self):
device_type = self.factory.make_device_type()
......
......@@ -73,7 +73,7 @@ class TestHealthCheckScheduling(TestCase):
self.user = User.objects.create(username="user-01")
self.last_hc03 = TestJob.objects.create(health_check=True, actual_device=self.device03,
user=self.user, submitter=self.user,
start_time=timezone.now(),
start_time=timezone.now(), is_public=True,
state=TestJob.STATE_FINISHED, health=TestJob.HEALTH_COMPLETE)
self.device03.last_health_report_job = self.last_hc03
self.device03.save()
......@@ -209,7 +209,7 @@ class TestHealthCheckScheduling(TestCase):
# Create a job that should be scheduled now
j = TestJob.objects.create(requested_device_type=self.device_type01,
user=self.user, submitter=self.user,
user=self.user, submitter=self.user, is_public=True,
definition=_minimal_valid_job(None))
schedule(DummyLogger())
self.device01.refresh_from_db()
......@@ -221,7 +221,7 @@ class TestHealthCheckScheduling(TestCase):
# Create a job that should be scheduled after the health check
j = TestJob.objects.create(requested_device_type=self.device_type01,
user=self.user, submitter=self.user,
user=self.user, submitter=self.user, is_public=True,