Add Jenkins plot

Adds a Celery task that builds a Jenkins job-schedule plot, plus a UI page to display and refresh it. Also renames TestRun/Report models to TestRailTestRun/TestRailReport and splits the TestRail processing logic into a dedicated pipeline module.

Related-PROD: PRODX-7403
Change-Id: Ib6984009560a8eba90b794efeb9cdee4b3fe8c0c
diff --git a/testrail_bot/control/admin.py b/testrail_bot/control/admin.py
index df2bfb3..89a2511 100644
--- a/testrail_bot/control/admin.py
+++ b/testrail_bot/control/admin.py
@@ -1,6 +1,6 @@
 from django.contrib import admin
 
-from .models import TestRun, Report
+from .models import TestRailTestRun, TestRailReport
 
-admin.site.register(TestRun)
-admin.site.register(Report)
+admin.site.register(TestRailTestRun)
+admin.site.register(TestRailReport)
diff --git a/testrail_bot/control/celery_tasks/jenkins_pipeline.py b/testrail_bot/control/celery_tasks/jenkins_pipeline.py
new file mode 100644
index 0000000..e3c9904
--- /dev/null
+++ b/testrail_bot/control/celery_tasks/jenkins_pipeline.py
@@ -0,0 +1,146 @@
+from datetime import datetime, timedelta, timezone
+import json
+import os
+
+from django.conf import settings
+from matplotlib import pyplot as plt
+from matplotlib import dates as mdates
+
+from jenkins import Jenkins
+
+from .. import models
+
+
+__all__ = ('update_plot',)
+
+
+jenkins_client = Jenkins(
+    "https://ci.mcp.mirantis.net/")
+
+VIEW_NAME = "MCP2.0 Openstack Periodic CI"
+
+
+def build_time_obj(original_obj=None):
+    def get_attr(attr):
+        return getattr(original_obj, attr, 0)
+
+    return datetime(year=2000, month=1, day=1, hour=get_attr("hour"),
+                    minute=get_attr("minute"), second=get_attr("second"))
+
+
+def build_start_time(timestamp):
+    return build_time_obj(datetime.utcfromtimestamp(
+        timestamp / 1000)).replace(tzinfo=timezone.utc).timestamp()
+
+
+def process_build(job_name, build):
+    build_info = jenkins_client.get_build_info(job_name, build["number"])
+    if build_info["result"] != "SUCCESS":
+        return None
+
+    return build_start_time(build_info["timestamp"]), \
+        build_info["duration"] / 1000
+
+
+def calculate_average(values):
+    return sum(values) / len(values)
+
+
+def process_job(job_name):
+    builds = jenkins_client.get_job_info(job_name)["builds"]
+    builds_info = (process_build(job_name, build) for build in builds)
+
+    start_times, durations = zip(*filter(None, builds_info))
+
+    avg_start_time = datetime.utcfromtimestamp(
+        calculate_average(start_times))
+    return {"duration": calculate_average(durations),
+            "start_time": avg_start_time}
+
+
+def get_aggregated_build_stats():
+    return {job_name["name"]: process_job(job_name["name"])
+            for job_name in jenkins_client.get_jobs(view_name=VIEW_NAME)}
+
+
+def get_lines(current, standard_datetime, next_day):
+    start_time = current["start_time"]
+    end = start_time + timedelta(seconds=current["duration"])
+
+    if end >= next_day:
+        return [
+            (standard_datetime, standard_datetime + (end - next_day)),
+            (start_time, next_day - timedelta(seconds=1))]
+
+    return [(start_time, end)]
+
+
+def build_data_for_jobs_time_plot(jobs):
+    standard_datetime = build_time_obj()
+    next_day = standard_datetime + timedelta(days=1)
+    return {job_name: get_lines(jobs[job_name], standard_datetime, next_day)
+            for job_name in jobs}
+
+
+def draw_plot(plot_data):
+    fig, ax = plt.subplots()
+
+    for index, (job_name, data) in enumerate(plot_data.items(), 1):
+        for start, end in data:
+            ax.plot([start, end], [index, index], color="#68a39f", linewidth=6)
+
+    hours = mdates.HourLocator()
+    hours_fmt = mdates.DateFormatter("%H")
+    ax.xaxis.set_major_locator(hours)
+    ax.xaxis.set_major_formatter(hours_fmt)
+
+    ax.format_ydata = lambda x: int(x)
+    ax.format_xdata = mdates.DateFormatter("%H")
+
+    # Set date limits
+    start_time = build_time_obj()
+    ax.set_xlim(
+        start_time, start_time + timedelta(days=1) + timedelta(seconds=1))
+
+    # Set y axes limits
+    jobs_num = len(plot_data) + 1
+    ax.set_ylim(0, jobs_num)
+
+    ax.set_yticks(range(1, jobs_num))
+    ax.yaxis.tick_right()
+
+    fig.autofmt_xdate()
+
+    # Enable grid
+    ax.grid(True, color="#f9f2f2")
+
+    # Save figure
+    path = os.path.join(settings.STATIC_ROOT, "plot.png")
+    plt.savefig(path, dpi=300)
+
+
+def save_job_names(plot_data_keys):
+    job_names_path = os.path.join(models.fs.location, "job_names.txt")
+
+    with open(job_names_path, "w") as f:
+        json.dump(plot_data_keys, f)
+
+
+def update_plot():
+    try:
+        jobs_dict = get_aggregated_build_stats()
+        plot_data = build_data_for_jobs_time_plot(jobs_dict)
+        draw_plot(plot_data)
+
+        save_job_names(list(plot_data.keys()))
+
+        try:
+            log_record = models.ActionLog.objects.get(
+                name="update_jenkins_plot")
+        except models.ActionLog.DoesNotExist:
+            log_record = models.ActionLog(name="update_jenkins_plot")
+        log_record.date = datetime.now()
+        log_record.save()
+    finally:
+        update = models.ActionLog.objects.get(name="update_plot_started")
+        update.delete()
diff --git a/testrail_bot/control/celery_tasks/tasks.py b/testrail_bot/control/celery_tasks/tasks.py
index 08308f9..9e68718 100644
--- a/testrail_bot/control/celery_tasks/tasks.py
+++ b/testrail_bot/control/celery_tasks/tasks.py
@@ -1,131 +1,17 @@
 from __future__ import absolute_import, unicode_literals
 
-import difflib
-
 from celery import shared_task
 
-from .. import models
 
-from . import filters
-from . import api
-from .enums import StatusEnum
-
-
-def finish_report(report):
-    report.finished = True
-    report.save()
-
-
-def apply_filters(data, test_run):
-    if test_run.filter_last_traceback:
-        data = filters.last_traceback_filter(data)
-
-    if test_run.ip_filter:
-        data = filters.filter_ip(data)
-
-    if test_run.uuid_filter:
-        data = filters.filter_uuid(data)
-
-    if test_run.filter_func:
-        exec(test_run.filter_func)
-        data = locals()["custom_filter"](data)
-    return data
-
-
-def process_plan(plan_id, case_id, last_comment, run):
-    run_id = api.get_run_id(api.get_entries(plan_id), run.run_name)
-    if not run_id:
-        return None, -1.0
-
-    results = api.get_result_for_case(run_id, case_id)
-
-    if not results:
-        return None, -2.0
-
-    status_code = str(results[0]["status_id"])
-    if status_code not in [StatusEnum.test_failed, StatusEnum.product_failed]:
-        return None, -3.0
-
-    comment = apply_filters(results[-1]["comment"], run)
-
-    ratio = difflib.SequenceMatcher(
-        lambda symbol: symbol in [" ", ",", "\n"],
-        last_comment, comment, autojunk=False).ratio()
-    if ratio > 0.9:
-        return results[0], ratio
-    return None, ratio
+from . import jenkins_pipeline
+from . import testrail_pipeline
 
 
 @shared_task
 def process_run(run_id, report_id, path, run_date):
-    report = models.Report.objects.get(pk=report_id)
-    with open(path, "w") as f:
-        test_run = models.TestRun.objects.get(pk=run_id)
-        f.write("Start processing {}\n".format(test_run.run_name))
-        f.flush()
+    testrail_pipeline.process_test_run(run_id, report_id, path, run_date)
 
-        project_id = api.get_project_id(test_run.project_name)
-        if not project_id:
-            f.write("Incorrect Project {}. Stopping processing\n".format(
-                test_run.project_name))
-            f.flush()
-            finish_report(report)
-            return
 
-        created_by_id = test_run.created_by_id
-        kw = {"limit": 100, "created_before": int(run_date)}
-        if created_by_id:
-            kw["created_by"] = created_by_id
-        plans = api.get_plans(project_id, test_run.plan_name, **kw)
-
-        last_plan = plans[0]
-        last_run_id = api.get_run_id(
-            api.get_entries(last_plan), test_run.run_name)
-        if not last_run_id:
-            f.write("No {} in {} plan\n".format(
-                test_run.run_name, last_plan))
-            f.flush()
-            finish_report(report)
-            return
-
-        failed_tests = api.get_failed_tests(last_run_id)
-        for test in failed_tests:
-            case_id = test["case_id"]
-
-            f.write("Processing test with id {}\n".format(test["id"]))
-            f.flush()
-
-            last_result = api.get_result_for_case(last_run_id, case_id)[0]
-
-            last_comment = apply_filters(
-                last_result["comment"], test_run)
-
-            for plan_id in plans[1:]:
-                sim_result, ratio = process_plan(
-                    plan_id, case_id, last_comment, test_run)
-                if sim_result:
-                    update_dict = {
-                        "status_id": sim_result["status_id"],
-                        "comment": "Marked by TestRailBot because "
-                                   "of similarity with test {} {}%".format(
-                            sim_result["test_id"], round(100.0 * ratio, 2)),
-                        "defects": sim_result["defects"]
-                    }
-                    f.write("Found similarity: {}\n".format(update_dict))
-                    f.flush()
-                    api.add_result(test["id"], update_dict)
-                    break
-                else:
-                    f.write(
-                        "Similarity not found due to similarity:{}%\n".format(
-                            round(100.0 * ratio, 2)))
-                    f.flush()
-            else:
-                f.write("Automatic test processing failed. Please process "
-                        "test manualy <a href=https://mirantis.testrail.com/"
-                        "index.php?/tests/view/{test_id}>{test_id}"
-                        "</a>\n".format(test_id=test["id"]))
-                f.flush()
-        f.write("Test processing finished")
-        f.flush()
-        finish_report(report)
+@shared_task
+def update_plot_data():
+    jenkins_pipeline.update_plot()
diff --git a/testrail_bot/control/celery_tasks/api.py b/testrail_bot/control/celery_tasks/test_rail_api.py
similarity index 100%
rename from testrail_bot/control/celery_tasks/api.py
rename to testrail_bot/control/celery_tasks/test_rail_api.py
diff --git a/testrail_bot/control/celery_tasks/testrail_pipeline.py b/testrail_bot/control/celery_tasks/testrail_pipeline.py
new file mode 100644
index 0000000..83d25a4
--- /dev/null
+++ b/testrail_bot/control/celery_tasks/testrail_pipeline.py
@@ -0,0 +1,163 @@
+import difflib
+
+from . import filters
+from .enums import StatusEnum
+from . import test_rail_api
+
+from .. import models
+
+__all__ = ("process_test_run",)
+
+
+def finish_report(report):
+    report.finished = True
+    report.save()
+
+
+def apply_filters(data, test_run):
+    if test_run.filter_last_traceback:
+        data = filters.last_traceback_filter(data)
+
+    if test_run.ip_filter:
+        data = filters.filter_ip(data)
+
+    if test_run.uuid_filter:
+        data = filters.filter_uuid(data)
+
+    if test_run.filter_func:
+        exec(test_run.filter_func)
+        data = locals()["custom_filter"](data)
+    return data
+
+
+def process_plan(plan_id, case_id, last_comment, run):
+    run_id = test_rail_api.get_run_id(test_rail_api.get_entries(plan_id), run.run_name)
+    if not run_id:
+        return None, -1.0
+
+    results = test_rail_api.get_result_for_case(run_id, case_id)
+
+    if not results:
+        return None, -2.0
+
+    status_code = str(results[0]["status_id"])
+    if status_code not in [StatusEnum.test_failed, StatusEnum.product_failed]:
+        return None, -3.0
+
+    comment = apply_filters(results[-1]["comment"], run)
+
+    ratio = difflib.SequenceMatcher(
+        lambda symbol: symbol in [" ", ",", "\n"],
+        last_comment, comment, autojunk=False).ratio()
+    if ratio > 0.9:
+        return results[0], ratio
+    return None, ratio
+
+
+def get_project_id(f, test_run, report):
+    project_id = test_rail_api.get_project_id(test_run.project_name)
+    if not project_id:
+        f.write("Incorrect Project {}. Stopping processing\n".format(
+            test_run.project_name))
+        f.flush()
+        finish_report(report)
+        return None
+    return project_id
+
+
+def get_plans(test_run, run_date, project_id):
+    created_by_id = test_run.created_by_id
+    kw = {"limit": 100, "created_before": int(run_date)}
+    if created_by_id:
+        kw["created_by"] = created_by_id
+    return test_rail_api.get_plans(project_id, test_run.plan_name, **kw)
+
+
+def get_last_run_id(f, last_plan, test_run, report):
+    last_run_id = test_rail_api.get_run_id(
+        test_rail_api.get_entries(last_plan), test_run.run_name)
+    if not last_run_id:
+        f.write("No {} in {} plan\n".format(
+            test_run.run_name, last_plan))
+        f.flush()
+        finish_report(report)
+        return None
+    return last_run_id
+
+
+def get_last_comment(last_run_id, case_id, test_run):
+    last_result = test_rail_api.get_result_for_case(
+        last_run_id, case_id)[0]
+
+    return apply_filters(
+        last_result["comment"], test_run)
+
+
+def process_old_test(f, plan_id, case_id, last_comment, test_run, test):
+    sim_result, ratio = process_plan(
+        plan_id, case_id, last_comment, test_run)
+    if sim_result:
+        update_dict = {
+            "status_id": sim_result["status_id"],
+            "comment": "Marked by TestRailBot because "
+                       "of similarity with test {} {}%".format(
+                sim_result["test_id"], round(100.0 * ratio, 2)),
+            "defects": sim_result["defects"]
+        }
+        f.write("Found similarity: {}\n".format(update_dict))
+        f.flush()
+        test_rail_api.add_result(test["id"], update_dict)
+        return True
+    else:
+        f.write(
+            "Similarity not found due to similarity:{}%\n".format(
+                round(100.0 * ratio, 2)))
+        f.flush()
+        return False
+
+
+def process_test(f, test, last_run_id, test_run, plans):
+    case_id = test["case_id"]
+
+    f.write("Processing test with id {}\n".format(test["id"]))
+    f.flush()
+
+    last_comment = get_last_comment(last_run_id, case_id, test_run)
+
+    for plan_id in plans[1:]:
+        found = process_old_test(
+            f, plan_id, case_id, last_comment, test_run, test)
+        if found:
+            break
+    else:
+        f.write("Automatic test processing failed. Please process "
+                "test manualy <a href=https://mirantis.testrail.com/"
+                "index.php?/tests/view/{test_id}>{test_id}"
+                "</a>\n".format(test_id=test["id"]))
+        f.flush()
+
+
+def process_test_run(run_id, report_id, path, run_date):
+    report = models.TestRailReport.objects.get(pk=report_id)
+    with open(path, "w") as f:
+        test_run = models.TestRailTestRun.objects.get(pk=run_id)
+        f.write("Start processing {}\n".format(test_run.run_name))
+        f.flush()
+
+        project_id = get_project_id(f, test_run, report)
+        if not project_id:
+            return
+
+        plans = get_plans(test_run, run_date, project_id)
+
+        last_plan = plans[0]
+        last_run_id = get_last_run_id(f, last_plan, test_run, report)
+        if not last_run_id:
+            return
+
+        failed_tests = test_rail_api.get_failed_tests(last_run_id)
+        for test in failed_tests:
+            process_test(f, test, last_run_id, test_run, plans)
+        f.write("Test processing finished")
+        f.flush()
+        finish_report(report)
diff --git a/testrail_bot/control/forms.py b/testrail_bot/control/forms.py
index e3d47da..00567e2 100644
--- a/testrail_bot/control/forms.py
+++ b/testrail_bot/control/forms.py
@@ -1,11 +1,11 @@
 from datetime import date
 from django import forms
-from .models import TestRun
+from .models import TestRailTestRun
 
 
 class TestRunForm(forms.ModelForm):
     class Meta:
-        model = TestRun
+        model = TestRailTestRun
         fields = "__all__"
         labels = {
             "project_name": "Name of the project",
diff --git a/testrail_bot/control/migrations/0016_auto_20200924_1021.py b/testrail_bot/control/migrations/0016_auto_20200924_1021.py
new file mode 100644
index 0000000..3fb3db0
--- /dev/null
+++ b/testrail_bot/control/migrations/0016_auto_20200924_1021.py
@@ -0,0 +1,29 @@
+# Generated by Django 3.0.7 on 2020-09-24 10:21
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('control', '0015_auto_20200731_1818'),
+    ]
+
+    operations = [
+        migrations.CreateModel(
+            name='ActionLog',
+            fields=[
+                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+                ('name', models.CharField(max_length=500)),
+                ('date', models.DateTimeField()),
+            ],
+        ),
+        migrations.RenameModel(
+            old_name='Report',
+            new_name='TestRailReport',
+        ),
+        migrations.RenameModel(
+            old_name='TestRun',
+            new_name='TestRailTestRun',
+        ),
+    ]
diff --git a/testrail_bot/control/migrations/0017_auto_20200925_1255.py b/testrail_bot/control/migrations/0017_auto_20200925_1255.py
new file mode 100644
index 0000000..d2d77a7
--- /dev/null
+++ b/testrail_bot/control/migrations/0017_auto_20200925_1255.py
@@ -0,0 +1,18 @@
+# Generated by Django 3.0.7 on 2020-09-25 12:55
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('control', '0016_auto_20200924_1021'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='actionlog',
+            name='date',
+            field=models.DateTimeField(null=True),
+        ),
+    ]
diff --git a/testrail_bot/control/models.py b/testrail_bot/control/models.py
index 328ed29..7068d38 100644
--- a/testrail_bot/control/models.py
+++ b/testrail_bot/control/models.py
@@ -2,7 +2,7 @@
 from django.db import models
 
 
-class TestRun(models.Model):
+class TestRailTestRun(models.Model):
     project_name = models.CharField(max_length=300)
     plan_name = models.CharField(max_length=300)
     run_name = models.CharField(max_length=300)
@@ -16,9 +16,14 @@
 fs = FileSystemStorage()
 
 
-class Report(models.Model):
+class TestRailReport(models.Model):
     path = models.FileField(storage=fs, null=True, blank=True, max_length=500)
-    test_run = models.ForeignKey(TestRun, on_delete=models.CASCADE)
+    test_run = models.ForeignKey(TestRailTestRun, on_delete=models.CASCADE)
     report_name = models.CharField(max_length=300)
     finished = models.BooleanField(default=False)
     created_at = models.DateTimeField(auto_now_add=True)
+
+
+class ActionLog(models.Model):
+    name = models.CharField(max_length=500)
+    date = models.DateTimeField(null=True)
diff --git a/testrail_bot/control/templates/base.html b/testrail_bot/control/templates/base.html
index e21330b..927d912 100644
--- a/testrail_bot/control/templates/base.html
+++ b/testrail_bot/control/templates/base.html
@@ -24,6 +24,7 @@
         <li><a href="{% url 'create_run' %}">Create New Test Run</a></li>
         <li><a href="{% url 'list_reports' %}">Reports</a></li>
         <li><a href="{% url 'help' %}">Help</a></li>
+        <li><a href="{% url 'jenkins_plot' %}">Jenkins plot</a></li>
 
     </ul>
   </div>
diff --git a/testrail_bot/control/templates/control/jenkins_plot.html b/testrail_bot/control/templates/control/jenkins_plot.html
new file mode 100644
index 0000000..2a9bc5c
--- /dev/null
+++ b/testrail_bot/control/templates/control/jenkins_plot.html
@@ -0,0 +1,25 @@
+{% extends "base.html" %}
+{% load static %}
+{% block head %}
+<script src="{% static 'plot.js' %}" defer></script>
+{% endblock %}
+{% block section %}
+<div>
+    <p>Last updated: {{ update_date }}</p>
+    {% if update_started %}
+    <button class="btn btn-danger" disabled type="submit">Update in progress</button>
+    {% else %}
+    <button class="btn btn-danger" type="submit">Update jenkins data</button>
+    {% endif %}
+</div>
+<div>
+    <img src="{% static 'plot.png' %}" width="1300" height="800">
+</div>
+<div class="container">
+    <div class="row">
+        {% for index, job_name in job_names %}
+        <div class="col-lg-6"><b>{{ index}}</b>: {{ job_name }}</div>
+        {% endfor %}
+    </div>
+</div>
+{% endblock %}
\ No newline at end of file
diff --git a/testrail_bot/control/urls.py b/testrail_bot/control/urls.py
index d530635..0807794 100644
--- a/testrail_bot/control/urls.py
+++ b/testrail_bot/control/urls.py
@@ -11,4 +11,6 @@
     path("reports/<int:report_id>/", views.single_report, name="single_report"),
     path('index/', views.index, name='index'),
     path("help/", views.show_help, name="help"),
+    path("update_jenkins_plot", views.update_jenkins_plot, name="update_jenkins"),
+    path("jenkins_plot", views.jenkins_plot, name="jenkins_plot")
 ]
diff --git a/testrail_bot/control/views.py b/testrail_bot/control/views.py
index f813f86..67a368d 100644
--- a/testrail_bot/control/views.py
+++ b/testrail_bot/control/views.py
@@ -6,11 +6,11 @@
 
 from . import models
 from . import forms
-from .celery_tasks.tasks import process_run
+from .celery_tasks.tasks import process_run, update_plot_data
 
 
 def index(request):
-    runs = models.TestRun.objects.all()
+    runs = models.TestRailTestRun.objects.all()
     return render(request, "control/index.html", {"runs": runs})
 
 
@@ -19,7 +19,7 @@
 
 
 def single_run(request, run_id):
-    run = models.TestRun.objects.get(pk=run_id)
+    run = models.TestRailTestRun.objects.get(pk=run_id)
     if request.method == "POST":
         form = forms.TestRunForm(request.POST, instance=run)
         if form.is_valid():
@@ -45,12 +45,12 @@
 
 
 def list_reports(request):
-    reports = models.Report.objects.order_by("-created_at").all()
+    reports = models.TestRailReport.objects.order_by("-created_at").all()
     return render(request, "control/reports.html", {"reports": reports})
 
 
 def single_report(request, report_id):
-    report = models.Report.objects.get(pk=report_id)
+    report = models.TestRailReport.objects.get(pk=report_id)
     data = report.path.read().decode("utf-8")
     if request.method == "POST" and request.is_ajax():
         return HttpResponse(
@@ -61,7 +61,7 @@
 
 
 def submit_run(request, run_id):
-    run = models.TestRun.objects.get(pk=run_id)
+    run = models.TestRailTestRun.objects.get(pk=run_id)
     report_name = "{}-{}".format(
         run.run_name, datetime.datetime.isoformat(datetime.datetime.now()))
     path = os.path.join(models.fs.location, report_name)
@@ -74,7 +74,7 @@
             form.cleaned_data["timestamp"] + datetime.timedelta(days=1),
             datetime.datetime.min.time()).timestamp()
 
-    report = models.Report(
+    report = models.TestRailReport(
         test_run=run,
         report_name=report_name,
         path=path)
@@ -86,3 +86,43 @@
 
 def show_help(request):
     return render(request, "control/help.html")
+
+
+def update_jenkins_plot(request):
+    try:
+        models.ActionLog.objects.get(name="update_plot_started")
+        return HttpResponse("Update in progress", status=403)
+    except models.ActionLog.DoesNotExist:
+        pass
+    update = models.ActionLog(
+        name="update_plot_started", date=datetime.datetime.now())
+    update.save()
+    update_plot_data.delay()
+    return HttpResponse("Started Update", status=200)
+
+
+def jenkins_plot(request):
+    try:
+        update_date = models.ActionLog.objects.get(
+            name="update_jenkins_plot").date
+    except models.ActionLog.DoesNotExist:
+        update_date = None
+    try:
+        models.ActionLog.objects.get(name="update_plot_started")
+        update_started = True
+    except models.ActionLog.DoesNotExist:
+        update_started = False
+
+    job_names_path = os.path.join(models.fs.location, "job_names.txt")
+    job_names = []
+    if os.path.exists(job_names_path):
+        try:
+            with open(job_names_path, "r") as f:
+                job_names = json.load(f)
+        except:
+            pass
+
+    return render(
+        request, "control/jenkins_plot.html",
+        {"update_date": update_date, "update_started": update_started,
+         "job_names": enumerate(job_names, 1)})