perf-o-matic: generate dashboard images using Google Chart Tools
author    rniwa@webkit.org <rniwa@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 29 Feb 2012 06:06:55 +0000 (06:06 +0000)
committer rniwa@webkit.org <rniwa@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 29 Feb 2012 06:06:55 +0000 (06:06 +0000)
https://bugs.webkit.org/show_bug.cgi?id=79838

Reviewed by Hajime Morita.

Renamed RunsJSONGenerator to Runs and added the ability to generate parameters for the Google Chart Tools API.
Also added RunsChartHandler to url-fetch these images and DashboardImageHandler to serve them.
The images are stored in the DashboardImage model.

We can't flip the switch to use images yet because we don't create images on the fly (they're
generated only when runs are updated, i.e. when bots upload new results). We should be able to flip
the switch once this patch lands and all perf bots have cycled.

This preliminary design probably makes far too many calls to the Google Chart Tools server, but we
can easily move this task to the backend and run it via a cron job once we know it works.
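
In outline, the new pipeline is: a runs update enqueues a chart task, the task fetches a PNG from
the Google Chart Tools endpoint, and the bytes are stored under a composite key for later serving.
A minimal sketch of the fetch-and-store step (names match the patch; runs, branch, platform, test,
and display_days stand in for values the handler looks up):

    # Sketch only: what RunsChartHandler.post boils down to.
    import urllib

    params = runs.chart_params(display_days)          # Google Chart API parameters
    chart = urllib.urlopen('http://chart.googleapis.com/chart',
                           urllib.urlencode(params))  # POST; the response body is a PNG
    DashboardImage(key_name=DashboardImage.key_name(branch.id, platform.id,
                                                    test.id, display_days),
                   image=chart.read()).put()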

* Websites/webkit-perf.appspot.com/controller.py:
(schedule_runs_update):
(RunsUpdateHandler.post):
(RunsChartHandler):
(RunsChartHandler.get):
(RunsChartHandler.post):
(DashboardImageHandler):
(DashboardImageHandler.get):
(schedule_report_process):
* Websites/webkit-perf.appspot.com/json_generators.py:
(ManifestJSONGenerator.value):
(Runs):
(Runs.__init__):
(Runs.value):
(Runs.chart_params):
* Websites/webkit-perf.appspot.com/json_generators_unittest.py:
(RunsTest):
(RunsTest._create_results):
(RunsTest.test_generate_runs):
(RunsTest.test_value_without_results):
(RunsTest.test_value_with_results):
(RunsTest.test_run_from_build_and_result):
(RunsTest.test_chart_params_with_value):
(RunsTest.test_chart_params_with_value.split_as_int):
* Websites/webkit-perf.appspot.com/main.py:
* Websites/webkit-perf.appspot.com/models.py:
(PersistentCache.get_cache):
(DashboardImage):
(DashboardImage.key_name):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@109190 268f45cc-cd09-0410-ab3c-d52691b4dbfc

ChangeLog
Websites/webkit-perf.appspot.com/controller.py
Websites/webkit-perf.appspot.com/json_generators.py
Websites/webkit-perf.appspot.com/json_generators_unittest.py
Websites/webkit-perf.appspot.com/main.py
Websites/webkit-perf.appspot.com/models.py

index e46916c93876384c0bae950c96888c5361f0bdfe..7a5929d922b053c36bed070774aba50772b1c4e3 100644 (file)
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,51 @@
+2012-02-28  Ryosuke Niwa  <rniwa@webkit.org>
+
+        perf-o-matic: generate dashboard images using Google Chart Tools
+        https://bugs.webkit.org/show_bug.cgi?id=79838
+
+        Reviewed by Hajime Morita.
+
+        Renamed RunsJSONGenerator to Runs and added the ability to generate parameters for the Google Chart Tools API.
+        Also added RunsChartHandler to url-fetch these images and DashboardImageHandler to serve them.
+        The images are stored in the DashboardImage model.
+
+        We can't flip the switch to use images yet because we don't create images on the fly (they're
+        generated only when runs are updated, i.e. when bots upload new results). We should be able to flip
+        the switch once this patch lands and all perf bots have cycled.
+
+        This preliminary design probably makes far too many calls to the Google Chart Tools server, but we
+        can easily move this task to the backend and run it via a cron job once we know it works.
+
+        * Websites/webkit-perf.appspot.com/controller.py:
+        (schedule_runs_update):
+        (RunsUpdateHandler.post):
+        (RunsChartHandler):
+        (RunsChartHandler.get):
+        (RunsChartHandler.post):
+        (DashboardImageHandler):
+        (DashboardImageHandler.get):
+        (schedule_report_process):
+        * Websites/webkit-perf.appspot.com/json_generators.py:
+        (ManifestJSONGenerator.value):
+        (Runs):
+        (Runs.__init__):
+        (Runs.value):
+        (Runs.chart_params):
+        * Websites/webkit-perf.appspot.com/json_generators_unittest.py:
+        (RunsTest):
+        (RunsTest._create_results):
+        (RunsTest.test_generate_runs):
+        (RunsTest.test_value_without_results):
+        (RunsTest.test_value_with_results):
+        (RunsTest.test_run_from_build_and_result):
+        (RunsTest.test_chart_params_with_value):
+        (RunsTest.test_chart_params_with_value.split_as_int):
+        * Websites/webkit-perf.appspot.com/main.py:
+        * Websites/webkit-perf.appspot.com/models.py:
+        (PersistentCache.get_cache):
+        (DashboardImage):
+        (DashboardImage.key_name):
+
 2012-02-28  Dave Tu  <dtu@chromium.org>
 
         Add new GPU builders to flakiness dashboard.
index a8f186e5d44cb9510a08cc67af1efb9f155f73b6..6d9b45d1c487488a46918678ff70a19bc8d06d51 100644 (file)
--- a/Websites/webkit-perf.appspot.com/controller.py
+++ b/Websites/webkit-perf.appspot.com/controller.py
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import urllib
 import webapp2
 from google.appengine.api import taskqueue
 from google.appengine.ext import db
 
 from json_generators import DashboardJSONGenerator
 from json_generators import ManifestJSONGenerator
-from json_generators import RunsJSONGenerator
+from json_generators import Runs
 from models import Branch
+from models import DashboardImage
 from models import Platform
 from models import Test
 from models import PersistentCache
@@ -97,6 +99,7 @@ def cache_runs(test_id, branch_id, platform_id, cache):
 
 def schedule_runs_update(test_id, branch_id, platform_id):
     taskqueue.add(url='/api/test/runs/update', params={'id': test_id, 'branchid': branch_id, 'platformid': platform_id})
+    taskqueue.add(url='/api/test/runs/chart', params={'id': test_id, 'branchid': branch_id, 'platformid': platform_id})
 
 
 def _get_test_branch_platform_ids(handler):
@@ -125,7 +128,7 @@ class RunsUpdateHandler(webapp2.RequestHandler):
         assert platform
         assert test
 
-        cache_runs(test_id, branch_id, platform_id, RunsJSONGenerator(branch, platform, test.name).to_json())
+        cache_runs(test_id, branch_id, platform_id, Runs(branch, platform, test.name).to_json())
         self.response.out.write('OK')
 
 
@@ -141,5 +144,48 @@ class CachedRunsHandler(webapp2.RequestHandler):
             schedule_runs_update(test_id, branch_id, platform_id)
 
 
+class RunsChartHandler(webapp2.RequestHandler):
+    def get(self):
+        self.post()
+
+    def post(self):
+        self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'
+        test_id, branch_id, platform_id = _get_test_branch_platform_ids(self)
+
+        branch = model_from_numeric_id(branch_id, Branch)
+        platform = model_from_numeric_id(platform_id, Platform)
+        test = model_from_numeric_id(test_id, Test)
+        display_days = int(self.request.get('displayDays'))
+        assert branch
+        assert platform
+        assert test
+
+        params = Runs(branch, platform, test.name).chart_params(display_days)
+        dashboard_chart_file = urllib.urlopen('http://chart.googleapis.com/chart', urllib.urlencode(params))
+
+        DashboardImage(key_name=DashboardImage.key_name(branch.id, platform.id, test.id, display_days),
+            image=dashboard_chart_file.read()).put()
+
+        self.response.out.write('Fetched http://chart.googleapis.com/chart?%s' % urllib.urlencode(params))
+
+
+class DashboardImageHandler(webapp2.RequestHandler):
+    def get(self, test_id, branch_id, platform_id, display_days):
+        try:
+            branch_id = int(branch_id)
+            platform_id = int(platform_id)
+            test_id = int(test_id)
+            display_days = int(display_days)
+        except ValueError:
+            self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'
+            self.response.out.write('Failed')
+            return
+
+        self.response.headers['Content-Type'] = 'image/png'
+        image = DashboardImage.get_by_key_name(DashboardImage.key_name(branch_id, platform_id, test_id, display_days))
+        if image:
+            self.response.out.write(image.image)
+
+
 def schedule_report_process(log):
     taskqueue.add(url='/api/test/report/process', params={'id': log.key().id()})
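
A quick way to exercise the new endpoint outside a deployed instance is webapp2's Request.blank
helper. This is a sketch assuming 'app' is the WSGIApplication from main.py and the App Engine
testbed's datastore and urlfetch stubs are active; displayDays is passed explicitly because the
task enqueued by schedule_runs_update does not include it:

    # Sketch: drive RunsChartHandler through the WSGI app directly.
    import webapp2

    request = webapp2.Request.blank('/api/test/runs/chart',
        POST={'id': '1', 'branchid': '1', 'platformid': '1', 'displayDays': '7'})
    response = request.get_response(app)  # runs the handler synchronously
    print response.status_int, response.body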
index ffd6558dff083454d6c91fa0bc462cd276d4bce7..b2dca9b78b425dbd007def04059a7ba79cdd3651 100644 (file)
--- a/Websites/webkit-perf.appspot.com/json_generators.py
+++ b/Websites/webkit-perf.appspot.com/json_generators.py
@@ -28,6 +28,8 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 import json
+from datetime import datetime
+from datetime import timedelta
 from time import mktime
 
 from models import Build
@@ -118,20 +120,12 @@ class ManifestJSONGenerator(JSONGeneratorBase):
         return {'branchMap': self._branch_map, 'platformMap': self._platform_map, 'testMap': self._test_map}
 
 
-class RunsJSONGenerator(JSONGeneratorBase):
-    def __init__(self, branch, platform, test):
-        self._test_runs = []
-        self._averages = {}
-        values = []
-
-        for build, result in RunsJSONGenerator._generate_runs(branch, platform, test):
-            self._test_runs.append(RunsJSONGenerator._entry_from_build_and_result(build, result))
-            # FIXME: Calculate the average. In practice, we wouldn't have more than one value for a given revision.
-            self._averages[build.revision] = result.value
-            values.append(result.value)
-
-        self._min = min(values) if values else None
-        self._max = max(values) if values else None
+# FIXME: This isn't a JSON generator anymore. We should move it elsewhere or rename the file.
+class Runs(JSONGeneratorBase):
+    def __init__(self, branch, platform, test_name):
+        self._branch = branch
+        self._platform = platform
+        self._test_name = test_name
 
     @staticmethod
     def _generate_runs(branch, platform, test_name):
@@ -167,10 +161,58 @@ class RunsJSONGenerator(JSONGeneratorBase):
             builder_id, statistics]
 
     def value(self):
+        _test_runs = []
+        _averages = {}
+        values = []
+
+        for build, result in Runs._generate_runs(self._branch, self._platform, self._test_name):
+            _test_runs.append(Runs._entry_from_build_and_result(build, result))
+            # FIXME: Calculate the average. In practice, we wouldn't have more than one value for a given revision.
+            _averages[build.revision] = result.value
+            values.append(result.value)
+
+        _min = min(values) if values else None
+        _max = max(values) if values else None
+
         return {
-            'test_runs': self._test_runs,
-            'averages': self._averages,
-            'min': self._min,
-            'max': self._max,
+            'test_runs': _test_runs,
+            'averages': _averages,
+            'min': _min,
+            'max': _max,
             'date_range': None,  # Never used by common.js.
             'stat': 'ok'}
+
+    def chart_params(self, display_days, now=None):
+        chart_data_x = []
+        chart_data_y = []
+        end_time = now or datetime.now()
+        start_timestamp = mktime((end_time - timedelta(display_days)).timetuple())
+        end_timestamp = mktime(end_time.timetuple())
+
+        for build, result in self._generate_runs(self._branch, self._platform, self._test_name):
+            timestamp = mktime(build.timestamp.timetuple())
+            if timestamp < start_timestamp or timestamp > end_timestamp:
+                continue
+            chart_data_x.append(timestamp)
+            chart_data_y.append(result.value)
+
+        dates = [end_time + timedelta(day - display_days) for day in range(0, display_days + 1)]
+
+        y_max = max(chart_data_y) * 1.1
+        y_grid_step = y_max / 5
+        y_axis_label_step = int(y_grid_step + 0.5)  # This won't work for decimal numbers
+
+        return {
+            'cht': 'lxy',  # Line chart with explicit X and Y coordinates
+            'chxt': 'x,y',  # Display both X and Y axes
+            'chxl': '0:|' + '|'.join([date.strftime('%b %d') for date in dates]),  # X-axis labels
+            'chxr': '1,0,%f,%f' % (int(y_max + 0.5), y_axis_label_step),  # Y-axis range: axis 1, min=0, max, label step
+            'chds': '%f,%f,%f,%f' % (start_timestamp, end_timestamp, 0, y_max),  # X, Y data range
+            'chxs': '1,676767,11.167,0,l,676767',  # Y-axis labels: axis 1, color, font size, centered on tick, axis line without ticks, tick color
+            'chs': '360x240',  # Image size: 360px by 240px
+            'chco': 'ff0000',  # Plot line color
+            'chg': '%f,%f,0,0' % (100 / (len(dates) - 1), y_grid_step),  # X, Y grid line step sizes - max for X is 100.
+            'chls': '3',  # Line thickness
+            'chf': 'bg,s,eff6fd',  # Solid light-blue background fill
+            'chd': 't:' + ','.join([str(x) for x in chart_data_x]) + '|' + ','.join([str(y) for y in chart_data_y]),  # X, Y data
+        }
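
For concreteness, the Y-axis scaling above works out as follows for the sample values used in the
unit test below (a standalone arithmetic sketch; note that chart_params assumes at least one result
falls inside the window, since max() on an empty list would raise ValueError):

    # Sketch: reproduce chart_params' Y-axis scaling by hand.
    values = [50.0, 51.0, 52.0, 49.0, 48.0, 51.9, 50.7, 51.1]
    y_max = max(values) * 1.1                    # 57.2: 10% headroom above the peak
    y_grid_step = y_max / 5                      # 11.44: five horizontal grid bands
    y_axis_label_step = int(y_grid_step + 0.5)   # 11: rounded label step, as in chart_params
    print y_max, y_grid_step, y_axis_label_step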
index 4b64a789403d0a58171617fff2d6e73601044124..f9af990420b749c244d11b1b4da160266366bffa 100644 (file)
--- a/Websites/webkit-perf.appspot.com/json_generators_unittest.py
+++ b/Websites/webkit-perf.appspot.com/json_generators_unittest.py
@@ -33,10 +33,11 @@ import unittest
 
 from google.appengine.ext import testbed
 from datetime import datetime
+from datetime import timedelta
 from json_generators import JSONGeneratorBase
 from json_generators import DashboardJSONGenerator
 from json_generators import ManifestJSONGenerator
-from json_generators import RunsJSONGenerator
+from json_generators import Runs
 from models_unittest import DataStoreTestsBase
 from models import Branch
 from models import Build
@@ -185,12 +186,12 @@ class ManifestJSONGeneratorTest(DataStoreTestsBase):
             other_test.id: {'name': other_test.name, 'branchIds': [some_branch.id], 'platformIds': [some_platform.id]}})
 
 
-class RunsJSONGeneratorTest(DataStoreTestsBase):
-    def _create_results(self, branch, platform, builder, test_name, values):
+class RunsTest(DataStoreTestsBase):
+    def _create_results(self, branch, platform, builder, test_name, values, timestamps=None):
         results = []
         for i, value in enumerate(values):
             build = Build(branch=branch, platform=platform, builder=builder,
-                buildNumber=i, revision=100 + i, timestamp=datetime.now())
+                buildNumber=i, revision=100 + i, timestamp=timestamps[i] if timestamps else datetime.now())
             build.put()
             result = TestResult(name=test_name, build=build, value=value)
             result.put()
@@ -204,7 +205,7 @@ class RunsJSONGeneratorTest(DataStoreTestsBase):
 
         results = self._create_results(some_branch, some_platform, some_builder, 'some-test', [50.0, 51.0, 52.0, 49.0, 48.0])
         last_i = 0
-        for i, (build, result) in enumerate(RunsJSONGenerator._generate_runs(some_branch, some_platform, "some-test")):
+        for i, (build, result) in enumerate(Runs._generate_runs(some_branch, some_platform, "some-test")):
             self.assertEqual(build.buildNumber, i)
             self.assertEqual(build.revision, 100 + i)
             self.assertEqual(result.name, 'some-test')
@@ -217,7 +218,7 @@ class RunsJSONGeneratorTest(DataStoreTestsBase):
         some_platform = Platform.create_if_possible('some-platform', 'Some Platform')
         self.assertThereIsNoInstanceOf(Test)
         self.assertThereIsNoInstanceOf(TestResult)
-        self.assertEqual(RunsJSONGenerator(some_branch, some_platform, 'some-test').value(), {
+        self.assertEqual(Runs(some_branch, some_platform, 'some-test').value(), {
             'test_runs': [],
             'averages': {},
             'min': None,
@@ -231,7 +232,7 @@ class RunsJSONGeneratorTest(DataStoreTestsBase):
         some_builder = Builder.get(Builder.create('some-builder', 'Some Builder'))
         results = self._create_results(some_branch, some_platform, some_builder, 'some-test', [50.0, 51.0, 52.0, 49.0, 48.0])
 
-        value = RunsJSONGenerator(some_branch, some_platform, 'some-test').value()
+        value = Runs(some_branch, some_platform, 'some-test').value()
         self.assertEqualUnorderedList(value.keys(), ['test_runs', 'averages', 'min', 'max', 'date_range', 'stat'])
         self.assertEqual(value['stat'], 'ok')
         self.assertEqual(value['min'], 48.0)
@@ -274,28 +275,28 @@ class RunsJSONGeneratorTest(DataStoreTestsBase):
         build = create_build(1, 101)
         result = TestResult(name=test_name, value=123.0, build=build)
         result.put()
-        self._assert_entry(RunsJSONGenerator._entry_from_build_and_result(build, result), build, result, 123.0)
+        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 123.0)
 
         build = create_build(2, 102)
         result = TestResult(name=test_name, value=456.0, valueMedian=789.0, build=build)
         result.put()
-        self._assert_entry(RunsJSONGenerator._entry_from_build_and_result(build, result), build, result, 456.0)
+        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0)
 
         result.valueStdev = 7.0
         result.put()
-        self._assert_entry(RunsJSONGenerator._entry_from_build_and_result(build, result), build, result, 456.0)
+        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0)
 
         result.valueStdev = None
         result.valueMin = 123.0
         result.valueMax = 789.0
         result.put()
-        self._assert_entry(RunsJSONGenerator._entry_from_build_and_result(build, result), build, result, 456.0)
+        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0)
 
         result.valueStdev = 8.0
         result.valueMin = 123.0
         result.valueMax = 789.0
         result.put()
-        self._assert_entry(RunsJSONGenerator._entry_from_build_and_result(build, result), build, result, 456.0,
+        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0,
             statistics={'stdev': 8.0, 'min': 123.0, 'max': 789.0})
 
         result.valueMedian = 345.0  # Median is never used by the frontend.
@@ -303,9 +304,35 @@ class RunsJSONGeneratorTest(DataStoreTestsBase):
         result.valueMin = 123.0
         result.valueMax = 789.0
         result.put()
-        self._assert_entry(RunsJSONGenerator._entry_from_build_and_result(build, result), build, result, 456.0,
+        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0,
             statistics={'stdev': 8.0, 'min': 123.0, 'max': 789.0})
 
+    def test_chart_params_with_value(self):
+        some_branch = Branch.create_if_possible('some-branch', 'Some Branch')
+        some_platform = Platform.create_if_possible('some-platform', 'Some Platform')
+        some_builder = Builder.get(Builder.create('some-builder', 'Some Builder'))
+
+        start_time = datetime(2011, 2, 21, 12, 0, 0)
+        end_time = datetime(2011, 2, 28, 12, 0, 0)
+        results = self._create_results(some_branch, some_platform, some_builder, 'some-test',
+            [50.0, 51.0, 52.0, 49.0, 48.0, 51.9, 50.7, 51.1],
+            [start_time + timedelta(day) for day in range(0, 8)])
+
+        # Compare as ints despite the loss of precision, since the tests may otherwise fail due to rounding errors.
+        def split_as_int(string):
+            return [int(float(value)) for value in string.split(',')]
+
+        params = Runs(some_branch, some_platform, 'some-test').chart_params(7, end_time)
+        self.assertEqual(params['chxl'], '0:|Feb 21|Feb 22|Feb 23|Feb 24|Feb 25|Feb 26|Feb 27|Feb 28')
+        self.assertEqual(split_as_int(params['chxr']), [1, 0, 57, int(52 * 1.1 / 5 + 0.5)])
+        x_min, x_max, y_min, y_max = split_as_int(params['chds'])
+        self.assertEqual(datetime.fromtimestamp(x_min), start_time)
+        self.assertEqual(datetime.fromtimestamp(x_max), end_time)
+        self.assertEqual(y_min, 0)
+        self.assertEqual(y_max, int(52 * 1.1))
+        self.assertEqual(split_as_int(params['chg']), [int(100 / 7), int(52 * 1.1 / 5), 0, 0])
+
+
 
 if __name__ == '__main__':
     unittest.main()
index 0d784539c51cc58b909e7eace3c1b5f42bd3515e..2553663df85bb73f81c72e602e86143f24dc6b6b 100644 (file)
--- a/Websites/webkit-perf.appspot.com/main.py
+++ b/Websites/webkit-perf.appspot.com/main.py
@@ -26,8 +26,10 @@ from admin_handlers import MergeTestsHandler
 from controller import CachedDashboardHandler
 from controller import CachedManifestHandler
 from controller import CachedRunsHandler
+from controller import DashboardImageHandler
 from controller import DashboardUpdateHandler
 from controller import ManifestUpdateHandler
+from controller import RunsChartHandler
 from controller import RunsUpdateHandler
 from create_handler import CreateHandler
 from report_handler import ReportHandler
@@ -41,6 +43,7 @@ routes = [
     ('/admin/report-logs/?', ReportLogsHandler),
     ('/admin/create/(.*)', CreateHandler),
     (r'/admin/([A-Za-z\-]*)', AdminDashboardHandler),
+
     ('/api/user/is-admin', IsAdminHandler),
     ('/api/test/?', CachedManifestHandler),
     ('/api/test/update', ManifestUpdateHandler),
@@ -48,9 +51,11 @@ routes = [
     ('/api/test/report/process', ReportProcessHandler),
     ('/api/test/runs/?', CachedRunsHandler),
     ('/api/test/runs/update', RunsUpdateHandler),
+    ('/api/test/runs/chart', RunsChartHandler),
     ('/api/test/dashboard/?', CachedDashboardHandler),
     ('/api/test/dashboard/update', DashboardUpdateHandler),
-]
+
+    ('/images/dashboard/flot-(\d+)-(\d+)-(\d+)_(\d+).png', DashboardImageHandler)]
 
 
 def main():
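
For illustration, the new image route decomposes a request path into the four ids that
DashboardImageHandler.get receives, in order (the URL below is hypothetical):

    # Sketch: apply the route's capture groups to a sample URL.
    import re

    match = re.match(r'/images/dashboard/flot-(\d+)-(\d+)-(\d+)_(\d+).png',
                     '/images/dashboard/flot-5-1-2_30.png')
    test_id, branch_id, platform_id, display_days = match.groups()
    print test_id, branch_id, platform_id, display_days  # prints: 5 1 2 30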
index f9bb584a9728b0d3de49c18f768858cbadb8d632..6a54e50557c2677c46e2128dbdd1cefe6d634c92 100644 (file)
--- a/Websites/webkit-perf.appspot.com/models.py
+++ b/Websites/webkit-perf.appspot.com/models.py
@@ -322,3 +322,11 @@ class PersistentCache(db.Model):
             return None
         memcache.set(name, cache.value)
         return cache.value
+
+
+class DashboardImage(db.Model):
+    image = db.BlobProperty(required=True)
+
+    @staticmethod
+    def key_name(branch_id, platform_id, test_id, display_days):
+        return '%d:%d:%d:%d' % (branch_id, platform_id, test_id, display_days)
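
A minimal round-trip sketch for the new model (assumes an initialized datastore stub; the ids are
hypothetical):

    # Sketch: store and retrieve an image under the composite key.
    from google.appengine.ext import db
    from models import DashboardImage

    key = DashboardImage.key_name(1, 2, 5, 30)  # branch, platform, test, display days
    DashboardImage(key_name=key, image=db.Blob('\x89PNG...')).put()
    assert DashboardImage.get_by_key_name(key).image.startswith('\x89PNG')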