Turn Runs class into a proper model to implement incremental JSON update
authorrniwa@webkit.org <rniwa@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Thu, 8 Mar 2012 04:27:19 +0000 (04:27 +0000)
committerrniwa@webkit.org <rniwa@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Thu, 8 Mar 2012 04:27:19 +0000 (04:27 +0000)
https://bugs.webkit.org/show_bug.cgi?id=80364

Reviewed by Hajime Morita.

Move Runs from json_generators.py to models.py and turn it into a proper Model.

Now most JSON responses are held by Runs objects instead of PersistentCache.
Added some tests around update_or_insert and json_by_ids to test PersistentCache-like
behavior but there should be no user-visible behavioral differences.

* Websites/webkit-perf.appspot.com/controller.py:
(RunsUpdateHandler.post):
(CachedRunsHandler.get):
(RunsChartHandler.post):
* Websites/webkit-perf.appspot.com/json_generators.py:
(ManifestJSONGenerator.value):
* Websites/webkit-perf.appspot.com/json_generators_unittest.py:
(ManifestJSONGeneratorTest.test_value_two_tests):
* Websites/webkit-perf.appspot.com/models.py:
(Test):
(Runs):
(Runs._generate_runs):
(Runs._entry_from_build_and_result):
(Runs._key_name):
(Runs.update_or_insert):
(Runs.json_by_ids):
(Runs.to_json):
(Runs.chart_params):
* Websites/webkit-perf.appspot.com/models_unittest.py:
(RunsTest):
(RunsTest.setUp):
(RunsTest._create_results):
(RunsTest.test_generate_runs):
(RunsTest.test_update_or_insert):
(RunsTest.test_json_by_ids):
(RunsTest.test_to_json_without_results):
(RunsTest.test_to_json_with_results):
(RunsTest._assert_entry):
(RunsTest.test_run_from_build_and_result):
(RunsTest.test_run_from_build_and_result.create_build):
(RunsTest.test_chart_params_with_value):
(RunsTest.test_chart_params_with_value.split_as_int):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@110142 268f45cc-cd09-0410-ab3c-d52691b4dbfc

ChangeLog
Websites/webkit-perf.appspot.com/controller.py
Websites/webkit-perf.appspot.com/json_generators.py
Websites/webkit-perf.appspot.com/json_generators_unittest.py
Websites/webkit-perf.appspot.com/models.py
Websites/webkit-perf.appspot.com/models_unittest.py

index 7036cdd..518074e 100644 (file)
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,49 @@
+2012-03-05  Ryosuke Niwa  <rniwa@webkit.org>
+
+        Turn Runs class into a proper model to implement incremental JSON update
+        https://bugs.webkit.org/show_bug.cgi?id=80364
+
+        Reviewed by Hajime Morita.
+
+        Move Runs from json_generators.py to models.py and turn it into a proper Model.
+
+        Now most JSON responses are held by Runs objects instead of PersistentCache.
+        Added some tests around update_or_insert and json_by_ids to test PersistentCache-like
+        behavior but there should be no user-visible behavioral differences.
+
+        * Websites/webkit-perf.appspot.com/controller.py:
+        (RunsUpdateHandler.post):
+        (CachedRunsHandler.get):
+        (RunsChartHandler.post):
+        * Websites/webkit-perf.appspot.com/json_generators.py:
+        (ManifestJSONGenerator.value):
+        * Websites/webkit-perf.appspot.com/json_generators_unittest.py:
+        (ManifestJSONGeneratorTest.test_value_two_tests):
+        * Websites/webkit-perf.appspot.com/models.py:
+        (Test):
+        (Runs):
+        (Runs._generate_runs):
+        (Runs._entry_from_build_and_result):
+        (Runs._key_name):
+        (Runs.update_or_insert):
+        (Runs.json_by_ids):
+        (Runs.to_json):
+        (Runs.chart_params):
+        * Websites/webkit-perf.appspot.com/models_unittest.py:
+        (RunsTest):
+        (RunsTest.setUp):
+        (RunsTest._create_results):
+        (RunsTest.test_generate_runs):
+        (RunsTest.test_update_or_insert):
+        (RunsTest.test_json_by_ids):
+        (RunsTest.test_to_json_without_results):
+        (RunsTest.test_to_json_with_results):
+        (RunsTest._assert_entry):
+        (RunsTest.test_run_from_build_and_result):
+        (RunsTest.test_run_from_build_and_result.create_build):
+        (RunsTest.test_chart_params_with_value):
+        (RunsTest.test_chart_params_with_value.split_as_int):
+
 2012-03-07  Simon Hausmann  <simon.hausmann@nokia.com>
 
         [Qt] Fix compilation without QtQuick1
index 3436050..71d450b 100644 (file)
@@ -34,12 +34,12 @@ from google.appengine.ext import db
 
 from json_generators import DashboardJSONGenerator
 from json_generators import ManifestJSONGenerator
-from json_generators import Runs
 from models import Branch
 from models import DashboardImage
+from models import PersistentCache
 from models import Platform
+from models import Runs
 from models import Test
-from models import PersistentCache
 from models import model_from_numeric_id
 
 
@@ -116,7 +116,7 @@ class RunsUpdateHandler(webapp2.RequestHandler):
         assert platform
         assert test
 
-        PersistentCache.set_cache(Test.cache_key(test_id, branch_id, platform_id), Runs(branch, platform, test.name).to_json())
+        Runs.update_or_insert(branch, platform, test)
         self.response.out.write('OK')
 
 
@@ -125,10 +125,10 @@ class CachedRunsHandler(webapp2.RequestHandler):
         self.response.headers['Content-Type'] = 'application/json'
 
         test_id, branch_id, platform_id = _get_test_branch_platform_ids(self)
-        runs = PersistentCache.get_cache(Test.cache_key(test_id, branch_id, platform_id))
+        runs = Runs.json_by_ids(branch_id, platform_id, test_id)
         if runs:
             self.response.out.write(runs)
-        else:
+        elif model_from_numeric_id(branch_id, Branch) and model_from_numeric_id(platform_id, Platform) and model_from_numeric_id(test_id, Test):
             schedule_runs_update(test_id, branch_id, platform_id)
 
 
@@ -145,7 +145,7 @@ class RunsChartHandler(webapp2.RequestHandler):
         assert platform
         assert test
 
-        params = Runs(branch, platform, test.name).chart_params(display_days)
+        params = Runs.update_or_insert(branch, platform, test).chart_params(display_days)
         dashboard_chart_file = urllib.urlopen('http://chart.googleapis.com/chart', urllib.urlencode(params))
 
         DashboardImage.create(branch.id, platform.id, test.id, display_days, dashboard_chart_file.read())
index 4e8b196..84dc1bb 100644 (file)
@@ -118,100 +118,3 @@ class ManifestJSONGenerator(JSONGeneratorBase):
 
     def value(self):
         return {'branchMap': self._branch_map, 'platformMap': self._platform_map, 'testMap': self._test_map}
-
-
-# FIXME: This isn't a JSON generator anymore. We should move it elsewhere or rename the file.
-class Runs(JSONGeneratorBase):
-    def __init__(self, branch, platform, test_name):
-        self._branch = branch
-        self._platform = platform
-        self._test_name = test_name
-
-    @staticmethod
-    def _generate_runs(branch, platform, test_name):
-        builds = Build.all()
-        builds.filter('branch =', branch)
-        builds.filter('platform =', platform)
-
-        for build in builds:
-            results = TestResult.all()
-            results.filter('name =', test_name)
-            results.filter('build =', build)
-            for result in results:
-                yield build, result
-        raise StopIteration
-
-    @staticmethod
-    def _entry_from_build_and_result(build, result):
-        builder_id = build.builder.key().id()
-        timestamp = mktime(build.timestamp.timetuple())
-        statistics = None
-        supplementary_revisions = None
-
-        if result.valueStdev != None and result.valueMin != None and result.valueMax != None:
-            statistics = {'stdev': result.valueStdev, 'min': result.valueMin, 'max': result.valueMax}
-
-        if build.chromiumRevision != None:
-            supplementary_revisions = {'Chromium': build.chromiumRevision}
-
-        return [result.key().id(),
-            [build.key().id(), build.buildNumber, build.revision, supplementary_revisions],
-            timestamp, result.value, 0,  # runNumber
-            [],  # annotations
-            builder_id, statistics]
-
-    def value(self):
-        _test_runs = []
-        _averages = {}
-        values = []
-
-        for build, result in Runs._generate_runs(self._branch, self._platform, self._test_name):
-            _test_runs.append(Runs._entry_from_build_and_result(build, result))
-            # FIXME: Calculate the average. In practice, we wouldn't have more than one value for a given revision.
-            _averages[build.revision] = result.value
-            values.append(result.value)
-
-        _min = min(values) if values else None
-        _max = max(values) if values else None
-
-        return {
-            'test_runs': _test_runs,
-            'averages': _averages,
-            'min': _min,
-            'max': _max,
-            'date_range': None,  # Never used by common.js.
-            'stat': 'ok'}
-
-    def chart_params(self, display_days, now=datetime.now().replace(hour=12, minute=0, second=0, microsecond=0)):
-        chart_data_x = []
-        chart_data_y = []
-        end_time = now
-        start_timestamp = mktime((end_time - timedelta(display_days)).timetuple())
-        end_timestamp = mktime(end_time.timetuple())
-
-        for build, result in self._generate_runs(self._branch, self._platform, self._test_name):
-            timestamp = mktime(build.timestamp.timetuple())
-            if timestamp < start_timestamp or timestamp > end_timestamp:
-                continue
-            chart_data_x.append(timestamp)
-            chart_data_y.append(result.value)
-
-        dates = [end_time - timedelta(display_days / 7.0 * (7 - i)) for i in range(0, 8)]
-
-        y_max = max(chart_data_y) * 1.1
-        y_axis_label_step = int(y_max / 5 + 0.5)  # This won't work for decimal numbers
-
-        return {
-            'cht': 'lxy',  # Specify with X and Y coordinates
-            'chxt': 'x,y',  # Display both X and Y axies
-            'chxl': '0:|' + '|'.join([date.strftime('%b %d') for date in dates]),  # X-axis labels
-            'chxr': '1,0,%f,%f' % (int(y_max + 0.5), y_axis_label_step),  # Y-axis range: min=0, max, step
-            'chds': '%f,%f,%f,%f' % (start_timestamp, end_timestamp, 0, y_max),  # X, Y data range
-            'chxs': '1,676767,11.167,0,l,676767',  # Y-axis label: 1,color,font-size,centerd on tick,axis line/no ticks, tick color
-            'chs': '360x240',  # Image size: 360px by 240px
-            'chco': 'ff0000',  # Plot line color
-            'chg': '%f,20,0,0' % (100 / (len(dates) - 1)),  # X, Y grid line step sizes - max is 100.
-            'chls': '3',  # Line thickness
-            'chf': 'bg,s,eff6fd',  # Transparent background
-            'chd': 't:' + ','.join([str(x) for x in chart_data_x]) + '|' + ','.join([str(y) for y in chart_data_y]),  # X, Y data
-        }
index 3c819bf..adef9f6 100644 (file)
@@ -37,7 +37,6 @@ from datetime import timedelta
 from json_generators import JSONGeneratorBase
 from json_generators import DashboardJSONGenerator
 from json_generators import ManifestJSONGenerator
-from json_generators import Runs
 from models_unittest import DataStoreTestsBase
 from models import Branch
 from models import Build
@@ -186,162 +185,5 @@ class ManifestJSONGeneratorTest(DataStoreTestsBase):
             other_test.id: {'name': other_test.name, 'branchIds': [some_branch.id], 'platformIds': [some_platform.id]}})
 
 
-class RunsTest(DataStoreTestsBase):
-    def _create_results(self, branch, platform, builder, test_name, values, timestamps=None):
-        results = []
-        for i, value in enumerate(values):
-            build = Build(branch=branch, platform=platform, builder=builder,
-                buildNumber=i, revision=100 + i, timestamp=timestamps[i] if timestamps else datetime.now())
-            build.put()
-            result = TestResult(name=test_name, build=build, value=value)
-            result.put()
-            results.append(result)
-        return results
-
-    def test_generate_runs(self):
-        some_branch = Branch.create_if_possible('some-branch', 'Some Branch')
-        some_platform = Platform.create_if_possible('some-platform', 'Some Platform')
-        some_builder = Builder.get(Builder.create('some-builder', 'Some Builder'))
-
-        results = self._create_results(some_branch, some_platform, some_builder, 'some-test', [50.0, 51.0, 52.0, 49.0, 48.0])
-        last_i = 0
-        for i, (build, result) in enumerate(Runs._generate_runs(some_branch, some_platform, "some-test")):
-            self.assertEqual(build.buildNumber, i)
-            self.assertEqual(build.revision, 100 + i)
-            self.assertEqual(result.name, 'some-test')
-            self.assertEqual(result.value, results[i].value)
-            last_i = i
-        self.assertTrue(last_i + 1, len(results))
-
-    def test_value_without_results(self):
-        some_branch = Branch.create_if_possible('some-branch', 'Some Branch')
-        some_platform = Platform.create_if_possible('some-platform', 'Some Platform')
-        self.assertThereIsNoInstanceOf(Test)
-        self.assertThereIsNoInstanceOf(TestResult)
-        self.assertEqual(Runs(some_branch, some_platform, 'some-test').value(), {
-            'test_runs': [],
-            'averages': {},
-            'min': None,
-            'max': None,
-            'date_range': None,
-            'stat': 'ok'})
-
-    def test_value_with_results(self):
-        some_branch = Branch.create_if_possible('some-branch', 'Some Branch')
-        some_platform = Platform.create_if_possible('some-platform', 'Some Platform')
-        some_builder = Builder.get(Builder.create('some-builder', 'Some Builder'))
-        results = self._create_results(some_branch, some_platform, some_builder, 'some-test', [50.0, 51.0, 52.0, 49.0, 48.0])
-
-        value = Runs(some_branch, some_platform, 'some-test').value()
-        self.assertEqualUnorderedList(value.keys(), ['test_runs', 'averages', 'min', 'max', 'date_range', 'stat'])
-        self.assertEqual(value['stat'], 'ok')
-        self.assertEqual(value['min'], 48.0)
-        self.assertEqual(value['max'], 52.0)
-        self.assertEqual(value['date_range'], None)  # date_range is never given
-
-        self.assertEqual(len(value['test_runs']), len(results))
-        for i, run in enumerate(value['test_runs']):
-            result = results[i]
-            self.assertEqual(run[0], result.key().id())
-            self.assertEqual(run[1][1], i)  # Build number
-            self.assertEqual(run[1][2], 100 + i)  # Revision
-            self.assertEqual(run[1][3], None)  # Supplementary revision
-            self.assertEqual(run[3], result.value)
-            self.assertEqual(run[6], some_builder.key().id())
-            self.assertEqual(run[7], None)  # Statistics
-
-    def _assert_entry(self, entry, build, result, value, statistics=None, supplementary_revisions=None):
-        entry = entry[:]
-        entry[2] = None  # timestamp
-        self.assertEqual(entry, [result.key().id(), [build.key().id(), build.buildNumber, build.revision, supplementary_revisions],
-            None,  # timestamp
-            value, 0,  # runNumber
-            [],  # annotations
-            build.builder.key().id(), statistics])
-
-    def test_run_from_build_and_result(self):
-        branch = Branch.create_if_possible('some-branch', 'Some Branch')
-        platform = Platform.create_if_possible('some-platform', 'Some Platform')
-        builder = Builder.get(Builder.create('some-builder', 'Some Builder'))
-        test_name = ' some-test'
-
-        def create_build(build_number, revision):
-            timestamp = datetime.now().replace(microsecond=0)
-            build = Build(branch=branch, platform=platform, builder=builder, buildNumber=build_number,
-                revision=revision, timestamp=timestamp)
-            build.put()
-            return build
-
-        build = create_build(1, 101)
-        result = TestResult(name=test_name, value=123.0, build=build)
-        result.put()
-        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 123.0)
-
-        build = create_build(2, 102)
-        result = TestResult(name=test_name, value=456.0, valueMedian=789.0, build=build)
-        result.put()
-        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0)
-
-        result.valueStdev = 7.0
-        result.put()
-        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0)
-
-        result.valueStdev = None
-        result.valueMin = 123.0
-        result.valueMax = 789.0
-        result.put()
-        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0)
-
-        result.valueStdev = 8.0
-        result.valueMin = 123.0
-        result.valueMax = 789.0
-        result.put()
-        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0,
-            statistics={'stdev': 8.0, 'min': 123.0, 'max': 789.0})
-
-        result.valueMedian = 345.0  # Median is never used by the frontend.
-        result.valueStdev = 8.0
-        result.valueMin = 123.0
-        result.valueMax = 789.0
-        result.put()
-        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0,
-            statistics={'stdev': 8.0, 'min': 123.0, 'max': 789.0})
-
-    def test_chart_params_with_value(self):
-        some_branch = Branch.create_if_possible('some-branch', 'Some Branch')
-        some_platform = Platform.create_if_possible('some-platform', 'Some Platform')
-        some_builder = Builder.get(Builder.create('some-builder', 'Some Builder'))
-
-        start_time = datetime(2011, 2, 21, 12, 0, 0)
-        end_time = datetime(2011, 2, 28, 12, 0, 0)
-        results = self._create_results(some_branch, some_platform, some_builder, 'some-test',
-            [50.0, 51.0, 52.0, 49.0, 48.0, 51.9, 50.7, 51.1],
-            [start_time + timedelta(day) for day in range(0, 8)])
-
-        # Use int despite of its impreciseness since tests may fail due to rounding errors otherwise.
-        def split_as_int(string):
-            return [int(float(value)) for value in string.split(',')]
-
-        params = Runs(some_branch, some_platform, 'some-test').chart_params(7, end_time)
-        self.assertEqual(params['chxl'], '0:|Feb 21|Feb 22|Feb 23|Feb 24|Feb 25|Feb 26|Feb 27|Feb 28')
-        self.assertEqual(split_as_int(params['chxr']), [1, 0, 57, int(52 * 1.1 / 5 + 0.5)])
-        x_min, x_max, y_min, y_max = split_as_int(params['chds'])
-        self.assertEqual(datetime.fromtimestamp(x_min), start_time)
-        self.assertEqual(datetime.fromtimestamp(x_max), end_time)
-        self.assertEqual(y_min, 0)
-        self.assertEqual(y_max, int(52 * 1.1))
-        self.assertEqual(split_as_int(params['chg']), [int(100 / 7), 20, 0, 0])
-
-        params = Runs(some_branch, some_platform, 'some-test').chart_params(14, end_time)
-        self.assertEqual(params['chxl'], '0:|Feb 14|Feb 16|Feb 18|Feb 20|Feb 22|Feb 24|Feb 26|Feb 28')
-        self.assertEqual(split_as_int(params['chxr']), [1, 0, 57, int(52 * 1.1 / 5 + 0.5)])
-        x_min, x_max, y_min, y_max = split_as_int(params['chds'])
-        self.assertEqual(datetime.fromtimestamp(x_min), datetime(2011, 2, 14, 12, 0, 0))
-        self.assertEqual(datetime.fromtimestamp(x_max), end_time)
-        self.assertEqual(y_min, 0)
-        self.assertEqual(y_max, int(52 * 1.1))
-        self.assertEqual(split_as_int(params['chg']), [int(100 / 7), 20, 0, 0])
-
-
 if __name__ == '__main__':
     unittest.main()
index c4433ac..51f029f 100644 (file)
@@ -150,10 +150,6 @@ class Test(db.Model):
     platforms = db.ListProperty(db.Key)
 
     @staticmethod
-    def cache_key(test_id, branch_id, platform_id):
-        return 'runs:%d,%d,%d' % (test_id, branch_id, platform_id)
-
-    @staticmethod
     def update_or_insert(test_name, branch, platform):
         existing_test = [None]
 
@@ -317,6 +313,127 @@ class PersistentCache(db.Model):
         return cache.value
 
 
+class Runs(db.Model):
+    branch = db.ReferenceProperty(Branch, required=True, collection_name='runs_branch')
+    platform = db.ReferenceProperty(Platform, required=True, collection_name='runs_platform')
+    test = db.ReferenceProperty(Test, required=True, collection_name='runs_test')
+    json_runs = db.TextProperty()
+    json_averages = db.TextProperty()
+    json_min = db.FloatProperty()
+    json_max = db.FloatProperty()
+
+    @staticmethod
+    def _generate_runs(branch, platform, test_name):
+        builds = Build.all()
+        builds.filter('branch =', branch)
+        builds.filter('platform =', platform)
+
+        for build in builds:
+            results = TestResult.all()
+            results.filter('name =', test_name)
+            results.filter('build =', build)
+            for result in results:
+                yield build, result
+        raise StopIteration
+
+    @staticmethod
+    def _entry_from_build_and_result(build, result):
+        builder_id = build.builder.key().id()
+        timestamp = mktime(build.timestamp.timetuple())
+        statistics = None
+        supplementary_revisions = None
+
+        if result.valueStdev != None and result.valueMin != None and result.valueMax != None:
+            statistics = {'stdev': result.valueStdev, 'min': result.valueMin, 'max': result.valueMax}
+
+        if build.chromiumRevision != None:
+            supplementary_revisions = {'Chromium': build.chromiumRevision}
+
+        return [result.key().id(),
+            [build.key().id(), build.buildNumber, build.revision, supplementary_revisions],
+            timestamp, result.value, 0,  # runNumber
+            [],  # annotations
+            builder_id, statistics]
+
+    @staticmethod
+    def _key_name(branch_id, platform_id, test_id):
+        return 'runs:%d,%d,%d' % (test_id, branch_id, platform_id)
+
+    @classmethod
+    def update_or_insert(cls, branch, platform, test):
+        test_runs = []
+        averages = {}
+        values = []
+
+        for build, result in cls._generate_runs(branch, platform, test.name):
+            test_runs.append(cls._entry_from_build_and_result(build, result))
+            # FIXME: Calculate the average. In practice, we wouldn't have more than one value for a given revision.
+            averages[build.revision] = result.value
+            values.append(result.value)
+
+        min_value = min(values) if values else None
+        max_value = max(values) if values else None
+
+        key_name = cls._key_name(branch.id, platform.id, test.id)
+        runs = Runs(key_name=key_name, branch=branch, platform=platform, test=test,
+            json_runs=json.dumps(test_runs)[1:-1], json_averages=json.dumps(averages)[1:-1], json_min=min_value, json_max=max_value)
+        runs.put()
+        memcache.set(key_name, runs.to_json())
+        return runs
+
+    @classmethod
+    def json_by_ids(cls, branch_id, platform_id, test_id):
+        key_name = cls._key_name(branch_id, platform_id, test_id)
+        runs_json = memcache.get(key_name)
+        if not runs_json:
+            runs = cls.get_by_key_name(key_name)
+            if not runs:
+                return None
+            runs_json = runs.to_json()
+            memcache.set(key_name, runs_json)
+        return runs_json
+
+    def to_json(self):
+        # date_range is never used by common.js.
+        return '{"test_runs": [%s], "averages": {%s}, "min": %s, "max": %s, "date_range": null, "stat": "ok"}' % (self.json_runs,
+            self.json_averages, str(self.json_min) if self.json_min else 'null', str(self.json_max) if self.json_max else 'null')
+
+    # FIXME: Use data in JSON to compute values to avoid iterating through the datastore.
+    def chart_params(self, display_days, now=datetime.now().replace(hour=12, minute=0, second=0, microsecond=0)):
+        chart_data_x = []
+        chart_data_y = []
+        end_time = now
+        start_timestamp = mktime((end_time - timedelta(display_days)).timetuple())
+        end_timestamp = mktime(end_time.timetuple())
+
+        for build, result in self._generate_runs(self.branch, self.platform, self.test.name):
+            timestamp = mktime(build.timestamp.timetuple())
+            if timestamp < start_timestamp or timestamp > end_timestamp:
+                continue
+            chart_data_x.append(timestamp)
+            chart_data_y.append(result.value)
+
+        dates = [end_time - timedelta(display_days / 7.0 * (7 - i)) for i in range(0, 8)]
+
+        y_max = max(chart_data_y) * 1.1
+        y_axis_label_step = int(y_max / 5 + 0.5)  # This won't work for decimal numbers
+
+        return {
+            'cht': 'lxy',  # Specify with X and Y coordinates
+            'chxt': 'x,y',  # Display both X and Y axies
+            'chxl': '0:|' + '|'.join([date.strftime('%b %d') for date in dates]),  # X-axis labels
+            'chxr': '1,0,%f,%f' % (int(y_max + 0.5), y_axis_label_step),  # Y-axis range: min=0, max, step
+            'chds': '%f,%f,%f,%f' % (start_timestamp, end_timestamp, 0, y_max),  # X, Y data range
+            'chxs': '1,676767,11.167,0,l,676767',  # Y-axis label: 1,color,font-size,centerd on tick,axis line/no ticks, tick color
+            'chs': '360x240',  # Image size: 360px by 240px
+            'chco': 'ff0000',  # Plot line color
+            'chg': '%f,20,0,0' % (100 / (len(dates) - 1)),  # X, Y grid line step sizes - max is 100.
+            'chls': '3',  # Line thickness
+            'chf': 'bg,s,eff6fd',  # Transparent background
+            'chd': 't:' + ','.join([str(x) for x in chart_data_x]) + '|' + ','.join([str(y) for y in chart_data_y]),  # X, Y data
+        }
+
+
 class DashboardImage(db.Model):
     image = db.BlobProperty(required=True)
     createdAt = db.DateTimeProperty(required=True, auto_now=True)
index 57e0554..7b5f8f1 100644 (file)
@@ -27,6 +27,7 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import json
 import models
 import unittest
 
@@ -45,6 +46,7 @@ from models import Test
 from models import TestResult
 from models import ReportLog
 from models import PersistentCache
+from models import Runs
 from models import DashboardImage
 from models import create_in_transaction_with_numeric_id_holder
 from models import delete_model_with_numeric_id_holder
@@ -520,6 +522,220 @@ class PersistentCacheTests(DataStoreTestsBase):
         self.assertEqual(PersistentCache.get_cache('some-cache'), 'some data')
 
 
+class RunsTest(DataStoreTestsBase):
+    # Tests for the Runs model: run generation, insertion/update, JSON
+    # serialization, memcache-backed lookup, and chart parameter building.
+    def setUp(self):
+        super(RunsTest, self).setUp()
+        # Runs caches its JSON payload in memcache, so the memcache stub
+        # is needed in addition to the datastore stub from the base class.
+        self.testbed.init_memcache_stub()
+
+    def _create_results(self, branch, platform, builder, test_name, values, timestamps=None):
+        # Helper: persist one Build/TestResult pair per entry in |values| and
+        # return the TestResult list.  Build i gets buildNumber=i and
+        # revision=100+i; timestamps default to datetime.now() when omitted.
+        results = []
+        for i, value in enumerate(values):
+            build = Build(branch=branch, platform=platform, builder=builder,
+                buildNumber=i, revision=100 + i, timestamp=timestamps[i] if timestamps else datetime.now())
+            build.put()
+            result = TestResult(name=test_name, build=build, value=value)
+            result.put()
+            results.append(result)
+        return results
+
+    def test_generate_runs(self):
+        some_branch = Branch.create_if_possible('some-branch', 'Some Branch')
+        some_platform = Platform.create_if_possible('some-platform', 'Some Platform')
+        some_builder = Builder.get(Builder.create('some-builder', 'Some Builder'))
+        some_test = Test.update_or_insert('some-test', some_branch, some_platform)
+
+        results = self._create_results(some_branch, some_platform, some_builder, 'some-test', [50.0, 51.0, 52.0, 49.0, 48.0])
+        last_i = 0
+        for i, (build, result) in enumerate(Runs._generate_runs(some_branch, some_platform, some_test)):
+            self.assertEqual(build.buildNumber, i)
+            self.assertEqual(build.revision, 100 + i)
+            self.assertEqual(result.name, 'some-test')
+            self.assertEqual(result.value, results[i].value)
+            last_i = i
+        self.assertTrue(last_i + 1, len(results))
+
+    def test_update_or_insert(self):
+        # update_or_insert should create exactly one Runs entity, regenerate
+        # its JSON fields from stored results, and refresh the memcache entry.
+        some_branch = Branch.create_if_possible('some-branch', 'Some Branch')
+        some_platform = Platform.create_if_possible('some-platform', 'Some Platform')
+        some_builder = Builder.get(Builder.create('some-builder', 'Some Builder'))
+        some_test = Test.update_or_insert('some-test', some_branch, some_platform)
+        self.assertThereIsNoInstanceOf(Runs)
+
+        # With no TestResult stored yet, all JSON fields stay empty/None.
+        runs = Runs.update_or_insert(some_branch, some_platform, some_test)
+        self.assertOnlyInstance(runs)
+        self.assertEqual(runs.json_runs, '')
+        self.assertEqual(runs.json_averages, '')
+        self.assertEqual(runs.json_min, None)
+        self.assertEqual(runs.json_max, None)
+        # Even the empty Runs must populate memcache under its key name.
+        old_memcache_value = memcache.get(Runs._key_name(some_branch.id, some_platform.id, some_test.id))
+        self.assertTrue(old_memcache_value)
+
+        runs.delete()
+        self.assertThereIsNoInstanceOf(Runs)
+
+        # After a result exists, re-inserting must serialize it and replace
+        # the previously cached (empty) memcache value.
+        results = self._create_results(some_branch, some_platform, some_builder, 'some-test', [50.0])
+        runs = Runs.update_or_insert(some_branch, some_platform, some_test)
+        self.assertOnlyInstance(runs)
+        self.assertTrue(runs.json_runs.startswith('[5, [4, 0, 100, null],'))
+        self.assertEqual(runs.json_averages, '"100": 50.0')
+        self.assertEqual(runs.json_min, 50.0)
+        self.assertEqual(runs.json_max, 50.0)
+        self.assertNotEqual(memcache.get(Runs._key_name(some_branch.id, some_platform.id, some_test.id)), old_memcache_value)
+
+    def test_json_by_ids(self):
+        # json_by_ids should serve from memcache when a value is present
+        # (even a stale one) and fall back to regenerating + re-caching the
+        # JSON when the memcache entry is missing.
+        some_branch = Branch.create_if_possible('some-branch', 'Some Branch')
+        some_platform = Platform.create_if_possible('some-platform', 'Some Platform')
+        some_builder = Builder.get(Builder.create('some-builder', 'Some Builder'))
+        some_test = Test.update_or_insert('some-test', some_branch, some_platform)
+
+        self._create_results(some_branch, some_platform, some_builder, 'some-test', [50.0])
+        runs = Runs.update_or_insert(some_branch, some_platform, some_test)
+        runs_json = runs.to_json()
+
+        key_name = Runs._key_name(some_branch.id, some_platform.id, some_test.id)
+        self.assertEqual(Runs.json_by_ids(some_branch.id, some_platform.id, some_test.id), runs_json)
+        self.assertEqual(memcache.get(key_name), runs_json)
+
+        # A cached value is returned verbatim without consulting the datastore.
+        memcache.set(key_name, 'blah')
+        self.assertEqual(Runs.json_by_ids(some_branch.id, some_platform.id, some_test.id), 'blah')
+
+        # A cache miss regenerates the JSON and repopulates memcache.
+        memcache.delete(key_name)
+        self.assertEqual(Runs.json_by_ids(some_branch.id, some_platform.id, some_test.id), runs_json)
+        self.assertEqual(memcache.get(key_name), runs_json)
+
+    def test_to_json_without_results(self):
+        # With no TestResult stored, to_json must still emit the full JSON
+        # envelope with empty/None payload fields and stat 'ok'.
+        some_branch = Branch.create_if_possible('some-branch', 'Some Branch')
+        some_platform = Platform.create_if_possible('some-platform', 'Some Platform')
+        some_test = Test.update_or_insert('some-test', some_branch, some_platform)
+        self.assertOnlyInstance(some_test)
+        self.assertThereIsNoInstanceOf(TestResult)
+        self.assertEqual(json.loads(Runs.update_or_insert(some_branch, some_platform, some_test).to_json()), {
+            'test_runs': [],
+            'averages': {},
+            'min': None,
+            'max': None,
+            'date_range': None,
+            'stat': 'ok'})
+
+    def test_to_json_with_results(self):
+        # to_json must include one entry per stored result, in insertion
+        # order, with min/max aggregated over all values.
+        some_branch = Branch.create_if_possible('some-branch', 'Some Branch')
+        some_platform = Platform.create_if_possible('some-platform', 'Some Platform')
+        some_builder = Builder.get(Builder.create('some-builder', 'Some Builder'))
+        some_test = Test.update_or_insert('some-test', some_branch, some_platform)
+        results = self._create_results(some_branch, some_platform, some_builder, 'some-test', [50.0, 51.0, 52.0, 49.0, 48.0])
+
+        value = json.loads(Runs.update_or_insert(some_branch, some_platform, some_test).to_json())
+        self.assertEqualUnorderedList(value.keys(), ['test_runs', 'averages', 'min', 'max', 'date_range', 'stat'])
+        self.assertEqual(value['stat'], 'ok')
+        self.assertEqual(value['min'], 48.0)
+        self.assertEqual(value['max'], 52.0)
+        self.assertEqual(value['date_range'], None)  # date_range is never given
+
+        # Each run entry mirrors the layout checked in _assert_entry below:
+        # [result id, [build id, build number, revision, supplementary rev],
+        #  timestamp, value, run number, annotations, builder id, statistics].
+        self.assertEqual(len(value['test_runs']), len(results))
+        for i, run in enumerate(value['test_runs']):
+            result = results[i]
+            self.assertEqual(run[0], result.key().id())
+            self.assertEqual(run[1][1], i)  # Build number
+            self.assertEqual(run[1][2], 100 + i)  # Revision
+            self.assertEqual(run[1][3], None)  # Supplementary revision
+            self.assertEqual(run[3], result.value)
+            self.assertEqual(run[6], some_builder.key().id())
+            self.assertEqual(run[7], None)  # Statistics
+
+    def _assert_entry(self, entry, build, result, value, statistics=None, supplementary_revisions=None):
+        # Helper: compare one run entry against its expected layout.  The
+        # timestamp slot is zeroed out first because the build timestamp is
+        # taken from datetime.now() and cannot be predicted exactly.
+        entry = entry[:]
+        entry[2] = None  # timestamp
+        self.assertEqual(entry, [result.key().id(), [build.key().id(), build.buildNumber, build.revision, supplementary_revisions],
+            None,  # timestamp
+            value, 0,  # runNumber
+            [],  # annotations
+            build.builder.key().id(), statistics])
+
+    def test_run_from_build_and_result(self):
+        # _entry_from_build_and_result should emit the statistics dict only
+        # when stdev, min and max are all present; partial combinations and
+        # valueMedian alone must leave statistics as None.
+        branch = Branch.create_if_possible('some-branch', 'Some Branch')
+        platform = Platform.create_if_possible('some-platform', 'Some Platform')
+        builder = Builder.get(Builder.create('some-builder', 'Some Builder'))
+        # NOTE(review): the leading space in ' some-test' looks unintentional
+        # — confirm; it is harmless here since the name is only used locally.
+        test_name = ' some-test'
+
+        def create_build(build_number, revision):
+            # Microseconds are dropped so timestamps round-trip cleanly.
+            timestamp = datetime.now().replace(microsecond=0)
+            build = Build(branch=branch, platform=platform, builder=builder, buildNumber=build_number,
+                revision=revision, timestamp=timestamp)
+            build.put()
+            return build
+
+        # A bare value yields no statistics.
+        build = create_build(1, 101)
+        result = TestResult(name=test_name, value=123.0, build=build)
+        result.put()
+        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 123.0)
+
+        # valueMedian alone does not produce statistics.
+        build = create_build(2, 102)
+        result = TestResult(name=test_name, value=456.0, valueMedian=789.0, build=build)
+        result.put()
+        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0)
+
+        # stdev without min/max: still no statistics.
+        result.valueStdev = 7.0
+        result.put()
+        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0)
+
+        # min/max without stdev: still no statistics.
+        result.valueStdev = None
+        result.valueMin = 123.0
+        result.valueMax = 789.0
+        result.put()
+        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0)
+
+        # All three present: statistics dict is emitted.
+        result.valueStdev = 8.0
+        result.valueMin = 123.0
+        result.valueMax = 789.0
+        result.put()
+        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0,
+            statistics={'stdev': 8.0, 'min': 123.0, 'max': 789.0})
+
+        result.valueMedian = 345.0  # Median is never used by the frontend.
+        result.valueStdev = 8.0
+        result.valueMin = 123.0
+        result.valueMax = 789.0
+        result.put()
+        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0,
+            statistics={'stdev': 8.0, 'min': 123.0, 'max': 789.0})
+
+    def test_chart_params_with_value(self):
+        # chart_params should lay out 8 evenly spaced X-axis date labels over
+        # the requested display window and derive the Y range from the data.
+        some_branch = Branch.create_if_possible('some-branch', 'Some Branch')
+        some_platform = Platform.create_if_possible('some-platform', 'Some Platform')
+        some_builder = Builder.get(Builder.create('some-builder', 'Some Builder'))
+        some_test = Test.update_or_insert('some-test', some_branch, some_platform)
+
+        start_time = datetime(2011, 2, 21, 12, 0, 0)
+        end_time = datetime(2011, 2, 28, 12, 0, 0)
+        results = self._create_results(some_branch, some_platform, some_builder, 'some-test',
+            [50.0, 51.0, 52.0, 49.0, 48.0, 51.9, 50.7, 51.1],
+            [start_time + timedelta(day) for day in range(0, 8)])
+
+        # Compare as ints despite the imprecision; exact float comparisons
+        # would make the test flaky due to rounding errors.
+        def split_as_int(string):
+            return [int(float(value)) for value in string.split(',')]
+
+        # 7-day window: one label per day, X range spans the result timestamps.
+        params = Runs.update_or_insert(some_branch, some_platform, some_test).chart_params(7, end_time)
+        self.assertEqual(params['chxl'], '0:|Feb 21|Feb 22|Feb 23|Feb 24|Feb 25|Feb 26|Feb 27|Feb 28')
+        self.assertEqual(split_as_int(params['chxr']), [1, 0, 57, int(52 * 1.1 / 5 + 0.5)])
+        x_min, x_max, y_min, y_max = split_as_int(params['chds'])
+        self.assertEqual(datetime.fromtimestamp(x_min), start_time)
+        self.assertEqual(datetime.fromtimestamp(x_max), end_time)
+        self.assertEqual(y_min, 0)
+        self.assertEqual(y_max, int(52 * 1.1))
+        self.assertEqual(split_as_int(params['chg']), [int(100 / 7), 20, 0, 0])
+
+        # 14-day window: labels every other day; Y range is unchanged since
+        # it depends only on the data values.
+        params = Runs.update_or_insert(some_branch, some_platform, some_test).chart_params(14, end_time)
+        self.assertEqual(params['chxl'], '0:|Feb 14|Feb 16|Feb 18|Feb 20|Feb 22|Feb 24|Feb 26|Feb 28')
+        self.assertEqual(split_as_int(params['chxr']), [1, 0, 57, int(52 * 1.1 / 5 + 0.5)])
+        x_min, x_max, y_min, y_max = split_as_int(params['chds'])
+        self.assertEqual(datetime.fromtimestamp(x_min), datetime(2011, 2, 14, 12, 0, 0))
+        self.assertEqual(datetime.fromtimestamp(x_max), end_time)
+        self.assertEqual(y_min, 0)
+        self.assertEqual(y_max, int(52 * 1.1))
+        self.assertEqual(split_as_int(params['chg']), [int(100 / 7), 20, 0, 0])
+
+
 class DashboardImageTests(DataStoreTestsBase):
     def setUp(self):
         super(DashboardImageTests, self).setUp()