From: rniwa@webkit.org Date: Thu, 8 Mar 2012 22:58:54 +0000 (+0000) Subject: perf-o-matic should incrementally update JSON responses X-Git-Url: https://git.webkit.org/?p=WebKit-https.git;a=commitdiff_plain;h=f6753c1a9443a9dda534d875ff8d2e3a9f16d60e perf-o-matic should incrementally update JSON responses https://bugs.webkit.org/show_bug.cgi?id=79898 Reviewed by Eric Seidel. Now that Runs object stores test_runs and averages separately, we can update JSON data incrementally without degrading values. Also fixed the order of arguments passed to DashboardImage.needs_update in schedule_runs_update. This bug had caused all chart images to be updated on every new report. * Websites/webkit-perf.appspot.com/controller.py: (schedule_runs_update): * Websites/webkit-perf.appspot.com/models.py: (Runs.update_or_insert): (Runs.update_incrementally): (Runs): (Runs.get_by_objects): * Websites/webkit-perf.appspot.com/models_unittest.py: (RunsTest._create_results): (RunsTest.test_generate_runs): (RunsTest.test_update_or_insert): (RunsTest.test_update_incrementally): (RunsTest.test_to_json_with_results): * Websites/webkit-perf.appspot.com/report_process_handler.py: (ReportProcessHandler.post): git-svn-id: https://svn.webkit.org/repository/webkit/trunk@110210 268f45cc-cd09-0410-ab3c-d52691b4dbfc --- diff --git a/ChangeLog b/ChangeLog index c3bdb608b297..3df5af6166f3 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,33 @@ +2012-03-08 Ryosuke Niwa + + perf-o-matic should incrementally update JSON responses + https://bugs.webkit.org/show_bug.cgi?id=79898 + + Reviewed by Eric Seidel. + + Now that Runs object stores test_runs and averages separately, + we can update JSON data incrementally without degrading values. + + Also fixed the order of arguments passed to DashboardImage.needs_update + in schedule_runs_update. This bug had caused all chart images to be + updated on every new report. 
+ + * Websites/webkit-perf.appspot.com/controller.py: + (schedule_runs_update): + * Websites/webkit-perf.appspot.com/models.py: + (Runs.update_or_insert): + (Runs.update_incrementally): + (Runs): + (Runs.get_by_objects): + * Websites/webkit-perf.appspot.com/models_unittest.py: + (RunsTest._create_results): + (RunsTest.test_generate_runs): + (RunsTest.test_update_or_insert): + (RunsTest.test_update_incrementally): + (RunsTest.test_to_json_with_results): + * Websites/webkit-perf.appspot.com/report_process_handler.py: + (ReportProcessHandler.post): + 2012-03-08 Max Vujovic Add a method to window.internals to enable testing of inspector highlight rects diff --git a/Websites/webkit-perf.appspot.com/controller.py b/Websites/webkit-perf.appspot.com/controller.py index 71d450b1c1e0..9623d9384b42 100644 --- a/Websites/webkit-perf.appspot.com/controller.py +++ b/Websites/webkit-perf.appspot.com/controller.py @@ -85,10 +85,11 @@ class CachedDashboardHandler(webapp2.RequestHandler): schedule_dashboard_update() -def schedule_runs_update(test_id, branch_id, platform_id): - taskqueue.add(url='/api/test/runs/update', params={'id': test_id, 'branchid': branch_id, 'platformid': platform_id}) +def schedule_runs_update(test_id, branch_id, platform_id, regenerate_runs=True): + if regenerate_runs: + taskqueue.add(url='/api/test/runs/update', params={'id': test_id, 'branchid': branch_id, 'platformid': platform_id}) for display_days in [7, 30, 90, 365]: - if DashboardImage.needs_update(branch_id, test_id, platform_id, display_days): + if DashboardImage.needs_update(branch_id, platform_id, test_id, display_days): taskqueue.add(url='/api/test/runs/chart', params={'id': test_id, 'branchid': branch_id, 'platformid': platform_id, 'displayDays': display_days}) diff --git a/Websites/webkit-perf.appspot.com/models.py b/Websites/webkit-perf.appspot.com/models.py index 51f029f31daa..df7ab7ef39f7 100644 --- a/Websites/webkit-perf.appspot.com/models.py +++ b/Websites/webkit-perf.appspot.com/models.py 
@@ -361,26 +361,45 @@ class Runs(db.Model): @classmethod def update_or_insert(cls, branch, platform, test): - test_runs = [] - averages = {} - values = [] + key_name = cls._key_name(branch.id, platform.id, test.id) + runs = Runs(key_name=key_name, branch=branch, platform=platform, test=test, json_runs='', json_averages='') for build, result in cls._generate_runs(branch, platform, test.name): - test_runs.append(cls._entry_from_build_and_result(build, result)) - # FIXME: Calculate the average. In practice, we wouldn't have more than one value for a given revision. - averages[build.revision] = result.value - values.append(result.value) - - min_value = min(values) if values else None - max_value = max(values) if values else None + runs.update_incrementally(build, result, check_duplicates_and_save=False) - key_name = cls._key_name(branch.id, platform.id, test.id) - runs = Runs(key_name=key_name, branch=branch, platform=platform, test=test, - json_runs=json.dumps(test_runs)[1:-1], json_averages=json.dumps(averages)[1:-1], json_min=min_value, json_max=max_value) runs.put() memcache.set(key_name, runs.to_json()) return runs + def update_incrementally(self, build, result, check_duplicates_and_save=True): + new_entry = Runs._entry_from_build_and_result(build, result) + + # Check for duplicate entries + if check_duplicates_and_save: + revision_is_in_runs = str(build.revision) in json.loads('{' + self.json_averages + '}') + if revision_is_in_runs and new_entry[1] in [entry[1] for entry in json.loads('[' + self.json_runs + ']')]: + return + + if self.json_runs: + self.json_runs += ',' + + if self.json_averages: + self.json_averages += ',' + + self.json_runs += json.dumps(new_entry) + # FIXME: Calculate the average. In practice, we wouldn't have more than one value for a given revision. 
+ self.json_averages += '"%d": %f' % (build.revision, result.value) + self.json_min = min(self.json_min, result.value) if self.json_min != None else result.value + self.json_max = max(self.json_max, result.value) + + if check_duplicates_and_save: + self.put() + memcache.set(self.key().name(), self.to_json()) + + @staticmethod + def get_by_objects(branch, platform, test): + return Runs.get_by_key_name(Runs._key_name(branch.id, platform.id, test.id)) + @classmethod def json_by_ids(cls, branch_id, platform_id, test_id): key_name = cls._key_name(branch_id, platform_id, test_id) diff --git a/Websites/webkit-perf.appspot.com/models_unittest.py b/Websites/webkit-perf.appspot.com/models_unittest.py index 7b5f8f1aa15c..fa4856b745e3 100644 --- a/Websites/webkit-perf.appspot.com/models_unittest.py +++ b/Websites/webkit-perf.appspot.com/models_unittest.py @@ -527,16 +527,18 @@ class RunsTest(DataStoreTestsBase): super(RunsTest, self).setUp() self.testbed.init_memcache_stub() - def _create_results(self, branch, platform, builder, test_name, values, timestamps=None): + def _create_results(self, branch, platform, builder, test_name, values, timestamps=None, starting_revision=100): + builds = [] results = [] for i, value in enumerate(values): build = Build(branch=branch, platform=platform, builder=builder, - buildNumber=i, revision=100 + i, timestamp=timestamps[i] if timestamps else datetime.now()) + buildNumber=i, revision=starting_revision + i, timestamp=timestamps[i] if timestamps else datetime.now()) build.put() result = TestResult(name=test_name, build=build, value=value) result.put() + builds.append(build) results.append(result) - return results + return builds, results def test_generate_runs(self): some_branch = Branch.create_if_possible('some-branch', 'Some Branch') @@ -544,7 +546,7 @@ class RunsTest(DataStoreTestsBase): some_builder = Builder.get(Builder.create('some-builder', 'Some Builder')) some_test = Test.update_or_insert('some-test', some_branch, some_platform) - 
results = self._create_results(some_branch, some_platform, some_builder, 'some-test', [50.0, 51.0, 52.0, 49.0, 48.0]) + builds, results = self._create_results(some_branch, some_platform, some_builder, 'some-test', [50.0, 51.0, 52.0, 49.0, 48.0]) last_i = 0 for i, (build, result) in enumerate(Runs._generate_runs(some_branch, some_platform, some_test)): self.assertEqual(build.buildNumber, i) @@ -573,15 +575,47 @@ class RunsTest(DataStoreTestsBase): runs.delete() self.assertThereIsNoInstanceOf(Runs) - results = self._create_results(some_branch, some_platform, some_builder, 'some-test', [50.0]) + builds, results = self._create_results(some_branch, some_platform, some_builder, 'some-test', [50.0]) runs = Runs.update_or_insert(some_branch, some_platform, some_test) self.assertOnlyInstance(runs) self.assertTrue(runs.json_runs.startswith('[5, [4, 0, 100, null],')) - self.assertEqual(runs.json_averages, '"100": 50.0') + self.assertEqual(json.loads('{' + runs.json_averages + '}'), {"100": 50.0}) self.assertEqual(runs.json_min, 50.0) self.assertEqual(runs.json_max, 50.0) self.assertNotEqual(memcache.get(Runs._key_name(some_branch.id, some_platform.id, some_test.id)), old_memcache_value) + def test_update_incrementally(self): + some_branch = Branch.create_if_possible('some-branch', 'Some Branch') + some_platform = Platform.create_if_possible('some-platform', 'Some Platform') + some_builder = Builder.get(Builder.create('some-builder', 'Some Builder')) + some_test = Test.update_or_insert('some-test', some_branch, some_platform) + self.assertThereIsNoInstanceOf(Runs) + + timestamps = [datetime.now(), datetime.now()] + builds, results = self._create_results(some_branch, some_platform, some_builder, 'some-test', [50.0, 52.0], timestamps) + runs = Runs.update_or_insert(some_branch, some_platform, some_test) + self.assertOnlyInstance(runs) + self.assertEqual(json.loads('[' + runs.json_runs + ']'), + [[5, [4, 0, 100, None], mktime(timestamps[0].timetuple()), 50.0, 0, [], None, None], 
+ [7, [6, 1, 101, None], mktime(timestamps[1].timetuple()), 52.0, 0, [], None, None]]) + self.assertEqual(json.loads('{' + runs.json_averages + '}'), {"100": 50.0, "101": 52.0}) + self.assertEqual(runs.json_min, 50.0) + self.assertEqual(runs.json_max, 52.0) + + timestamps.append(datetime.now()) + builds, results = self._create_results(some_branch, some_platform, some_builder, 'some-test', [48.0], + timestamps[2:], starting_revision=102) + runs.update_incrementally(builds[0], results[0]) + + self.assertOnlyInstance(runs) + self.assertEqual(json.loads('[' + runs.json_runs + ']'), + [[5, [4, 0, 100, None], mktime(timestamps[0].timetuple()), 50.0, 0, [], None, None], + [7, [6, 1, 101, None], mktime(timestamps[1].timetuple()), 52.0, 0, [], None, None], + [9, [8, 0, 102, None], mktime(timestamps[2].timetuple()), 48.0, 0, [], None, None]]) + self.assertEqual(json.loads('{' + runs.json_averages + '}'), {"100": 50.0, "101": 52.0, "102": 48.0}) + self.assertEqual(runs.json_min, 48.0) + self.assertEqual(runs.json_max, 52.0) + def test_json_by_ids(self): some_branch = Branch.create_if_possible('some-branch', 'Some Branch') some_platform = Platform.create_if_possible('some-platform', 'Some Platform') @@ -622,7 +656,7 @@ class RunsTest(DataStoreTestsBase): some_platform = Platform.create_if_possible('some-platform', 'Some Platform') some_builder = Builder.get(Builder.create('some-builder', 'Some Builder')) some_test = Test.update_or_insert('some-test', some_branch, some_platform) - results = self._create_results(some_branch, some_platform, some_builder, 'some-test', [50.0, 51.0, 52.0, 49.0, 48.0]) + builds, results = self._create_results(some_branch, some_platform, some_builder, 'some-test', [50.0, 51.0, 52.0, 49.0, 48.0]) value = json.loads(Runs.update_or_insert(some_branch, some_platform, some_test).to_json()) self.assertEqualUnorderedList(value.keys(), ['test_runs', 'averages', 'min', 'max', 'date_range', 'stat']) diff --git 
a/Websites/webkit-perf.appspot.com/report_process_handler.py b/Websites/webkit-perf.appspot.com/report_process_handler.py index ecf6e38379e4..7103785c4162 100644 --- a/Websites/webkit-perf.appspot.com/report_process_handler.py +++ b/Websites/webkit-perf.appspot.com/report_process_handler.py @@ -37,6 +37,7 @@ from controller import schedule_dashboard_update from controller import schedule_manifest_update from models import Build from models import ReportLog +from models import Runs from models import Test from models import TestResult @@ -56,10 +57,15 @@ class ReportProcessHandler(webapp2.RequestHandler): platform = log.platform() build = Build.get_or_insert_from_log(log) - for test_name, result in log.results().iteritems(): + for test_name, result_value in log.results().iteritems(): test = Test.update_or_insert(test_name, branch, platform) - TestResult.get_or_insert_from_parsed_json(test_name, build, result) - schedule_runs_update(test.id, branch.id, platform.id) + result = TestResult.get_or_insert_from_parsed_json(test_name, build, result_value) + runs = Runs.get_by_objects(branch, platform, test) + regenerate_runs = True + if runs: + runs.update_incrementally(build, result) + regenerate_runs = False + schedule_runs_update(test.id, branch.id, platform.id, regenerate_runs) log = ReportLog.get(log.key()) log.delete()