Analyzing a chart that does not exist should not halt the whole run-analysis script.
author    dewei_zhu@apple.com <dewei_zhu@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
          Wed, 23 Jan 2019 04:28:57 +0000
committer dewei_zhu@apple.com <dewei_zhu@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
          Wed, 23 Jan 2019 04:28:57 +0000
https://bugs.webkit.org/show_bug.cgi?id=193563

Reviewed by Ryosuke Niwa.

Halting the whole run-analysis script whenever the Manifest specifies an invalid chart makes the script fragile.
The run-analysis script is also responsible for adding retries and sending notifications, which should not be blocked by this error.
Skipping the analysis for the corresponding configuration instead seems reasonable.
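
A minimal sketch of the intended behavior, assuming a fetch that rejects with the string
'ConfigurationNotFound' as the dashboard does; the helper name below is hypothetical, and
the real change lives in MeasurementSetAnalyzer._analyzeMeasurementSet in the diff that follows:

    // Hypothetical helper sketching the skip-on-missing-chart behavior:
    // a 'ConfigurationNotFound' rejection is logged and skipped so the
    // remaining configurations still get analyzed; any other error keeps
    // propagating and fails the run as before.
    async function tryFetchTimeSeries(measurementSet, startTime, endTime, logger)
    {
        try {
            await measurementSet.fetchBetween(startTime, endTime);
        } catch (error) {
            if (error != 'ConfigurationNotFound')
                throw error;
            logger.warn('Skipping analysis as the time series does not exist.');
            return false;
        }
        return true;
    }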

* public/v3/models/measurement-set.js:
(MeasurementSet.prototype._ensureClusterPromise): Only add the callback when one is specified.
This helps fix the 'UnhandledPromiseRejectionWarning' emitted while running the tests; a standalone illustration follows this list.
* tools/js/measurement-set-analyzer.js:
(MeasurementSetAnalyzer.prototype.async._analyzeMeasurementSet): Catch the 'ConfigurationNotFound' exception raised when fetching a measurement set fails and skip the analysis for that configuration.
* unit-tests/measurement-set-analyzer-tests.js: Added unit tests for this.
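
The warning mentioned above is easy to reproduce in isolation. The sketch below is a
standalone Node.js illustration, not dashboard code: Promise.prototype.catch(undefined)
returns a derived promise that rejects with the same reason, and since nothing ever handles
that derived promise, Node warns even though the original rejection is handled:

    // Standalone Node.js illustration of the warning being fixed.
    const original = Promise.reject('ConfigurationNotFound');
    original.catch(() => { });  // The original rejection is handled here.
    original.catch(undefined);  // But .catch(undefined) yields a derived
                                // rejected promise that nothing handles,
                                // which is what Node warns about; guarding
                                // with 'if (callback)' never creates it.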

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@240319 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Websites/perf.webkit.org/ChangeLog
Websites/perf.webkit.org/public/v3/models/measurement-set.js
Websites/perf.webkit.org/tools/js/measurement-set-analyzer.js
Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js

index 5cefdca..4d3a26a 100644
--- a/Websites/perf.webkit.org/ChangeLog
+++ b/Websites/perf.webkit.org/ChangeLog
@@ -1,5 +1,23 @@
 2019-01-17  Dewei Zhu  <dewei_zhu@apple.com>
 
+        Analyzing a chart that does not exist should not halt the whole run-analysis script.
+        https://bugs.webkit.org/show_bug.cgi?id=193563
+
+        Reviewed by Ryosuke Niwa.
+
+        Halting the whole run-analysis script whenever the Manifest specifies an invalid chart makes the script fragile.
+        The run-analysis script is also responsible for adding retries and sending notifications, which should not be blocked by this error.
+        Skipping the analysis for the corresponding configuration instead seems reasonable.
+
+        * public/v3/models/measurement-set.js:
+        (MeasurementSet.prototype._ensureClusterPromise): Only add the callback when one is specified.
+        This helps fix the 'UnhandledPromiseRejectionWarning' emitted while running the tests.
+        * tools/js/measurement-set-analyzer.js:
+        (MeasurementSetAnalyzer.prototype.async._analyzeMeasurementSet): Catch the 'ConfigurationNotFound' exception raised when fetching a measurement set fails and skip the analysis for that configuration.
+        * unit-tests/measurement-set-analyzer-tests.js: Added unit tests for this.
+
+2019-01-17  Dewei Zhu  <dewei_zhu@apple.com>
+
         Updating commit in OSBuildFetcher should respect revision range in config.
         https://bugs.webkit.org/show_bug.cgi?id=193558
 
index 57a24ce..c2cd016 100644
--- a/Websites/perf.webkit.org/public/v3/models/measurement-set.js
+++ b/Websites/perf.webkit.org/public/v3/models/measurement-set.js
@@ -62,7 +62,8 @@ class MeasurementSet {
         if (!this._primaryClusterPromise)
             this._primaryClusterPromise = this._fetchPrimaryCluster(noCache);
         var self = this;
-        this._primaryClusterPromise.catch(callback);
+        if (callback)
+            this._primaryClusterPromise.catch(callback);
         return this._primaryClusterPromise.then(function () {
             self._allFetches[self._primaryClusterEndTime] = self._primaryClusterPromise;
             return Promise.all(self.findClusters(startTime, endTime).map(function (clusterEndTime) {
@@ -76,12 +77,14 @@ class MeasurementSet {
         if (!this._callbackMap.has(clusterEndTime))
             this._callbackMap.set(clusterEndTime, new Set);
         var callbackSet = this._callbackMap.get(clusterEndTime);
-        callbackSet.add(callback);
+        if (callback)
+            callbackSet.add(callback);
 
         var promise = this._allFetches[clusterEndTime];
-        if (promise)
-            promise.then(callback, callback);
-        else {
+        if (promise) {
+            if (callback)
+                promise.then(callback, callback);
+        } else {
             promise = this._fetchSecondaryCluster(clusterEndTime);
             for (var existingCallback of callbackSet)
                 promise.then(existingCallback, existingCallback);
index ae3a2c4..ab86a1d 100644
--- a/Websites/perf.webkit.org/tools/js/measurement-set-analyzer.js
+++ b/Websites/perf.webkit.org/tools/js/measurement-set-analyzer.js
@@ -49,7 +49,14 @@ class MeasurementSetAnalyzer {
         const metric = Metric.findById(measurementSet.metricId());
         const platform = Platform.findById(measurementSet.platformId());
         this._logger.info(`==== "${metric.fullName()}" on "${platform.name()}" ====`);
-        await measurementSet.fetchBetween(this._startTime, this._endTime);
+        try {
+            await measurementSet.fetchBetween(this._startTime, this._endTime);
+        } catch (error) {
+            if (error != 'ConfigurationNotFound')
+                throw error;
+            this._logger.warn(`Skipping analysis for "${metric.fullName()}" on "${platform.name()}" as the time series does not exist.`);
+            return;
+        }
         const currentTimeSeries = measurementSet.fetchedTimeSeries('current', false, false);
         const rawValues = currentTimeSeries.values();
         if (!rawValues || rawValues.length < 2)
index 1f21491..7abeae2 100644
--- a/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js
+++ b/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js
@@ -35,10 +35,12 @@ describe('MeasurementSetAnalyzer', () => {
     {
         const info_logs = [];
         const error_logs =[];
+        const warn_logs =[];
         return {
             info: (message) => info_logs.push(message),
+            warn: (message) => warn_logs.push(message),
             error: (message) => error_logs.push(message),
-            info_logs, error_logs
+            info_logs, warn_logs, error_logs
         };
     }
 
@@ -89,6 +91,45 @@ describe('MeasurementSetAnalyzer', () => {
             assert.deepEqual(logger.error_logs, []);
         });
 
+        it('should not analyze if there is no corresponding time series for a measurement set', async () => {
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger);
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce(measurementSet);
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].reject('ConfigurationNotFound');
+
+            try {
+                await analysisPromise;
+            } catch (error) {
+                assert(false, 'Should not throw any exception here');
+            }
+            assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====']);
+            assert.deepEqual(logger.warn_logs, [`Skipping analysis for "${MockModels.someMetric.fullName()}" on "${MockModels.somePlatform.name()}" as the time series does not exist.`]);
+            assert.deepEqual(logger.error_logs, []);
+        });
+
+        it('should rethrow the error if "measurementSet.fetchBetween" fails for a reason other than "ConfigurationNotFound"', async () => {
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger);
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce(measurementSet);
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].reject('SomeError');
+
+            try {
+                await analysisPromise;
+                assert(false, 'Should throw an exception here');
+            } catch (error) {
+                assert.equal(error, 'SomeError');
+            }
+            assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====']);
+            assert.deepEqual(logger.warn_logs, []);
+            assert.deepEqual(logger.error_logs, []);
+        });
+
         it('should not analyze if there is only one data point in the measurement set', async () => {
             const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
             const logger = mockLogger();
@@ -297,7 +338,6 @@ describe('MeasurementSetAnalyzer', () => {
             assert.deepEqual(logger.error_logs, []);
         });
 
-
         it('should not create confirming A/B tests if a new regression is detected but no triggerable available', async () => {
             PrivilegedAPI.configure('test', 'password');
             const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);