Write a script that detects chart changes by using the v3 API.
author    dewei_zhu@apple.com <dewei_zhu@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
          Wed, 2 May 2018 06:16:16 +0000 (06:16 +0000)
committer dewei_zhu@apple.com <dewei_zhu@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
          Wed, 2 May 2018 06:16:16 +0000 (06:16 +0000)
https://bugs.webkit.org/show_bug.cgi?id=184419

Reviewed by Ryosuke Niwa.

Added a script that detects chart changes and schedules a confirming analysis task.

* browser-tests/async-task-tests.js: Added a unit test for 'AsyncTask'.
* browser-tests/index.html: Added import for 'AsyncTask' for testing.
* public/shared/statistics.js: Made 'findRangesForChangeDetectionsWithWelchsTTest' default to a one-sided probability of 0.99.
(Statistics.new.this.findRangesForChangeDetectionsWithWelchsTTest):
* public/v3/async-task.js:
(AsyncTask.isAvailable): Added a helper function to determine whether AsyncTask is available, since 'Worker' is
not available in Node.js.
(AsyncTask):
* public/v3/models/bug.js: Export as a module.
* public/v3/models/measurement-set.js:
(MeasurementSet.prototype._invokeSegmentationAlgorithm): Added a check to avoid using AsyncTask when running in Node.js.
(MeasurementSet):
* server-tests/resources/common-operations.js: Added a helper function to assert that a certain exception is thrown.
(async.assertThrows):
* tools/js/measurement-set-analyzer.js: Added the 'MeasurementSetAnalyzer' module for analyzing measurement sets.
(MeasurementSetAnalyzer):
(MeasurementSetAnalyzer.prototype.async.analyzeOnce):
(MeasurementSetAnalyzer.measurementSetListForAnalysis):
(MeasurementSetAnalyzer.prototype.async._analyzeMeasurementSet):
* tools/js/v3-models.js: Added import for 'Bug' object.
* tools/run-analysis.js: Added this script to detect measurement set changes.
(main):
(async.analysisLoop):
* unit-tests/measurement-set-analyzer-tests.js: Added unit tests for 'MeasurementSetAnalyzer'.
* unit-tests/resources/mock-v3-models.js: Reset 'MeasurementSet._set' every time.
Updated the mock platform to include 'lastModifiedByMetric' information.
(MockModels.inject):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@231225 268f45cc-cd09-0410-ab3c-d52691b4dbfc

13 files changed:
Websites/perf.webkit.org/ChangeLog
Websites/perf.webkit.org/browser-tests/async-task-tests.js [new file with mode: 0644]
Websites/perf.webkit.org/browser-tests/index.html
Websites/perf.webkit.org/public/shared/statistics.js
Websites/perf.webkit.org/public/v3/async-task.js
Websites/perf.webkit.org/public/v3/models/bug.js
Websites/perf.webkit.org/public/v3/models/measurement-set.js
Websites/perf.webkit.org/server-tests/resources/common-operations.js
Websites/perf.webkit.org/tools/js/measurement-set-analyzer.js [new file with mode: 0644]
Websites/perf.webkit.org/tools/js/v3-models.js
Websites/perf.webkit.org/tools/run-analysis.js [new file with mode: 0644]
Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js [new file with mode: 0644]
Websites/perf.webkit.org/unit-tests/resources/mock-v3-models.js

index 04442ec..81b237e 100644
@@ -1,3 +1,40 @@
+2018-05-01  Dewei Zhu  <dewei_zhu@apple.com>
+
+        Write a script that detects chart changes by using the v3 API.
+        https://bugs.webkit.org/show_bug.cgi?id=184419
+
+        Reviewed by Ryosuke Niwa.
+
+        Added a script that detects chart changes and schedules a confirming analysis task.
+
+        * browser-tests/async-task-tests.js: Added a unit test for 'AsyncTask'.
+        * browser-tests/index.html: Added import for 'AsyncTask' for testing.
+        * public/shared/statistics.js: Made 'findRangesForChangeDetectionsWithWelchsTTest' default to a one-sided probability of 0.99.
+        (Statistics.new.this.findRangesForChangeDetectionsWithWelchsTTest):
+        * public/v3/async-task.js:
+        (AsyncTask.isAvailable): Added a helper function to determine whether AsyncTask is available, since 'Worker' is
+        not available in Node.js.
+        (AsyncTask):
+        * public/v3/models/bug.js: Export as a module.
+        * public/v3/models/measurement-set.js:
+        (MeasurementSet.prototype._invokeSegmentationAlgorithm): Added a check to avoid using AsyncTask when running in Node.js.
+        (MeasurementSet):
+        * server-tests/resources/common-operations.js: Added a helper function to assert that a certain exception is thrown.
+        (async.assertThrows):
+        * tools/js/measurement-set-analyzer.js: Added the 'MeasurementSetAnalyzer' module for analyzing measurement sets.
+        (MeasurementSetAnalyzer):
+        (MeasurementSetAnalyzer.prototype.async.analyzeOnce):
+        (MeasurementSetAnalyzer.measurementSetListForAnalysis):
+        (MeasurementSetAnalyzer.prototype.async._analyzeMeasurementSet):
+        * tools/js/v3-models.js: Added import for 'Bug' object.
+        * tools/run-analysis.js: Added this script to detect measurement set changes.
+        (main):
+        (async.analysisLoop):
+        * unit-tests/measurement-set-analyzer-tests.js: Added unit tests for 'MeasurementSetAnalyzer'.
+        * unit-tests/resources/mock-v3-models.js: Reset 'MeasurementSet._set' every time.
+        Updated the mock platform to include 'lastModifiedByMetric' information.
+        (MockModels.inject):
+
 2018-04-30  Ryosuke Niwa  <rniwa@webkit.org>
 
         Creating a custom analysis task after fetching all analysis tasks fail
diff --git a/Websites/perf.webkit.org/browser-tests/async-task-tests.js b/Websites/perf.webkit.org/browser-tests/async-task-tests.js
new file mode 100644
index 0000000..ca9d4ad
--- /dev/null
@@ -0,0 +1,8 @@
+
+describe('AsyncTask', () => {
+    it('should have "AsyncTask" available for computing segmentation', async () => {
+        const context = new BrowsingContext;
+        await ChartTest.importChartScripts(context);
+        expect(context.symbols.AsyncTask.isAvailable()).to.be(true);
+    });
+});
\ No newline at end of file
index 6b25815..517ae85 100644
@@ -15,6 +15,7 @@ mocha.setup('bdd');
 </head>
 <body>
 <div id="mocha"></div>
+<script src="async-task-tests.js"></script>
 <script src="component-base-tests.js"></script>
 <script src="page-tests.js"></script>
 <script src="page-router-tests.js"></script>
@@ -208,6 +209,7 @@ const ChartTest = {
     importChartScripts(context)
     {
         return context.importScripts([
+            'async-task.js',
             '../shared/statistics.js',
             'lazily-evaluated-function.js',
             'instrumentation.js',
@@ -226,7 +228,7 @@ const ChartTest = {
             'components/time-series-chart.js',
             'components/interactive-time-series-chart.js'],
             'ComponentBase', 'TimeSeriesChart', 'InteractiveTimeSeriesChart',
-            'Platform', 'Metric', 'Test', 'Repository', 'MeasurementSet', 'MockRemoteAPI').then(() => {
+            'Platform', 'Metric', 'Test', 'Repository', 'MeasurementSet', 'MockRemoteAPI', 'AsyncTask').then(() => {
                 return context.symbols.TimeSeriesChart;
             })
     },
index a3164cf..056ba94 100644
@@ -110,7 +110,7 @@ var Statistics = new (function () {
         };
     }
 
-    this.findRangesForChangeDetectionsWithWelchsTTest = function (values, segmentations, oneSidedPossibility) {
+    this.findRangesForChangeDetectionsWithWelchsTTest = function (values, segmentations, oneSidedPossibility=0.99) {
         if (!values.length)
             return [];
 
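
A minimal sketch of what the new default enables; callers may now omit the third argument (the require path and sample values are illustrative, and the Schwarz-criterion helper is the one referenced by name elsewhere in this patch):

    const Statistics = require('./public/shared/statistics.js');
    const values = [10.1, 10.2, 10.0, 12.4, 12.6, 12.5];
    const segmentation = Statistics.segmentTimeSeriesByMaximizingSchwarzCriterion(values);
    // Implicitly uses oneSidedPossibility = 0.99 after this change:
    const ranges = Statistics.findRangesForChangeDetectionsWithWelchsTTest(values, segmentation);
    // Equivalent to the previously required explicit form:
    const sameRanges = Statistics.findRangesForChangeDetectionsWithWelchsTTest(values, segmentation, 0.99);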
index 11b54ff..ee81c84 100644
@@ -29,6 +29,10 @@ class AsyncTask {
         });
     }
 
+    static isAvailable()
+    {
+        return typeof Worker !== 'undefined';
+    }
 }
 
 AsyncTask._asyncMessageId = 0;
@@ -149,3 +153,6 @@ if (typeof module == 'undefined' && typeof window == 'undefined' && typeof impor
     importScripts('/shared/statistics.js');
     onmessage = AsyncTaskWorker.workerDidRecieveMessage.bind(AsyncTaskWorker);
 }
+
+if (typeof module != 'undefined')
+    module.exports.AsyncTask = AsyncTask;
\ No newline at end of file
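
For reference, a sketch of the guard pattern this helper enables, mirroring the measurement-set.js hunk below ('segmentationName', 'timeSeriesValues', and 'args' are as in _invokeSegmentationAlgorithm; execute() is assumed to resolve with a {result} object, as existing callers expect):

    async function computeSegmentation(segmentationName, timeSeriesValues, args)
    {
        if (!AsyncTask.isAvailable()) {
            // No 'Worker' (e.g. running under node): compute synchronously.
            return Statistics[segmentationName].apply(timeSeriesValues, args);
        }
        // Otherwise offload the computation to a worker thread.
        const response = await new AsyncTask(segmentationName, args).execute();
        return response.result;
    }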
index 0329b7e..c158867 100644
@@ -29,3 +29,6 @@ class Bug extends DataModelObject {
     label() { return this.bugNumber(); }
     title() { return `${this._bugTracker.label()}: ${this.bugNumber()}`; }
 }
+
+if (typeof module != 'undefined')
+    module.exports.Bug = Bug;
\ No newline at end of file
index 65c95bc..e4a8a07 100644
@@ -290,7 +290,7 @@ class MeasurementSet {
         var args = [timeSeriesValues].concat(parameters || []);
 
         var timeSeriesIsShortEnoughForSyncComputation = timeSeriesValues.length < 100;
-        if (timeSeriesIsShortEnoughForSyncComputation) {
+        if (timeSeriesIsShortEnoughForSyncComputation || !AsyncTask.isAvailable()) {
             Instrumentation.startMeasuringTime('_invokeSegmentationAlgorithm', 'syncSegmentation');
             var segmentation = Statistics[segmentationName].apply(timeSeriesValues, args);
             Instrumentation.endMeasuringTime('_invokeSegmentationAlgorithm', 'syncSegmentation');
index e1463ca..58b5113 100644
@@ -1,4 +1,4 @@
-
+const assert = require('assert');
 const crypto = require('crypto');
 
 function addBuilderForReport(report)
@@ -49,9 +49,22 @@ function submitReport(report)
     });
 }
 
+async function assertThrows(expectedError, testFunction)
+{
+    let thrownException = false;
+    try {
+        await testFunction();
+    } catch (error) {
+        thrownException = true;
+        assert.equal(error, expectedError);
+    }
+    assert.ok(thrownException);
+}
+
 if (typeof module != 'undefined') {
     module.exports.addBuilderForReport = addBuilderForReport;
     module.exports.addSlaveForReport = addSlaveForReport;
     module.exports.prepareServerTest = prepareServerTest;
     module.exports.submitReport = submitReport;
+    module.exports.assertThrows = assertThrows;
 }
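
A usage sketch for the new helper (the expected error string and the function under test are hypothetical):

    const assertThrows = require('./resources/common-operations.js').assertThrows;

    it('should reject with the expected error', async () => {
        await assertThrows('SomeExpectedError', async () => {
            await somePromiseReturningOperation(); // hypothetical function under test
        });
    });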
diff --git a/Websites/perf.webkit.org/tools/js/measurement-set-analyzer.js b/Websites/perf.webkit.org/tools/js/measurement-set-analyzer.js
new file mode 100644
index 0000000..b329aa7
--- /dev/null
@@ -0,0 +1,113 @@
+const Statistics = require('../../public/shared/statistics.js');
+
+class MeasurementSetAnalyzer {
+    constructor(measurementSetList, startTime, endTime, logger)
+    {
+        this._measurementSetList = measurementSetList;
+        this._startTime = startTime;
+        this._endTime = endTime;
+        this._logger = logger;
+    }
+
+    async analyzeOnce()
+    {
+        for (const measurementSet of this._measurementSetList)
+            await this._analyzeMeasurementSet(measurementSet);
+    }
+
+    // FIXME: This code should be shared with DashboardPage.
+    static measurementSetListForAnalysis(manifest)
+    {
+        const measurementSetList = [];
+        for (const dashboard of Object.values(manifest.dashboards)) {
+            for (const row of dashboard) {
+                for (const cell of row) {
+                    if (cell instanceof Array) {
+                        if (cell.length < 2)
+                            continue;
+                        const platformId = parseInt(cell[0]);
+                        const metricId = parseInt(cell[1]);
+                        if (isNaN(platformId) || isNaN(metricId))
+                            continue;
+                        const platform = Platform.findById(platformId);
+                        const metric = Metric.findById(metricId);
+                        console.assert(platform);
+                        console.assert(metric);
+
+                        const measurementSet = MeasurementSet.findSet(platform.id(), metric.id(), platform.lastModified(metric));
+                        console.assert(measurementSet);
+                        measurementSetList.push(measurementSet);
+                    }
+                }
+            }
+        }
+        return measurementSetList;
+    }
+
+    async _analyzeMeasurementSet(measurementSet)
+    {
+        const metric = Metric.findById(measurementSet.metricId());
+        const platform = Platform.findById(measurementSet.platformId());
+        this._logger.info(`==== "${metric.fullName()}" on "${platform.name()}" ====`);
+        await measurementSet.fetchBetween(this._startTime, this._endTime);
+        const currentTimeSeries = measurementSet.fetchedTimeSeries('current', false, false);
+        const rawValues = currentTimeSeries.values();
+        if (!rawValues || rawValues.length < 2)
+            return;
+
+        const segmentedValues = await measurementSet.fetchSegmentation('segmentTimeSeriesByMaximizingSchwarzCriterion', [], 'current', false);
+
+        const progressionString = 'progression';
+        const regressionString = 'regression';
+        const ranges = Statistics.findRangesForChangeDetectionsWithWelchsTTest(rawValues, segmentedValues).map((range) => ({
+            startPoint: currentTimeSeries.findPointByIndex(range.startIndex),
+            endPoint: currentTimeSeries.findPointByIndex(range.endIndex),
+            valueChangeSummary: metric.labelForDifference(range.segmentationStartValue, range.segmentationEndValue,
+                progressionString, regressionString)
+        }));
+
+        const analysisTasks = await AnalysisTask.fetchByPlatformAndMetric(platform.id(), metric.id());
+        const filteredRanges = ranges.filter((range) => {
+            const rangeEndsBeforeAnalysisStarts = range.endPoint.time < this._startTime;
+            if (rangeEndsBeforeAnalysisStarts)
+                return false;
+            for (const task of analysisTasks) {
+                const taskEndsBeforeRangeStart = task.endTime() < range.startPoint.time;
+                const taskStartsAfterRangeEnd = range.endPoint.time < task.startTime();
+                if (!(taskEndsBeforeRangeStart || taskStartsAfterRangeEnd))
+                    return false;
+            }
+            return true;
+        });
+
+        let rangeWithMostSignificantChange = null;
+        let largestWeightFavoringRegression = 0;
+        for (const range of filteredRanges) {
+            const relativeChangeAbsoluteValue = Math.abs(range.valueChangeSummary.relativeChange);
+            const weightFavoringRegression = range.valueChangeSummary.changeType === regressionString ?
+                relativeChangeAbsoluteValue : Math.sqrt(relativeChangeAbsoluteValue);
+
+            if (weightFavoringRegression > largestWeightFavoringRegression) {
+                largestWeightFavoringRegression = weightFavoringRegression;
+                rangeWithMostSignificantChange = range;
+            }
+        }
+
+        if (!rangeWithMostSignificantChange) {
+            this._logger.info('Nothing to analyze');
+            return;
+        }
+
+        const startCommitSet = rangeWithMostSignificantChange.startPoint.commitSet();
+        const endCommitSet = rangeWithMostSignificantChange.endPoint.commitSet();
+        const summary = `Potential ${rangeWithMostSignificantChange.valueChangeSummary.changeLabel} on ${platform.name()} between ${CommitSet.diff(startCommitSet, endCommitSet)}`;
+
+        // FIXME: The iteration count should be smarter than hard-coding.
+        const response = await AnalysisTask.create(summary, rangeWithMostSignificantChange.startPoint,
+            rangeWithMostSignificantChange.endPoint, 'Confirm', 4);
+        this._logger.info(`Created analysis task with id "${response.taskId}" to confirm: "${summary}".`);
+    }
+}
+
+if (typeof module !== 'undefined')
+    module.exports.MeasurementSetAnalyzer = MeasurementSetAnalyzer;
\ No newline at end of file
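
Driving the analyzer directly might look like the following sketch, assuming the v3 models are loaded and RemoteAPI is configured the way tools/run-analysis.js does below:

    require('./js/v3-models.js');
    const MeasurementSetAnalyzer = require('./js/measurement-set-analyzer.js').MeasurementSetAnalyzer;

    async function analyzeLastTenDays() // illustrative wrapper
    {
        const manifest = await Manifest.fetch();
        const measurementSetList = MeasurementSetAnalyzer.measurementSetListForAnalysis(manifest);
        const endTime = Date.now();
        const startTime = endTime - 10 * 24 * 3600 * 1000;
        const analyzer = new MeasurementSetAnalyzer(measurementSetList, startTime, endTime, console);
        await analyzer.analyzeOnce();
    }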
index 6cfd83b..e1b249c 100644
@@ -11,6 +11,7 @@ importFromV3('models/data-model.js', 'DataModelObject');
 importFromV3('models/data-model.js', 'LabeledObject');
 
 importFromV3('models/analysis-task.js', 'AnalysisTask');
+importFromV3('models/bug.js', 'Bug');
 importFromV3('models/bug-tracker.js', 'BugTracker');
 importFromV3('models/build-request.js', 'BuildRequest');
 importFromV3('models/builder.js', 'Build');
@@ -37,5 +38,6 @@ importFromV3('models/uploaded-file.js', 'UploadedFile');
 importFromV3('instrumentation.js', 'Instrumentation');
 importFromV3('lazily-evaluated-function.js', 'LazilyEvaluatedFunction');
 importFromV3('commit-set-range-bisector.js', 'CommitSetRangeBisector');
+importFromV3('async-task.js', 'AsyncTask');
 
 global.Statistics = require('../../public/shared/statistics.js');
\ No newline at end of file
diff --git a/Websites/perf.webkit.org/tools/run-analysis.js b/Websites/perf.webkit.org/tools/run-analysis.js
new file mode 100644 (file)
index 0000000..081a0d1
--- /dev/null
@@ -0,0 +1,52 @@
+#!/usr/local/bin/node
+
+const fs = require('fs');
+const parseArguments = require('./js/parse-arguments.js').parseArguments;
+const RemoteAPI = require('./js/remote.js').RemoteAPI;
+const MeasurementSetAnalyzer = require('./js/measurement-set-analyzer').MeasurementSetAnalyzer;
+require('./js/v3-models.js');
+global.PrivilegedAPI = require('./js/privileged-api.js').PrivilegedAPI;
+
+function main(argv)
+{
+    const options = parseArguments(argv, [
+        {name: '--server-config-json', required: true},
+        {name: '--analysis-range-in-days', type: parseFloat, default: 10},
+        {name: '--seconds-to-sleep', type: parseFloat, default: 1200},
+    ]);
+
+    if (!options)
+        return;
+
+    analysisLoop(options);
+}
+
+async function analysisLoop(options)
+{
+    let secondsToSleep;
+    try {
+        const serverConfig = JSON.parse(fs.readFileSync(options['--server-config-json'], 'utf-8'));
+        const analysisRangeInDays = options['--analysis-range-in-days'];
+        secondsToSleep = options['--seconds-to-sleep'];
+        global.RemoteAPI = new RemoteAPI(serverConfig.server);
+        PrivilegedAPI.configure(serverConfig.slave.name, serverConfig.slave.password);
+
+        const manifest = await Manifest.fetch();
+        const measurementSetList = MeasurementSetAnalyzer.measurementSetListForAnalysis(manifest);
+
+        const endTime = Date.now();
+        const startTime = endTime - analysisRangeInDays * 24 * 3600 * 1000;
+        const analyzer = new MeasurementSetAnalyzer(measurementSetList, startTime, endTime, console);
+
+        console.log(`Start analyzing measurement sets from the last ${analysisRangeInDays} days.`);
+        await analyzer.analyzeOnce();
+    } catch(error) {
+        console.error(`Failed to analyze measurement sets due to ${error}`);
+    }
+
+    console.log(`Sleeping for ${secondsToSleep} seconds.`);
+    setTimeout(() => analysisLoop(options), secondsToSleep * 1000);
+}
+
+
+main(process.argv);
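
The script reads a JSON config file whose fields can be inferred from the code above; the exact shape of the 'server' object is whatever RemoteAPI expects, so treat this example as an assumption:

    {
        "server": {"scheme": "https", "host": "perf.webkit.org"},
        "slave": {"name": "analysis-bot", "password": "secret"}
    }

A typical invocation, spelling out the defaults parsed in main():

    node tools/run-analysis.js --server-config-json server-config.json --analysis-range-in-days 10 --seconds-to-sleep 1200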
diff --git a/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js b/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js
new file mode 100644
index 0000000..1eb9e41
--- /dev/null
@@ -0,0 +1,422 @@
+'use strict';
+
+require('../tools/js/v3-models.js');
+const assert = require('assert');
+const assertThrows = require('../server-tests/resources/common-operations').assertThrows;
+const MockRemoteAPI = require('./resources/mock-remote-api.js').MockRemoteAPI;
+const MockModels = require('./resources/mock-v3-models.js').MockModels;
+const MeasurementSetAnalyzer = require('../tools/js/measurement-set-analyzer.js').MeasurementSetAnalyzer;
+const NodePrivilegedAPI = require('../tools/js/privileged-api.js').PrivilegedAPI;
+
+describe('MeasurementSetAnalyzer', () => {
+    MockModels.inject();
+    const requests = MockRemoteAPI.inject('http://build.webkit.org', NodePrivilegedAPI);
+
+    describe('measurementSetListForAnalysis', () => {
+        it('should generate an empty list if there are no dashboard configurations', () => {
+            const configurations = MeasurementSetAnalyzer.measurementSetListForAnalysis({dashboards: {}});
+            assert.equal(configurations.length, 0);
+        });
+
+        it('should generate a list of measurement sets', () => {
+            const configurations = MeasurementSetAnalyzer.measurementSetListForAnalysis({dashboards: {
+                "macOS": [["some metric", "plt-mean"], [['Some Platform'], [65, 2884], [65, 1158]]]
+            }});
+            assert.equal(configurations.length, 2);
+            const [measurementSet0, measurementSet1] = configurations;
+            assert.equal(measurementSet0.metricId(), MockModels.someMetric.id());
+            assert.equal(measurementSet0.platformId(), MockModels.somePlatform.id());
+            assert.equal(measurementSet1.metricId(), MockModels.pltMean.id());
+            assert.equal(measurementSet1.platformId(), MockModels.somePlatform.id());
+        });
+    });
+
+    function mockLogger()
+    {
+        const info_logs = [];
+        const error_logs = [];
+        return {
+            info: (message) => info_logs.push(message),
+            error: (message) => error_logs.push(message),
+            info_logs, error_logs
+        };
+    }
+
+    describe('analyzeOnce', () => {
+        const simpleSegmentableValues = [
+            1546.5603, 1548.1536, 1563.5452, 1539.7823, 1546.4184, 1548.9299, 1532.5444, 1546.2800, 1547.1760, 1551.3507,
+            1548.3277, 1544.7673, 1542.7157, 1538.1700, 1538.0948, 1543.0364, 1537.9737, 1542.2611, 1543.9685, 1546.4901,
+            1544.4080, 1540.8671, 1537.3353, 1549.4331, 1541.4436, 1544.1299, 1550.1770, 1553.1872, 1549.3417, 1542.3788,
+            1543.5094, 1541.7905, 1537.6625, 1547.3840, 1538.5185, 1549.6764, 1556.6138, 1552.0476, 1541.7629, 1544.7006,
+            /* segments changes here */
+            1587.1390, 1594.5451, 1586.2430, 1596.7310, 1548.1423
+        ];
+
+        const dataBeforeSmallProgression = [1587.1390, 1594.5451, 1586.2430, 1596.7310, 1548.1423];
+        const dataBeforeHugeProgression = [1700.1390, 1704.5451, 1703.2430, 1706.7310, 1689.1423];
+
+        function makeSampleRuns(values, startRunId, startTime, timeIncrement)
+        {
+            let runId = startRunId;
+            let buildId = 3400;
+            let buildNumber = 1;
+            let commit_id = 1;
+            let revision = 1;
+            const makeRun = (value, commitTime) => [runId++, value, 1, value, value, false, [[commit_id++, MockModels.webkit.id(), revision++, 0, 0]], commitTime, commitTime + 10, buildId++, buildNumber++, MockModels.builder.id()];
+            timeIncrement = Math.floor(timeIncrement);
+            return values.map((value, index) => makeRun(value, startTime + index * timeIncrement));
+        }
+
+        it('should not analyze if no measurement set is available', async () => {
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger);
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce();
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].resolve({
+                'clusterStart': 1000,
+                'clusterSize': 1000,
+                'formatMap': [],
+                'configurations': {current: []},
+                'startTime': 4000,
+                'endTime': 5000,
+                'lastModified': 5000,
+                'clusterCount': 4,
+                'status': 'OK'});
+            await analysisPromise;
+            assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====']);
+            assert.deepEqual(logger.error_logs, []);
+        });
+
+        it('should not analyze if there is only one data point in the measurement set', async () => {
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger);
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce();
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].resolve({
+                'clusterStart': 1000,
+                'clusterSize': 1000,
+                'formatMap': [],
+                'configurations': {current: makeSampleRuns(simpleSegmentableValues.slice(0, 1), 6400, 4000, 1000 / 50)},
+                'startTime': 4000,
+                'endTime': 5000,
+                'lastModified': 5000,
+                'clusterCount': 4,
+                'status': 'OK'});
+            await analysisPromise;
+            assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====']);
+            assert.deepEqual(logger.error_logs, []);
+        });
+
+        it('should not analyze if no regression is detected', async () => {
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger);
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce();
+
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].resolve({
+                'clusterStart': 1000,
+                'clusterSize': 1000,
+                'formatMap': ['id', 'mean', 'iterationCount', 'sum', 'squareSum', 'markedOutlier', 'revisions', 'commitTime', 'build', 'buildTime', 'buildNumber', 'builder'],
+                'configurations': {current: makeSampleRuns(simpleSegmentableValues.slice(0, 39), 6400, 4000, 1000 / 50)},
+                'startTime': 4000,
+                'endTime': 5000,
+                'lastModified': 5000,
+                'clusterCount': 4,
+                'status': 'OK'});
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 2);
+            assert.equal(requests[1].url, '/api/analysis-tasks?platform=65&metric=2884');
+            requests[1].resolve({
+                analysisTasks: [],
+                bugs: [],
+                commits: [],
+                status: 'OK'
+            });
+
+            await analysisPromise;
+            assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====',
+                'Nothing to analyze']);
+            assert.deepEqual(logger.error_logs, []);
+        });
+
+        it('should not log analysis task creation if creating the analysis task fails', async () => {
+            PrivilegedAPI.configure('test', 'password');
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger);
+            measurementSetAnalyzer._startTime = 4000;
+            measurementSetAnalyzer._endTime = 5000;
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce();
+
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].resolve({
+                'clusterStart': 1000,
+                'clusterSize': 1000,
+                'formatMap': ['id', 'mean', 'iterationCount', 'sum', 'squareSum', 'markedOutlier', 'revisions', 'commitTime', 'build', 'buildTime', 'buildNumber', 'builder'],
+                'configurations': {current: makeSampleRuns(simpleSegmentableValues, 6400, 4000, 1000 / 50)},
+                'startTime': 4000,
+                'endTime': 5000,
+                'lastModified': 5000,
+                'clusterCount': 4,
+                'status': 'OK'});
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 2);
+            assert.equal(requests[1].url, '/api/analysis-tasks?platform=65&metric=2884');
+            requests[1].resolve({
+                analysisTasks: [],
+                bugs: [],
+                commits: [],
+                status: 'OK'
+            });
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 3);
+            assert.equal(requests[2].url, '/privileged-api/create-analysis-task');
+            assert.deepEqual(requests[2].data, {
+                slaveName: 'test',
+                slavePassword: 'password',
+                name: 'Potential 2.38% regression on Some platform between WebKit: r35-r44',
+                startRun: 6434,
+                endRun: 6443,
+                repetitionCount: 4,
+                testGroupName: 'Confirm',
+                revisionSets: [{'11': {revision: 35, ownerRevision: null, patch: null}},
+                    {'11': {revision: 44, ownerRevision: null, patch: null}}]
+            });
+            requests[2].reject('TriggerableNotFoundForTask');
+
+            await assertThrows('TriggerableNotFoundForTask', async () => await analysisPromise);
+            assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====']);
+            assert.deepEqual(logger.error_logs, []);
+        });
+
+        it('should analyze if a new regression is detected', async () => {
+            PrivilegedAPI.configure('test', 'password');
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger);
+            measurementSetAnalyzer._startTime = 4000;
+            measurementSetAnalyzer._endTime = 5000;
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce();
+
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].resolve({
+                'clusterStart': 1000,
+                'clusterSize': 1000,
+                'formatMap': ['id', 'mean', 'iterationCount', 'sum', 'squareSum', 'markedOutlier', 'revisions', 'commitTime', 'build', 'buildTime', 'buildNumber', 'builder'],
+                'configurations': {current: makeSampleRuns(simpleSegmentableValues, 6400, 4000, 1000 / 50)},
+                'startTime': 4000,
+                'endTime': 5000,
+                'lastModified': 5000,
+                'clusterCount': 4,
+                'status': 'OK'});
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 2);
+            assert.equal(requests[1].url, '/api/analysis-tasks?platform=65&metric=2884');
+            requests[1].resolve({
+                analysisTasks: [],
+                bugs: [],
+                commits: [],
+                status: 'OK'
+            });
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 3);
+            assert.equal(requests[2].url, '/privileged-api/create-analysis-task');
+            assert.deepEqual(requests[2].data, {
+                slaveName: 'test',
+                slavePassword: 'password',
+                name: 'Potential 2.38% regression on Some platform between WebKit: r35-r44',
+                startRun: 6434,
+                endRun: 6443,
+                repetitionCount: 4,
+                testGroupName: 'Confirm',
+                revisionSets: [{'11': {revision: 35, ownerRevision: null, patch: null}},
+                    {'11': {revision: 44, ownerRevision: null, patch: null}}]
+            });
+            requests[2].resolve({taskId: '5255', status: 'OK'});
+
+            await analysisPromise;
+            assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====',
+                'Created analysis task with id "5255" to confirm: "Potential 2.38% regression on Some platform between WebKit: r35-r44".']);
+            assert.deepEqual(logger.error_logs, []);
+        });
+
+        it('should not analyze if there is an overlapped existing analysis task', async () => {
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger);
+            measurementSetAnalyzer._startTime = 4000;
+            measurementSetAnalyzer._endTime = 5000;
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce();
+
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].resolve({
+                'clusterStart': 1000,
+                'clusterSize': 1000,
+                'formatMap': ['id', 'mean', 'iterationCount', 'sum', 'squareSum', 'markedOutlier', 'revisions', 'commitTime', 'build', 'buildTime', 'buildNumber', 'builder'],
+                'configurations': {current: makeSampleRuns(simpleSegmentableValues, 6400, 4000, 1000 / 50)},
+                'startTime': 4000,
+                'endTime': 5000,
+                'lastModified': 5000,
+                'clusterCount': 4,
+                'status': 'OK'});
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 2);
+            assert.equal(requests[1].url, '/api/analysis-tasks?platform=65&metric=2884');
+            requests[1].resolve({
+                analysisTasks: [{
+                    author: null,
+                    bugs: [],
+                    buildRequestCount: 14,
+                    finishedBuildRequestCount: 6,
+                    category: 'identified',
+                    causes: [],
+                    createdAt: 4500,
+                    endRun: 6434,
+                    endRunTime: 5000,
+                    fixes: [],
+                    id: 1082,
+                    metric: MockModels.someMetric.id(),
+                    name: 'Potential 2.38% regression on Some platform between WebKit: r35-r44',
+                    needed: null,
+                    platform: MockModels.somePlatform.id(),
+                    result: 'regression',
+                    segmentationStrategy: 1,
+                    startRun: 6434,
+                    startRunTime: 4000,
+                    testRangeStrategy: 2
+                }],
+                bugs: [],
+                commits: [],
+                status: 'OK'
+            });
+
+            await analysisPromise;
+            assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====',
+                'Nothing to analyze']);
+            assert.deepEqual(logger.error_logs, []);
+        });
+
+        it('should favor regression if the progression is not big enough', async () => {
+            PrivilegedAPI.configure('test', 'password');
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger);
+            measurementSetAnalyzer._startTime = 4000;
+            measurementSetAnalyzer._endTime = 5000;
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce();
+
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].resolve({
+                'clusterStart': 1000,
+                'clusterSize': 1000,
+                'formatMap': ['id', 'mean', 'iterationCount', 'sum', 'squareSum', 'markedOutlier', 'revisions', 'commitTime', 'build', 'buildTime', 'buildNumber', 'builder'],
+                'configurations': {current: makeSampleRuns(dataBeforeSmallProgression.concat(simpleSegmentableValues), 6400, 4000, 1000 / 50)},
+                'startTime': 4000,
+                'endTime': 5000,
+                'lastModified': 5000,
+                'clusterCount': 4,
+                'status': 'OK'});
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 2);
+            assert.equal(requests[1].url, '/api/analysis-tasks?platform=65&metric=2884');
+            requests[1].resolve({
+                analysisTasks: [],
+                bugs: [],
+                commits: [],
+                status: 'OK'
+            });
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 3);
+            assert.equal(requests[2].url, '/privileged-api/create-analysis-task');
+            assert.deepEqual(requests[2].data, {
+                slaveName: 'test',
+                slavePassword: 'password',
+                name: 'Potential 2.38% regression on Some platform between WebKit: r40-r49',
+                startRun: 6439,
+                endRun: 6448,
+                repetitionCount: 4,
+                testGroupName: 'Confirm',
+                revisionSets: [{'11': {revision: 40, ownerRevision: null, patch: null}},
+                    {'11': {revision: 49, ownerRevision: null, patch: null}}]
+            });
+            requests[2].resolve({taskId: '5255', status: 'OK'});
+
+            await analysisPromise;
+            assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====',
+                'Created analysis task with id "5255" to confirm: "Potential 2.38% regression on Some platform between WebKit: r40-r49".']);
+            assert.deepEqual(logger.error_logs, []);
+        });
+
+        it('should choose to analyze the progression when it is big enough', async () => {
+            PrivilegedAPI.configure('test', 'password');
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger);
+            measurementSetAnalyzer._startTime = 4000;
+            measurementSetAnalyzer._endTime = 5000;
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce();
+
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].resolve({
+                'clusterStart': 1000,
+                'clusterSize': 1000,
+                'formatMap': ['id', 'mean', 'iterationCount', 'sum', 'squareSum', 'markedOutlier', 'revisions', 'commitTime', 'build', 'buildTime', 'buildNumber', 'builder'],
+                'configurations': {current: makeSampleRuns(dataBeforeHugeProgression.concat(simpleSegmentableValues), 6400, 4000, 1000 / 50)},
+                'startTime': 4000,
+                'endTime': 5000,
+                'lastModified': 5000,
+                'clusterCount': 4,
+                'status': 'OK'});
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 2);
+            assert.equal(requests[1].url, '/api/analysis-tasks?platform=65&metric=2884');
+            requests[1].resolve({
+                analysisTasks: [],
+                bugs: [],
+                commits: [],
+                status: 'OK'
+            });
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 3);
+            assert.equal(requests[2].url, '/privileged-api/create-analysis-task');
+            assert.deepEqual(requests[2].data, {
+                slaveName: 'test',
+                slavePassword: 'password',
+                name: 'Potential 9.15% progression on Some platform between WebKit: r3-r8',
+                startRun: 6402,
+                endRun: 6407,
+                repetitionCount: 4,
+                testGroupName: 'Confirm',
+                revisionSets: [{'11': {revision: 3, ownerRevision: null, patch: null}},
+                    {'11': {revision: 8, ownerRevision: null, patch: null}}]
+            });
+            requests[2].resolve({taskId: '5255', status: 'OK'});
+
+            await analysisPromise;
+            assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====',
+                'Created analysis task with id "5255" to confirm: "Potential 9.15% progression on Some platform between WebKit: r3-r8".']);
+            assert.deepEqual(logger.error_logs, []);
+        });
+    });
+});
\ No newline at end of file
index c4277b0..b7c39a3 100644
@@ -14,6 +14,7 @@ var MockModels = {
             TestGroup.clearStaticMap();
             BuildRequest.clearStaticMap();
             Triggerable.clearStaticMap();
+            MeasurementSet._set = null;
 
             MockModels.osx = Repository.ensureSingleton(9, {name: 'OS X'});
             MockModels.ios = Repository.ensureSingleton(22, {name: 'iOS'});
@@ -27,7 +28,8 @@ var MockModels = {
 
             MockModels.someTest = Test.ensureSingleton(1, {name: 'Some test'});
             MockModels.someMetric = Metric.ensureSingleton(2884, {name: 'Some metric', test: MockModels.someTest});
-            MockModels.somePlatform = Platform.ensureSingleton(65, {name: 'Some platform', metrics: [MockModels.someMetric]});
+            MockModels.somePlatform = Platform.ensureSingleton(65, {name: 'Some platform', metrics: [MockModels.someMetric],
+                lastModifiedByMetric: {'2884': 5000, '1158': 5000}});
 
             MockModels.speedometer = Test.ensureSingleton(1928, {name: 'Speedometer'});
             MockModels.jetstream = Test.ensureSingleton(1886, {name: 'JetStream'});
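
The 'lastModifiedByMetric' entries added above are what measurementSetListForAnalysis relies on: it calls platform.lastModified(metric), which presumably reads this map, before resolving a measurement set. A sketch using the mock ids:

    const platform = Platform.findById(65);  // MockModels.somePlatform
    const metric = Metric.findById(2884);    // MockModels.someMetric
    // lastModified(metric) is assumed to return lastModifiedByMetric['2884'], i.e. 5000:
    const set = MeasurementSet.findSet(platform.id(), metric.id(), platform.lastModified(metric));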