Add benchmark to CI (MetaMask#7871)
* Add benchmark to CI

The page load benchmark for Chrome is now run during CI, and the
results are collected and summarized in the `metamaskbot` comment.

Closes MetaMask#6881
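For reference, the summarizer reads each platform's `test-artifacts/<platform>/benchmark/pageload.json` and indexes it as page → measure → metric. A hypothetical file shape, inferred from how the bot script below consumes it; the numbers are invented, and only the `load` metric is guaranteed by the summary line:

```js
// Hypothetical pageload.json contents, shown as a JS literal; inferred
// from the bot script's benchmark[page][measure][metric] lookups below.
const exampleBenchmark = {
  notification: {
    average: { load: 523 },      // mean page load time, in ms
    marginOfError: { load: 41 }, // margin of error on that mean, in ms
  },
}
console.log(exampleBenchmark.notification.average.load)
```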

* Double default number of samples

The default number of samples was changed from 10 to 20. Results from
only 10 samples would show statistically significant changes in page
load times from one build to the next, so they weren't a sufficiently
useful metric.
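A quick way to see why doubling the sample count helps: the margin of error on a mean shrinks with the square root of the number of samples, so going from 10 to 20 narrows it by a factor of about 1.4. A minimal sketch of that calculation, not the repository's implementation:

```js
// Sketch: 95% margin of error for a sample mean, assuming roughly
// normal noise. Doubling n from 10 to 20 shrinks it by ~sqrt(2) ≈ 1.41x.
function marginOfError (samples) {
  const n = samples.length
  const mean = samples.reduce((a, b) => a + b, 0) / n
  const variance = samples.reduce((acc, x) => acc + (x - mean) ** 2, 0) / (n - 1)
  return 1.96 * Math.sqrt(variance / n)
}

// Invented load times (ms) for illustration:
console.log(marginOfError([510, 540, 498, 525, 531, 502, 517, 544, 509, 520]))
```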
Gudahtt authored and yqrashawn committed Feb 10, 2020
1 parent 7fd20fa commit 78e8eb4
Showing 3 changed files with 119 additions and 2 deletions.
25 changes: 25 additions & 0 deletions .circleci/config.yml
@@ -65,10 +65,14 @@ workflows:
       # - test-e2e-firefox
       - test-integration-flat-chrome
       - test-integration-flat-firefox
+      - benchmark:
+          requires:
+            - prep-build-test
       - job-publish-prerelease:
           requires:
             - prep-deps
             - prep-build
+            - benchmark
             - all-tests-pass
       - job-publish-release:
           filters:
@@ -271,6 +275,27 @@ jobs:
           path: test-artifacts
           destination: test-artifacts
 
+  benchmark:
+    docker:
+      - image: circleci/node:10.17-browsers
+    steps:
+      - checkout
+      - attach_workspace:
+          at: .
+      - run:
+          name: Move test build to dist
+          command: mv ./dist-test ./dist
+      - run:
+          name: Run page load benchmark
+          command: yarn benchmark:chrome --out test-artifacts/chrome/benchmark/pageload.json
+      - store_artifacts:
+          path: test-artifacts
+          destination: test-artifacts
+      - persist_to_workspace:
+          root: .
+          paths:
+            - test-artifacts
+
   job-publish-prerelease:
     docker:
       - image: circleci/node:10.17-browsers
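The `persist_to_workspace` step is what lets the bot find the results: `job-publish-prerelease` now requires `benchmark`, and the bot script (next file) resolves the same `test-artifacts` path relative to the repository root. A small sketch of that path math, assuming the script runs from `development/` as its `path.resolve(__dirname, '..')` suggests:

```js
// Where the bot script expects the persisted benchmark to be; this
// mirrors the path.resolve call in the diff below.
const path = require('path')
const platform = 'chrome'
const benchmarkPath = path.resolve(__dirname, '..', path.join('test-artifacts', platform, 'benchmark', 'pageload.json'))
console.log(benchmarkPath) // <repo root>/test-artifacts/chrome/benchmark/pageload.json
```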
93 changes: 92 additions & 1 deletion development/metamaskbot-build-announce.js
@@ -1,9 +1,15 @@
 #!/usr/bin/env node
 const { promises: fs } = require('fs')
+const path = require('path')
 const request = require('request-promise')
 const VERSION = require('../dist/chrome/manifest.json').version // eslint-disable-line import/no-unresolved
 
 start().catch(console.error)
 
+function capitalizeFirstLetter (string) {
+  return string.charAt(0).toUpperCase() + string.slice(1)
+}
+
 async function start () {
   const CIRCLE_PROJECT_USERNAME = process.env.CIRCLE_PROJECT_USERNAME
   const CIRCLE_PROJECT_REPONAME = process.env.CIRCLE_PROJECT_REPONAME
@@ -68,7 +74,92 @@ async function start () {
   const hiddenContent =
     `<ul>` + contentRows.map(row => `<li>${row}</li>`).join('\n') + `</ul>`
   const exposedContent = `Builds ready [${SHORT_SHA1}]`
-  const commentBody = `<details><summary>${exposedContent}</summary>${hiddenContent}</details>`
+  const artifactsBody = `<details><summary>${exposedContent}</summary>${hiddenContent}</details>`
+
+  const benchmarkResults = {}
+  for (const platform of platforms) {
+    const benchmarkPath = path.resolve(__dirname, '..', path.join('test-artifacts', platform, 'benchmark', 'pageload.json'))
+    try {
+      const data = await fs.readFile(benchmarkPath, 'utf8')
+      const benchmark = JSON.parse(data)
+      benchmarkResults[platform] = benchmark
+    } catch (error) {
+      if (error.code === 'ENOENT') {
+        console.log(`No benchmark data found for ${platform}; skipping`)
+      } else {
+        console.error(`Error encountered processing benchmark data for '${platform}': '${error}'`)
+      }
+    }
+  }
+
+  let commentBody
+  if (!benchmarkResults.chrome) {
+    console.log(`No results for Chrome found; skipping benchmark`)
+    commentBody = artifactsBody
+  } else {
+    try {
+      const chromePageLoad = Math.round(parseFloat(benchmarkResults.chrome.notification.average.load))
+      const chromePageLoadMarginOfError = Math.round(parseFloat(benchmarkResults.chrome.notification.marginOfError.load))
+      const benchmarkSummary = `Page Load Metrics (${chromePageLoad} ± ${chromePageLoadMarginOfError} ms)`
+
+      const allPlatforms = new Set()
+      const allPages = new Set()
+      const allMetrics = new Set()
+      const allMeasures = new Set()
+      for (const platform of Object.keys(benchmarkResults)) {
+        allPlatforms.add(platform)
+        const platformBenchmark = benchmarkResults[platform]
+        const pages = Object.keys(platformBenchmark)
+        for (const page of pages) {
+          allPages.add(page)
+          const pageBenchmark = platformBenchmark[page]
+          const measures = Object.keys(pageBenchmark)
+          for (const measure of measures) {
+            allMeasures.add(measure)
+            const measureBenchmark = pageBenchmark[measure]
+            const metrics = Object.keys(measureBenchmark)
+            for (const metric of metrics) {
+              allMetrics.add(metric)
+            }
+          }
+        }
+      }
+
+      const tableRows = []
+      for (const platform of allPlatforms) {
+        const pageRows = []
+        for (const page of allPages) {
+          const metricRows = []
+          for (const metric of allMetrics) {
+            let metricData = `<td>${metric}</td>`
+            for (const measure of allMeasures) {
+              metricData += `<td align="right">${Math.round(parseFloat(benchmarkResults[platform][page][measure][metric]))}</td>`
+            }
+            metricRows.push(metricData)
+          }
+          metricRows[0] = `<td rowspan="${allMetrics.size}">${capitalizeFirstLetter(page)}</td>${metricRows[0]}`
+          pageRows.push(...metricRows)
+        }
+        pageRows[0] = `<td rowspan="${allPages.size * allMetrics.size}">${capitalizeFirstLetter(platform)}</td>${pageRows[0]}`
+        for (const row of pageRows) {
+          tableRows.push(`<tr>${row}</tr>`)
+        }
+      }
+
+      const benchmarkTableHeaders = ['Platform', 'Page', 'Metric']
+      for (const measure of allMeasures) {
+        benchmarkTableHeaders.push(`${capitalizeFirstLetter(measure)} (ms)`)
+      }
+      const benchmarkTableHeader = `<thead><tr>${benchmarkTableHeaders.map(header => `<th>${header}</th>`).join('')}</tr></thead>`
+      const benchmarkTableBody = `<tbody>${tableRows.join('')}</tbody>`
+      const benchmarkTable = `<table>${benchmarkTableHeader}${benchmarkTableBody}</table>`
+      const benchmarkBody = `<details><summary>${benchmarkSummary}</summary>${benchmarkTable}</details>`
+      commentBody = `${artifactsBody}${benchmarkBody}`
+    } catch (error) {
+      console.error(`Error constructing benchmark results: '${error}'`)
+      commentBody = artifactsBody
+    }
+  }
 
   const JSON_PAYLOAD = JSON.stringify({ body: commentBody })
   const POST_COMMENT_URI = `https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/issues/${CIRCLE_PR_NUMBER}/comments`
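The nested loops above produce a table whose first two columns use `rowspan` so each platform and page label appears only once. A self-contained sketch of that layout with invented numbers and a hypothetical `firstPaint` metric alongside `load` (one platform, one page, two measures, two metrics); it mirrors the loop structure rather than reusing the script's code:

```js
// Run with `node` to inspect the rowspan structure the bot generates.
const results = {
  chrome: {
    notification: {
      average: { firstPaint: 210, load: 523 },
      marginOfError: { firstPaint: 20, load: 41 },
    },
  },
}

const capitalize = (s) => s.charAt(0).toUpperCase() + s.slice(1)
const rows = []
for (const [platform, pages] of Object.entries(results)) {
  const pageRows = []
  for (const [page, measures] of Object.entries(pages)) {
    const measureNames = Object.keys(measures)
    const metricNames = Object.keys(measures[measureNames[0]])
    const metricRows = metricNames.map((metric) => {
      const cells = measureNames
        .map((measure) => `<td align="right">${measures[measure][metric]}</td>`)
        .join('')
      return `<td>${metric}</td>${cells}`
    })
    // The first metric row also carries the page cell, spanning all metric rows.
    metricRows[0] = `<td rowspan="${metricNames.length}">${capitalize(page)}</td>${metricRows[0]}`
    pageRows.push(...metricRows)
  }
  // The first page row also carries the platform cell, spanning every row beneath it.
  pageRows[0] = `<td rowspan="${pageRows.length}">${capitalize(platform)}</td>${pageRows[0]}`
  rows.push(...pageRows.map((row) => `<tr>${row}</tr>`))
}
console.log(`<table><tbody>${rows.join('\n')}</tbody></table>`)
```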
3 changes: 2 additions & 1 deletion test/e2e/benchmark.js
@@ -7,7 +7,7 @@ const { By, Key } = require('selenium-webdriver')
 const { withFixtures } = require('./helpers')
 const { PAGES } = require('./webdriver/driver')
 
-const DEFAULT_NUM_SAMPLES = 10
+const DEFAULT_NUM_SAMPLES = 20
 const ALL_PAGES = Object.values(PAGES)
 
 async function measurePage (pageName) {
@@ -16,6 +16,7 @@ async function measurePage (pageName) {
   const passwordField = await driver.findElement(By.css('#password'))
   await passwordField.sendKeys('correct horse battery staple')
   await passwordField.sendKeys(Key.ENTER)
+  await driver.findElement(By.css('.account-details__account-name'))
   await driver.navigate(pageName)
   await driver.delay(1000)
   metrics = await driver.collectMetrics()
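The one-line addition above makes the benchmark wait for a post-login element before navigating, so page load isn't measured while the wallet is still unlocking. In plain selenium-webdriver terms this is an explicit wait; a sketch, not the project's driver wrapper, which presumably handles the waiting internally:

```js
// Sketch of the wait the added line performs: block until an element
// that only exists after login has rendered, then proceed to navigate.
const { By, until } = require('selenium-webdriver')

async function waitForUnlock (driver) {
  await driver.wait(
    until.elementLocated(By.css('.account-details__account-name')),
    10000 // assumed timeout; the wrapper's actual default is not shown here
  )
}

module.exports = { waitForUnlock }
```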
