Add benchmark to CI (#7871)
* Add benchmark to CI

The page load benchmark for Chrome is now run during CI, and the results are collected and summarized in the `metamaskbot` comment.

Closes #6881

* Double default number of samples

The default number of samples was changed from 10 to 20. The results from 10 samples showed statistically significant changes in page load times between builds, so they weren't a sufficiently useful metric.
This commit is contained in:
parent e79d18de2a
commit 550fba2466
.circleci/config.yml
@@ -65,10 +65,14 @@ workflows:
       - test-e2e-firefox
       - test-integration-flat-chrome
       - test-integration-flat-firefox
+      - benchmark:
+          requires:
+            - prep-build-test
       - job-publish-prerelease:
           requires:
             - prep-deps
             - prep-build
+            - benchmark
             - all-tests-pass
       - job-publish-release:
           filters:
@@ -271,6 +275,27 @@ jobs:
           path: test-artifacts
           destination: test-artifacts

+  benchmark:
+    docker:
+      - image: circleci/node:10.17-browsers
+    steps:
+      - checkout
+      - attach_workspace:
+          at: .
+      - run:
+          name: Move test build to dist
+          command: mv ./dist-test ./dist
+      - run:
+          name: Run page load benchmark
+          command: yarn benchmark:chrome --out test-artifacts/chrome/benchmark/pageload.json
+      - store_artifacts:
+          path: test-artifacts
+          destination: test-artifacts
+      - persist_to_workspace:
+          root: .
+          paths:
+            - test-artifacts
+
   job-publish-prerelease:
     docker:
       - image: circleci/node:10.17-browsers
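The new `benchmark` job reuses the test build attached from the shared workspace and passes `--out` so the results land under `test-artifacts`, where both `store_artifacts` and `persist_to_workspace` can pick them up. As a rough illustration of what the script on the other end of that flag needs to do (a sketch under assumptions, not the repo's actual implementation; `writeResults` is a hypothetical helper):

// Hypothetical sketch: persisting benchmark results to the path given via
// `--out`. Assumes Node >= 10.12 for fs.promises and recursive mkdir.
const { promises: fs } = require('fs')
const path = require('path')

async function writeResults (results, outPath) {
  // e.g. outPath = 'test-artifacts/chrome/benchmark/pageload.json'
  await fs.mkdir(path.dirname(outPath), { recursive: true })
  await fs.writeFile(outPath, JSON.stringify(results, null, 2))
}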
@ -1,9 +1,15 @@
|
||||
#!/usr/bin/env node
|
||||
const { promises: fs } = require('fs')
|
||||
const path = require('path')
|
||||
const request = require('request-promise')
|
||||
const VERSION = require('../dist/chrome/manifest.json').version // eslint-disable-line import/no-unresolved
|
||||
|
||||
start().catch(console.error)
|
||||
|
||||
function capitalizeFirstLetter (string) {
|
||||
return string.charAt(0).toUpperCase() + string.slice(1)
|
||||
}
|
||||
|
||||
async function start () {
|
||||
|
||||
const GITHUB_COMMENT_TOKEN = process.env.GITHUB_COMMENT_TOKEN
|
||||
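The new `capitalizeFirstLetter` helper is used in the next hunk to turn raw platform and page keys into table labels, for example:

capitalizeFirstLetter('chrome')       // => 'Chrome'
capitalizeFirstLetter('notification') // => 'Notification'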
@@ -54,7 +60,92 @@ async function start () {
   ]
   const hiddenContent = `<ul>` + contentRows.map(row => `<li>${row}</li>`).join('\n') + `</ul>`
   const exposedContent = `Builds ready [${SHORT_SHA1}]`
-  const commentBody = `<details><summary>${exposedContent}</summary>${hiddenContent}</details>`
+  const artifactsBody = `<details><summary>${exposedContent}</summary>${hiddenContent}</details>`
+
+  const benchmarkResults = {}
+  for (const platform of platforms) {
+    const benchmarkPath = path.resolve(__dirname, '..', path.join('test-artifacts', platform, 'benchmark', 'pageload.json'))
+    try {
+      const data = await fs.readFile(benchmarkPath, 'utf8')
+      const benchmark = JSON.parse(data)
+      benchmarkResults[platform] = benchmark
+    } catch (error) {
+      if (error.code === 'ENOENT') {
+        console.log(`No benchmark data found for ${platform}; skipping`)
+      } else {
+        console.error(`Error encountered processing benchmark data for '${platform}': '${error}'`)
+      }
+    }
+  }
+
+  let commentBody
+  if (!benchmarkResults.chrome) {
+    console.log(`No results for Chrome found; skipping benchmark`)
+    commentBody = artifactsBody
+  } else {
+    try {
+      const chromePageLoad = Math.round(parseFloat(benchmarkResults.chrome.notification.average.load))
+      const chromePageLoadMarginOfError = Math.round(parseFloat(benchmarkResults.chrome.notification.marginOfError.load))
+      const benchmarkSummary = `Page Load Metrics (${chromePageLoad} ± ${chromePageLoadMarginOfError} ms)`
+
+      const allPlatforms = new Set()
+      const allPages = new Set()
+      const allMetrics = new Set()
+      const allMeasures = new Set()
+      for (const platform of Object.keys(benchmarkResults)) {
+        allPlatforms.add(platform)
+        const platformBenchmark = benchmarkResults[platform]
+        const pages = Object.keys(platformBenchmark)
+        for (const page of pages) {
+          allPages.add(page)
+          const pageBenchmark = platformBenchmark[page]
+          const measures = Object.keys(pageBenchmark)
+          for (const measure of measures) {
+            allMeasures.add(measure)
+            const measureBenchmark = pageBenchmark[measure]
+            const metrics = Object.keys(measureBenchmark)
+            for (const metric of metrics) {
+              allMetrics.add(metric)
+            }
+          }
+        }
+      }
+
+      const tableRows = []
+      for (const platform of allPlatforms) {
+        const pageRows = []
+        for (const page of allPages) {
+          const metricRows = []
+          for (const metric of allMetrics) {
+            let metricData = `<td>${metric}</td>`
+            for (const measure of allMeasures) {
+              metricData += `<td align="right">${Math.round(parseFloat(benchmarkResults[platform][page][measure][metric]))}</td>`
+            }
+            metricRows.push(metricData)
+          }
+          metricRows[0] = `<td rowspan="${allMetrics.size}">${capitalizeFirstLetter(page)}</td>${metricRows[0]}`
+          pageRows.push(...metricRows)
+        }
+        pageRows[0] = `<td rowspan="${allPages.size * allMetrics.size}">${capitalizeFirstLetter(platform)}</td>${pageRows[0]}`
+        for (const row of pageRows) {
+          tableRows.push(`<tr>${row}</tr>`)
+        }
+      }
+
+      const benchmarkTableHeaders = ['Platform', 'Page', 'Metric']
+      for (const measure of allMeasures) {
+        benchmarkTableHeaders.push(`${capitalizeFirstLetter(measure)} (ms)`)
+      }
+      const benchmarkTableHeader = `<thead><tr>${benchmarkTableHeaders.map(header => `<th>${header}</th>`).join('')}</tr></thead>`
+      const benchmarkTableBody = `<tbody>${tableRows.join('')}</tbody>`
+      const benchmarkTable = `<table>${benchmarkTableHeader}${benchmarkTableBody}</table>`
+      const benchmarkBody = `<details><summary>${benchmarkSummary}</summary>${benchmarkTable}</details>`
+      commentBody = `${artifactsBody}${benchmarkBody}`
+    } catch (error) {
+      console.error(`Error constructing benchmark results: '${error}'`)
+      commentBody = artifactsBody
+    }
+  }
+
   const JSON_PAYLOAD = JSON.stringify({ body: commentBody })
   const POST_COMMENT_URI = `https://api.github.com/repos/metamask/metamask-extension/issues/${CIRCLE_PR_NUMBER}/comments`
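The parsing and table-building code above implies a shape for each platform's pageload.json: page, then measure, then metric, with values in milliseconds. The summary line additionally assumes a `notification` page with `average.load` and `marginOfError.load`. A hypothetical file inferred from that indexing (the `firstPaint` metric and `home` page are illustrative assumptions, not confirmed by the diff):

// Hypothetical test-artifacts/chrome/benchmark/pageload.json, inferred from
// how the script indexes benchmarkResults[platform][page][measure][metric];
// keys and numbers are illustrative only.
{
  "notification": {
    "average": { "firstPaint": 310.2, "load": 1254.7 },
    "marginOfError": { "firstPaint": 24.1, "load": 63.5 }
  },
  "home": {
    "average": { "firstPaint": 290.8, "load": 1103.4 },
    "marginOfError": { "firstPaint": 19.6, "load": 58.2 }
  }
}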
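The hunk ends just before the comment is actually posted. Given the `request-promise` import and the `GITHUB_COMMENT_TOKEN` read earlier, the POST presumably looks something like the following sketch (the header names and auth scheme are assumptions, not confirmed by the diff):

// Hedged sketch of the POST that likely follows, using the request-promise
// client required at the top of the script.
await request({
  method: 'POST',
  uri: POST_COMMENT_URI,
  body: JSON_PAYLOAD,
  headers: {
    'User-Agent': 'metamaskbot', // GitHub's API requires a User-Agent
    'Authorization': `token ${GITHUB_COMMENT_TOKEN}`, // assumption: token auth scheme
  },
})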
test/e2e/benchmark.js
@@ -7,7 +7,7 @@ const { By, Key } = require('selenium-webdriver')
 const { withFixtures } = require('./helpers')
 const { PAGES } = require('./webdriver/driver')

-const DEFAULT_NUM_SAMPLES = 10
+const DEFAULT_NUM_SAMPLES = 20
 const ALL_PAGES = Object.values(PAGES)

 async function measurePage (pageName) {
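Doubling `DEFAULT_NUM_SAMPLES` tightens the reported margin of error by roughly a factor of 1/√2, since the standard error scales with 1/√n. A minimal sketch of how an average and margin of error are conventionally derived from timing samples (assuming a 95% confidence interval via the normal approximation; the repo's actual statistics helpers may differ):

// Sketch: mean and 95% margin of error for n timing samples, in ms.
// Assumes the normal approximation (z = 1.96); not the repo's exact code.
function summarize (samples) {
  const n = samples.length
  const mean = samples.reduce((sum, x) => sum + x, 0) / n
  const variance = samples.reduce((sum, x) => sum + (x - mean) ** 2, 0) / n
  const marginOfError = 1.96 * Math.sqrt(variance / n)
  return { average: mean, marginOfError }
}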
@@ -16,6 +16,7 @@ async function measurePage (pageName) {
     const passwordField = await driver.findElement(By.css('#password'))
     await passwordField.sendKeys('correct horse battery staple')
     await passwordField.sendKeys(Key.ENTER)
+    await driver.findElement(By.css('.account-details__account-name'))
     await driver.navigate(pageName)
     await driver.delay(1000)
     metrics = await driver.collectMetrics()
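The one functional change to `measurePage` is the extra `findElement` before navigation: it blocks until the account name renders, i.e. until login has fully settled, so the timed page load that follows doesn't absorb leftover post-login work. In plain selenium-webdriver terms, the equivalent explicit wait would be something like this (illustrative only; the custom driver wrapper used above appears to build the waiting in):

// Equivalent explicit wait in plain selenium-webdriver, with an assumed
// 10-second timeout.
const { until } = require('selenium-webdriver')
await driver.wait(until.elementLocated(By.css('.account-details__account-name')), 10000)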