2020-01-13 16:07:32 +01:00
|
|
|
const { promises: fs } = require('fs')
|
|
|
|
const { until, error: webdriverError } = require('selenium-webdriver')
|
|
|
|
const { strict: assert } = require('assert')
|
|
|
|
|
|
|
|
class Driver {
  /**
   * Wraps a Selenium WebDriver instance with convenience helpers for driving
   * a browser extension in end-to-end tests.
   *
   * @param {!ThenableWebDriver} driver - A {@code WebDriver} instance
   * @param {string} browser - The type of browser this driver is controlling
   * @param {string} extensionUrl - Base URL of the extension under test
   * @param {number} timeout - Default timeout (ms) used for waits and element lookups
   */
  constructor (driver, browser, extensionUrl, timeout = 10000) {
    this.driver = driver
    this.browser = browser
    this.extensionUrl = extensionUrl
    this.timeout = timeout
  }

  /**
   * Pauses execution for the given number of milliseconds.
   * @param {number} time - Milliseconds to wait
   */
  async delay (time) {
    await new Promise((resolve) => setTimeout(resolve, time))
  }

  /**
   * Waits until `condition` holds, or until `timeout` elapses.
   * @param {!(Condition|Function)} condition - A condition understood by WebDriver#wait
   * @param {number} [timeout] - Override for the default timeout (ms)
   */
  async wait (condition, timeout = this.timeout) {
    await this.driver.wait(condition, timeout)
  }

  /**
   * Terminates the browser session.
   */
  async quit () {
    await this.driver.quit()
  }

  // Element interactions

  /**
   * Locates an element, waiting up to the default timeout for it to appear.
   * @param {!By} locator - Element locator
   * @returns {Promise<!WebElement>} The located element
   */
  async findElement (locator) {
    return await this.driver.wait(until.elementLocated(locator), this.timeout)
  }

  /**
   * Locates an element and additionally waits for it to be displayed.
   * @param {!By} locator - Element locator
   * @returns {Promise<!WebElement>} The located, visible element
   */
  async findVisibleElement (locator) {
    const element = await this.findElement(locator)
    await this.driver.wait(until.elementIsVisible(element), this.timeout)
    return element
  }

  /**
   * Locates an element and waits for it to be both visible and enabled,
   * i.e. safe to click.
   * @param {!By} locator - Element locator
   * @returns {Promise<!WebElement>} The located, clickable element
   */
  async findClickableElement (locator) {
    const element = await this.findElement(locator)
    await Promise.all([
      this.driver.wait(until.elementIsVisible(element), this.timeout),
      this.driver.wait(until.elementIsEnabled(element), this.timeout),
    ])
    return element
  }

  /**
   * Locates all elements matching the locator, waiting for at least one.
   * @param {!By} locator - Element locator
   * @returns {Promise<!Array<!WebElement>>} The located elements
   */
  async findElements (locator) {
    return await this.driver.wait(until.elementsLocated(locator), this.timeout)
  }

  /**
   * Locates all elements matching the locator and waits for every one of
   * them to become visible and enabled.
   * @param {!By} locator - Element locator
   * @returns {Promise<!Array<!WebElement>>} The located, clickable elements
   */
  async findClickableElements (locator) {
    const elements = await this.findElements(locator)
    // Wait for the visible/enabled pair of every element concurrently
    await Promise.all(elements.map((element) => Promise.all([
      this.driver.wait(until.elementIsVisible(element), this.timeout),
      this.driver.wait(until.elementIsEnabled(element), this.timeout),
    ])))
    return elements
  }

  /**
   * Waits for the element to become clickable, then clicks it.
   * @param {!By} locator - Element locator
   */
  async clickElement (locator) {
    const element = await this.findClickableElement(locator)
    await element.click()
  }

  /**
   * Clicks at an offset relative to the center of the located element.
   * @param {!By} locator - Element locator
   * @param {number} x - Horizontal offset from the element's center
   * @param {number} y - Vertical offset from the element's center
   */
  async clickPoint (locator, x, y) {
    const element = await this.findElement(locator)
    await this.driver
      .actions()
      .move({ origin: element, x, y })
      .click()
      .perform()
  }

  /**
   * Scrolls the page until the given element is in view.
   * @param {!WebElement} element - The element to scroll into view
   */
  async scrollToElement (element) {
    await this.driver.executeScript('arguments[0].scrollIntoView(true)', element)
  }

  /**
   * Asserts that no element matching the locator is present. The "not found"
   * and "timed out" lookup errors are the expected signals of absence.
   * @param {!By} locator - Element locator
   * @throws {AssertionError} If the element is found, or if the lookup fails
   * for an unexpected reason
   */
  async assertElementNotPresent (locator) {
    let dataTab
    try {
      dataTab = await this.findElement(locator)
    } catch (err) {
      // Absence manifests as NoSuchElementError or TimeoutError; surface
      // anything else (e.g. a dead session) with context rather than a
      // bare, message-less assertion failure
      assert(
        err instanceof webdriverError.NoSuchElementError || err instanceof webdriverError.TimeoutError,
        `Unexpected error while checking element absence: ${err.message}`,
      )
    }
    assert.ok(!dataTab, 'Found element that should not be present')
  }

  // Navigation

  /**
   * Navigates to one of the extension's pages (see Driver.PAGES).
   * @param {string} [page] - The page to load, without the '.html' suffix
   */
  async navigate (page = Driver.PAGES.HOME) {
    return await this.driver.get(`${this.extensionUrl}/${page}.html`)
  }

  // Metrics

  /**
   * Collects page load metrics from the current page by executing the
   * `collectMetrics` function in the browser context.
   * @returns {Promise<Object>} Paint and navigation performance entries
   */
  async collectMetrics () {
    return await this.driver.executeScript(collectMetrics)
  }

  // Window management

  /**
   * Opens `url` in a new window and switches focus to it.
   * @param {string} url - The URL to load
   * @returns {Promise<string>} The new window's handle
   */
  async openNewPage (url) {
    const newHandle = await this.driver.switchTo().newWindow()
    await this.driver.get(url)
    return newHandle
  }

  /**
   * Switches the driver's focus to the given window.
   * @param {string} handle - Target window handle
   */
  async switchToWindow (handle) {
    await this.driver.switchTo().window(handle)
  }

  /**
   * @returns {Promise<Array<string>>} Handles for all open windows
   */
  async getAllWindowHandles () {
    return await this.driver.getAllWindowHandles()
  }

  /**
   * Polls the open windows until exactly `x` handles exist.
   * @param {number} x - The expected number of window handles
   * @param {number} [delayStep] - Poll interval (ms)
   * @param {number} [timeout] - Give up after this long (ms)
   * @throws {Error} If the window count never reaches `x` within the timeout
   */
  async waitUntilXWindowHandles (x, delayStep = 1000, timeout = 5000) {
    let timeElapsed = 0
    while (timeElapsed <= timeout) {
      const windowHandles = await this.driver.getAllWindowHandles()
      if (windowHandles.length === x) {
        return
      }
      await this.delay(delayStep)
      timeElapsed += delayStep
    }
    throw new Error('waitUntilXWindowHandles timed out polling window handles')
  }

  /**
   * Switches to the first window whose document title matches `title`.
   * Note: focus is left on whichever window was checked last.
   * @param {string} title - The title to look for
   * @param {Array<string>} [windowHandles] - Handles to search; defaults to all
   * @returns {Promise<string>} The matching window's handle
   * @throws {Error} If no window has the given title
   */
  async switchToWindowWithTitle (title, windowHandles) {
    if (!windowHandles) {
      windowHandles = await this.driver.getAllWindowHandles()
    }

    for (const handle of windowHandles) {
      await this.driver.switchTo().window(handle)
      const handleTitle = await this.driver.getTitle()
      if (handleTitle === title) {
        return handle
      }
    }

    throw new Error(`No window with title: ${title}`)
  }

  /**
   * Closes all windows except those in the given list of exceptions
   * @param {Array<string>} exceptions - The list of window handle exceptions
   * @param {Array} [windowHandles] - The full list of window handles
   * @returns {Promise<void>}
   */
  async closeAllWindowHandlesExcept (exceptions, windowHandles) {
    windowHandles = windowHandles || await this.driver.getAllWindowHandles()

    for (const handle of windowHandles) {
      if (!exceptions.includes(handle)) {
        await this.driver.switchTo().window(handle)
        // Brief settle time before and after closing, to avoid racing the
        // browser while it tears the window down
        await this.delay(1000)
        await this.driver.close()
        await this.delay(1000)
      }
    }
  }

  // Error handling

  /**
   * Captures failure artifacts for the given test into
   * `./test-artifacts/<browser>/<test title>/`: a screenshot, the page DOM,
   * and the UI state as reported by `window.getCleanAppState()`.
   * @param {Object} test - The failed test; its `title` names the artifact dir
   */
  async verboseReportOnFailure (test) {
    const artifactDir = `./test-artifacts/${this.browser}/${test.title}`
    const filepathBase = `${artifactDir}/test-failure`
    await fs.mkdir(artifactDir, { recursive: true })
    const screenshot = await this.driver.takeScreenshot()
    await fs.writeFile(`${filepathBase}-screenshot.png`, screenshot, { encoding: 'base64' })
    const htmlSource = await this.driver.getPageSource()
    await fs.writeFile(`${filepathBase}-dom.html`, htmlSource)
    const uiState = await this.driver.executeScript(() => window.getCleanAppState())
    await fs.writeFile(`${filepathBase}-state.json`, JSON.stringify(uiState, null, 2))
  }

  /**
   * Fetches the browser console log and returns the entries that look like
   * errors, filtering out warnings and known-benign messages.
   * @returns {Promise<Array<Object>>} The filtered log entries, as plain objects
   */
  async checkBrowserForConsoleErrors () {
    const ignoredLogTypes = ['WARNING']
    const ignoredErrorMessages = [
      // Third-party Favicon 404s show up as errors
      'favicon.ico - Failed to load resource: the server responded with a status of 404 (Not Found)',
    ]
    const browserLogs = await this.driver.manage().logs().get('browser')
    const errorEntries = browserLogs.filter((entry) => !ignoredLogTypes.includes(entry.level.toString()))
    const errorObjects = errorEntries.map((entry) => entry.toJSON())
    return errorObjects.filter((entry) => !ignoredErrorMessages.some((message) => entry.message.includes(message)))
  }
}
|
|
|
|
|
Add benchmark script (#7869)
The script `benchmark.js` will collect page load metrics from the
extension, and print them to a file or the console. A method for
collecting metrics was added to the web driver to help with this.
This script will calculate the min, max, average, and standard
deviation for four metrics: 'firstPaint', 'domContentLoaded', 'load',
and 'domInteractive'. The variation between samples is sometimes high,
with the results varying between samples if only 3 were taken. However,
all tests I've done locally with 5 samples have produced results within
one standard deviation of each other. The default number of samples has
been set to 10, which should be more than enough to produce consistent
results.
The benchmark can be run with the npm script `benchmark:chrome` or
`benchmark:firefox`, e.g. `yarn benchmark:chrome`.
2020-01-21 17:02:45 +01:00
|
|
|
/**
 * Gathers paint and navigation timing entries from the current page.
 * This function is serialized and executed inside the browser via
 * WebDriver's executeScript, so it may only reference browser globals.
 * @returns {Object} `{ paint, navigation }` — paint start times keyed by
 * entry name, and an array of navigation timing summaries
 */
function collectMetrics () {
  const paint = {}
  for (const paintEntry of window.performance.getEntriesByType('paint')) {
    paint[paintEntry.name] = paintEntry.startTime
  }

  const navigation = window.performance.getEntriesByType('navigation').map((navigationEntry) => ({
    domContentLoaded: navigationEntry.domContentLoadedEventEnd,
    load: navigationEntry.loadEventEnd,
    domInteractive: navigationEntry.domInteractive,
    redirectCount: navigationEntry.redirectCount,
    type: navigationEntry.type,
  }))

  return { paint, navigation }
}
|
|
|
|
|
2020-01-20 18:03:07 +01:00
|
|
|
// Known extension pages, usable as the argument to Driver#navigate.
// Frozen so the shared constant can't be mutated by accident.
Driver.PAGES = Object.freeze({
  HOME: 'home',
  NOTIFICATION: 'notification',
  POPUP: 'popup',
})

module.exports = Driver
|