diff --git a/.eslintignore b/.eslintignore index 22ee61dbab63..b99210eb6e5b 100644 --- a/.eslintignore +++ b/.eslintignore @@ -19,5 +19,3 @@ third-party/** **/*.d.cts page-functions-test-case*out*.js -# TODO(15841): remove when importing Lantern from npm -core/lib/lantern/**/*.test.js diff --git a/core/audits/byte-efficiency/byte-efficiency-audit.js b/core/audits/byte-efficiency/byte-efficiency-audit.js index cb8193838638..76089481b1e2 100644 --- a/core/audits/byte-efficiency/byte-efficiency-audit.js +++ b/core/audits/byte-efficiency/byte-efficiency-audit.js @@ -14,9 +14,6 @@ import {LCPImageRecord} from '../../computed/lcp-image-record.js'; const str_ = i18n.createIcuMessageFn(import.meta.url, {}); -/** @typedef {import('../../lib/lantern/simulation/Simulator.js').Simulator} Simulator */ -/** @typedef {import('../../lib/lantern/BaseNode.js').Node} Node */ - // Parameters for log-normal distribution scoring. These values were determined by fitting the // log-normal cumulative distribution function curve to the former method of linear interpolation // scoring between the control points {average = 300 ms, poor = 750 ms, zero = 5000 ms} using the @@ -96,8 +93,8 @@ class ByteEfficiencyAudit extends Audit { * Computes the estimated effect of all the byte savings on the provided graph. * * @param {Array} results The array of byte savings results per resource - * @param {Node} graph - * @param {Simulator} simulator + * @param {LH.Gatherer.Simulation.GraphNode} graph + * @param {LH.Gatherer.Simulation.Simulator} simulator * @param {{label?: string, providedWastedBytesByUrl?: Map}=} options * @return {{savings: number, simulationBeforeChanges: LH.Gatherer.Simulation.Result, simulationAfterChanges: LH.Gatherer.Simulation.Result}} */ @@ -151,7 +148,7 @@ class ByteEfficiencyAudit extends Audit { /** * @param {ByteEfficiencyProduct} result - * @param {Simulator} simulator + * @param {LH.Gatherer.Simulation.Simulator} simulator * @param {LH.Artifacts.MetricComputationDataInput} metricComputationInput * @param {LH.Audit.Context} context * @return {Promise} diff --git a/core/audits/byte-efficiency/render-blocking-resources.js b/core/audits/byte-efficiency/render-blocking-resources.js index e8488ceebc93..c5e8d94eddee 100644 --- a/core/audits/byte-efficiency/render-blocking-resources.js +++ b/core/audits/byte-efficiency/render-blocking-resources.js @@ -8,10 +8,9 @@ * @fileoverview Audit a page to see if it does have resources that are blocking first paint */ - import {Audit} from '../audit.js'; import * as i18n from '../../lib/i18n/i18n.js'; -import {BaseNode} from '../../lib/lantern/lantern.js'; +import * as Lantern from '../../lib/lantern/lantern.js'; import {UnusedCSS} from '../../computed/unused-css.js'; import {NetworkRequest} from '../../lib/network-request.js'; import {LoadSimulator} from '../../computed/load-simulator.js'; @@ -19,11 +18,6 @@ import {FirstContentfulPaint} from '../../computed/metrics/first-contentful-pain import {LCPImageRecord} from '../../computed/lcp-image-record.js'; import {NavigationInsights} from '../../computed/navigation-insights.js'; - -/** @typedef {import('../../lib/lantern/simulation/Simulator.js').Simulator} Simulator */ -/** @typedef {import('../../lib/lantern/BaseNode.js').Node} Node */ -/** @typedef {import('../../lib/lantern/NetworkNode.js').NetworkNode} NetworkNode */ - // Because of the way we detect blocking stylesheets, asynchronously loaded // CSS with link[rel=preload] and an onload handler (see https://github.com/filamentgroup/loadCSS) // can be falsely 
flagged as blocking. Therefore, ignore stylesheets that loaded fast enough @@ -44,10 +38,10 @@ const str_ = i18n.createIcuMessageFn(import.meta.url, UIStrings); /** * Given a simulation's nodeTimings, return an object with the nodes/timing keyed by network URL * @param {LH.Gatherer.Simulation.Result['nodeTimings']} nodeTimings - * @return {Map} + * @return {Map} */ function getNodesAndTimingByRequestId(nodeTimings) { - /** @type {Map} */ + /** @type {Map} */ const requestIdToNode = new Map(); for (const [node, nodeTiming] of nodeTimings) { @@ -61,8 +55,8 @@ function getNodesAndTimingByRequestId(nodeTimings) { /** * Adjust the timing of a node and its dependencies to account for stack specific overrides. - * @param {Map} adjustedNodeTimings - * @param {Node} node + * @param {Map} adjustedNodeTimings + * @param {LH.Gatherer.Simulation.GraphNode} node * @param {LH.Artifacts.DetectedStack[]} Stacks */ function adjustNodeTimings(adjustedNodeTimings, node, Stacks) { @@ -83,7 +77,7 @@ function adjustNodeTimings(adjustedNodeTimings, node, Stacks) { * Any stack specific timing overrides should go in this function. * @see https://github.com/GoogleChrome/lighthouse/issues/2832#issuecomment-591066081 * - * @param {Node} node + * @param {LH.Gatherer.Simulation.GraphNode} node * @param {LH.Gatherer.Simulation.NodeTiming} nodeTiming * @param {LH.Artifacts.DetectedStack[]} Stacks */ @@ -93,7 +87,7 @@ function computeStackSpecificTiming(node, nodeTiming, Stacks) { // AMP will load a linked stylesheet asynchronously if it has not been loaded after 2.1 seconds: // https://github.com/ampproject/amphtml/blob/8e03ac2f315774070651584a7e046ff24212c9b1/src/font-stylesheet-timeout.js#L54-L59 // Any potential savings must only include time spent on AMP stylesheet nodes before 2.1 seconds. - if (node.type === BaseNode.TYPES.NETWORK && + if (node.type === Lantern.Graph.BaseNode.types.NETWORK && node.request.resourceType === NetworkRequest.TYPES.Stylesheet && nodeTiming.endTime > 2100) { stackSpecificTiming.endTime = Math.max(nodeTiming.startTime, 2100); @@ -208,8 +202,8 @@ class RenderBlockingResources extends Audit { * devs that they should be able to get to a reasonable first paint without JS, which is not a bad * thing. * - * @param {Simulator} simulator - * @param {Node} fcpGraph + * @param {LH.Gatherer.Simulation.Simulator} simulator + * @param {LH.Gatherer.Simulation.GraphNode} fcpGraph * @param {Set} deferredIds * @param {Map} wastedCssBytesByUrl * @param {LH.Artifacts.DetectedStack[]} Stacks @@ -225,7 +219,7 @@ class RenderBlockingResources extends Audit { // If a node can be deferred, exclude it from the new FCP graph const canDeferRequest = deferredIds.has(node.id); - if (node.type !== BaseNode.TYPES.NETWORK) return !canDeferRequest; + if (node.type !== Lantern.Graph.BaseNode.types.NETWORK) return !canDeferRequest; const isStylesheet = node.request.resourceType === NetworkRequest.TYPES.Stylesheet; diff --git a/core/audits/dobetterweb/uses-http2.js b/core/audits/dobetterweb/uses-http2.js index a399213dcfbd..01e28a0cb429 100644 --- a/core/audits/dobetterweb/uses-http2.js +++ b/core/audits/dobetterweb/uses-http2.js @@ -9,9 +9,6 @@ * origin are over the http/2 protocol. 
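// The byte-efficiency and HTTP/2 audits in this diff estimate savings the same way: simulate the
// dependency graph as observed, apply the hypothetical fix, and simulate again. A minimal sketch
// of that pattern, assuming `graph` is an LH.Gatherer.Simulation.GraphNode, `simulator` an
// LH.Gatherer.Simulation.Simulator, and `applyHypotheticalFix` a caller-provided callback
// (that helper name is made up for illustration):
function estimateSavings(graph, simulator, applyHypotheticalFix) {
  const simulationBeforeChanges = simulator.simulate(graph, {label: 'before-changes'});

  // Clone so the observed graph is left untouched, then mutate only the clone.
  const modifiedGraph = graph.cloneWithRelationships();
  modifiedGraph.traverse(node => {
    if (node.type === 'network') applyHypotheticalFix(node.request);
  });

  const simulationAfterChanges = simulator.simulate(modifiedGraph, {label: 'after-changes'});
  const savings = simulationBeforeChanges.timeInMs - simulationAfterChanges.timeInMs;
  return {savings, simulationBeforeChanges, simulationAfterChanges};
}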
*/ -/** @typedef {import('../../lib/lantern/simulation/Simulator.js').Simulator} Simulator */ -/** @typedef {import('../../lib/lantern/BaseNode.js').Node} Node */ - import {Audit} from '../audit.js'; import {EntityClassification} from '../../computed/entity-classification.js'; import UrlUtils from '../../lib/url-utils.js'; @@ -69,8 +66,8 @@ class UsesHTTP2Audit extends Audit { * Computes the estimated effect of all results being converted to http/2 on the provided graph. * * @param {Array<{url: string}>} results - * @param {Node} graph - * @param {Simulator} simulator + * @param {LH.Gatherer.Simulation.GraphNode} graph + * @param {LH.Gatherer.Simulation.Simulator} simulator * @param {{label?: string}=} options * @return {{savings: number, simulationBefore: LH.Gatherer.Simulation.Result, simulationAfter: LH.Gatherer.Simulation.Result}} */ diff --git a/core/audits/long-tasks.js b/core/audits/long-tasks.js index 320bdb93943d..66acd3ed64d8 100644 --- a/core/audits/long-tasks.js +++ b/core/audits/long-tasks.js @@ -86,7 +86,7 @@ class LongTasks extends Audit { * most time will be attributed to 'other' (the category of the top-level * RunTask). See pruning in `PageDependencyGraph.linkCPUNodes`. * @param {LH.Artifacts.TaskNode} task - * @param {Map|undefined} taskTimingsByEvent + * @param {Map|undefined} taskTimingsByEvent * @param {Map} [timeByTaskGroup] * @return {{startTime: number, duration: number, timeByTaskGroup: Map}} */ @@ -117,7 +117,7 @@ class LongTasks extends Audit { /** * @param {Array} longTasks * @param {Set} jsUrls - * @param {Map|undefined} taskTimingsByEvent + * @param {Map|undefined} taskTimingsByEvent * @return {LH.Audit.Details.DebugData} */ static makeDebugData(longTasks, jsUrls, taskTimingsByEvent) { @@ -155,7 +155,7 @@ class LongTasks extends Audit { /** * Get timing from task, overridden by taskTimingsByEvent if provided. 
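// Under simulated throttling, the Map of LH.TraceEvent to NodeTiming used above can be derived
// from a Lantern metric result, since CPU nodes in the simulated graph carry their originating
// trace event. A sketch, assuming `tbtResult.pessimisticEstimate.nodeTimings` maps graph nodes to
// simulated timings:
/**
 * @param {{pessimisticEstimate: LH.Gatherer.Simulation.Result}} tbtResult
 * @return {Map<LH.TraceEvent, LH.Gatherer.Simulation.NodeTiming>}
 */
function getTaskTimingsByEvent(tbtResult) {
  const taskTimingsByEvent = new Map();
  for (const [node, timing] of tbtResult.pessimisticEstimate.nodeTimings) {
    // Only CPU nodes correspond to main-thread trace events; network nodes are skipped.
    if (node.type === 'cpu') taskTimingsByEvent.set(node.event, timing);
  }
  return taskTimingsByEvent;
}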
* @param {LH.Artifacts.TaskNode} task - * @param {Map|undefined} taskTimingsByEvent + * @param {Map|undefined} taskTimingsByEvent * @return {Timing} */ static getTiming(task, taskTimingsByEvent) { @@ -185,7 +185,7 @@ class LongTasks extends Audit { const metricComputationData = Audit.makeMetricComputationDataInput(artifacts, context); const tbtResult = await TotalBlockingTime.request(metricComputationData, context); - /** @type {Map|undefined} */ + /** @type {Map|undefined} */ let taskTimingsByEvent; if (settings.throttlingMethod === 'simulate') { diff --git a/core/audits/uses-rel-preload.js b/core/audits/uses-rel-preload.js index c74dd1b20269..8c9981f3906b 100644 --- a/core/audits/uses-rel-preload.js +++ b/core/audits/uses-rel-preload.js @@ -4,6 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ +import * as Lantern from '../lib/lantern/lantern.js'; import UrlUtils from '../lib/url-utils.js'; import {NetworkRequest} from '../lib/network-request.js'; import {Audit} from './audit.js'; @@ -61,8 +62,8 @@ class UsesRelPreloadAudit extends Audit { if (node.type !== 'network') return; // Don't include the node itself or any CPU nodes in the initiatorPath const path = traversalPath.slice(1).filter(initiator => initiator.type === 'network'); - if (!UsesRelPreloadAudit.shouldPreloadRequest(node.rawRequest, mainResource, path)) return; - urls.add(node.rawRequest.url); + if (!UsesRelPreloadAudit.shouldPreloadRequest(node.request, mainResource, path)) return; + urls.add(node.request.url); }); return urls; @@ -75,6 +76,7 @@ class UsesRelPreloadAudit extends Audit { * @return {Set} */ static getURLsFailedToPreload(graph) { + // TODO: add `fromPrefetchCache` to Lantern.Types.NetworkRequest, then use node.request here instead of rawRequest. /** @type {Array} */ const requests = []; graph.traverse(node => node.type === 'network' && requests.push(node.rawRequest)); @@ -109,8 +111,8 @@ class UsesRelPreloadAudit extends Audit { * Critical requests deeper than depth 2 are more likely to be a case-by-case basis such that it * would be a little risky to recommend blindly. 
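// Standalone sketch of the depth heuristic described above. This is not the full
// shouldPreloadRequest() implementation (its remaining checks sit outside this hunk), and the
// helper name is made up for illustration; only the depth-2 cutoff comes from the comment above.
const MAXIMUM_INITIATOR_DEPTH = 2;

/**
 * @param {Array<Lantern.Types.NetworkRequest>} initiatorPath requests between the main document and the candidate
 * @return {boolean}
 */
function isShallowEnoughToRecommend(initiatorPath) {
  // Requests discovered more than two network hops from the main document are too situational
  // to recommend preloading blindly.
  return initiatorPath.length <= MAXIMUM_INITIATOR_DEPTH;
}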
* - * @param {LH.Artifacts.NetworkRequest} request - * @param {LH.Artifacts.NetworkRequest} mainResource + * @param {Lantern.Types.NetworkRequest} request + * @param {Lantern.Types.NetworkRequest} mainResource * @param {Array} initiatorPath * @return {boolean} */ @@ -157,7 +159,7 @@ class UsesRelPreloadAudit extends Audit { if (node.isMainDocument()) { mainDocumentNode = node; - } else if (node.rawRequest && urls.has(node.rawRequest.url)) { + } else if (node.request && urls.has(node.request.url)) { nodesToPreload.push(node); } }); @@ -189,7 +191,7 @@ class UsesRelPreloadAudit extends Audit { const wastedMs = Math.round(timingBefore.endTime - timingAfter.endTime); if (wastedMs < THRESHOLD_IN_MS) continue; - results.push({url: node.rawRequest.url, wastedMs}); + results.push({url: node.request.url, wastedMs}); } if (!results.length) { diff --git a/core/computed/critical-request-chains.js b/core/computed/critical-request-chains.js index 9fe8b7755a09..519a67883a82 100644 --- a/core/computed/critical-request-chains.js +++ b/core/computed/critical-request-chains.js @@ -4,6 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ +import * as Lantern from '../lib/lantern/lantern.js'; import {makeComputedArtifact} from './computed-artifact.js'; import {NetworkRequest} from '../lib/network-request.js'; import {MainResource} from './main-resource.js'; @@ -14,8 +15,8 @@ class CriticalRequestChains { * For now, we use network priorities as a proxy for "render-blocking"/critical-ness. * It's imperfect, but there is not a higher-fidelity signal available yet. * @see https://docs.google.com/document/d/1bCDuq9H1ih9iNjgzyAL0gpwNFiEP4TZS-YLRp_RuMlc - * @param {LH.Artifacts.NetworkRequest} request - * @param {LH.Artifacts.NetworkRequest} mainResource + * @param {Lantern.Types.NetworkRequest} request + * @param {Lantern.Types.NetworkRequest} mainResource * @return {boolean} */ static isCritical(request, mainResource) { @@ -105,7 +106,7 @@ class CriticalRequestChains { graph.traverse((node, traversalPath) => { seenNodes.add(node); if (node.type !== 'network') return; - if (!CriticalRequestChains.isCritical(node.rawRequest, mainResource)) return; + if (!CriticalRequestChains.isCritical(node.request, mainResource)) return; const networkPath = traversalPath .filter(/** @return {n is LH.Gatherer.Simulation.GraphNetworkNode} */ @@ -117,7 +118,7 @@ class CriticalRequestChains { if (networkPath.some(r => !CriticalRequestChains.isCritical(r, mainResource))) return; // Ignore non-network things (like data urls). 
- if (NetworkRequest.isNonNetworkRequest(node.rawRequest)) return; + if (NetworkRequest.isNonNetworkRequest(node.request)) return; addChain(networkPath); }, getNextNodes); diff --git a/core/computed/document-urls.js b/core/computed/document-urls.js index bfde1a295206..ebdc5ae49ae8 100644 --- a/core/computed/document-urls.js +++ b/core/computed/document-urls.js @@ -42,7 +42,7 @@ class DocumentUrls { if (!requestedUrl || !mainDocumentUrl) throw new Error('No main frame navigations found'); const initialRequest = - Lantern.Simulation.NetworkAnalyzer.findResourceForUrl(networkRecords, requestedUrl); + Lantern.Core.NetworkAnalyzer.findResourceForUrl(networkRecords, requestedUrl); if (initialRequest?.redirects?.length) requestedUrl = initialRequest.redirects[0].url; return {requestedUrl, mainDocumentUrl}; diff --git a/core/computed/load-simulator.js b/core/computed/load-simulator.js index ff969206ce67..a4a55aaf40d8 100644 --- a/core/computed/load-simulator.js +++ b/core/computed/load-simulator.js @@ -12,7 +12,7 @@ class LoadSimulator { /** * @param {{devtoolsLog: LH.DevtoolsLog, settings: LH.Audit.Context['settings']}} data * @param {LH.Artifacts.ComputedContext} context - * @return {Promise} + * @return {Promise} */ static async compute_(data, context) { const networkAnalysis = await NetworkAnalysis.request(data.devtoolsLog, context); diff --git a/core/computed/main-resource.js b/core/computed/main-resource.js index 0c592e81a7a7..93f4f3be4c38 100644 --- a/core/computed/main-resource.js +++ b/core/computed/main-resource.js @@ -29,7 +29,7 @@ class MainResource { // would have evicted the first request by the time `MainDocumentRequest` (a consumer // of this computed artifact) attempts to fetch the contents, resulting in a protocol error. const mainResource = - Lantern.Simulation.NetworkAnalyzer.findLastDocumentForUrl(records, mainDocumentUrl); + Lantern.Core.NetworkAnalyzer.findLastDocumentForUrl(records, mainDocumentUrl); if (!mainResource) { throw new Error('Unable to identify the main resource'); } diff --git a/core/computed/metrics/lantern-first-contentful-paint.js b/core/computed/metrics/lantern-first-contentful-paint.js index a50947ed055c..f3b441c2183e 100644 --- a/core/computed/metrics/lantern-first-contentful-paint.js +++ b/core/computed/metrics/lantern-first-contentful-paint.js @@ -8,13 +8,11 @@ import * as Lantern from '../../lib/lantern/lantern.js'; import {makeComputedArtifact} from '../computed-artifact.js'; import {getComputationDataParams, lanternErrorAdapter} from './lantern-metric.js'; -/** @typedef {import('../../lib/lantern/Metric.js').Extras} Extras */ - class LanternFirstContentfulPaint extends Lantern.Metrics.FirstContentfulPaint { /** * @param {LH.Artifacts.MetricComputationDataInput} data * @param {LH.Artifacts.ComputedContext} context - * @param {Omit=} extras + * @param {Omit=} extras * @return {Promise} */ static async computeMetricWithGraphs(data, context, extras) { diff --git a/core/computed/metrics/lantern-interactive.js b/core/computed/metrics/lantern-interactive.js index e6f6f342aa8e..51abaa9edbc0 100644 --- a/core/computed/metrics/lantern-interactive.js +++ b/core/computed/metrics/lantern-interactive.js @@ -9,13 +9,11 @@ import {makeComputedArtifact} from '../computed-artifact.js'; import {LanternLargestContentfulPaint} from './lantern-largest-contentful-paint.js'; import {getComputationDataParams, lanternErrorAdapter} from './lantern-metric.js'; -/** @typedef {import('../../lib/lantern/Metric.js').Extras} Extras */ - class LanternInteractive extends 
Lantern.Metrics.Interactive { /** * @param {LH.Artifacts.MetricComputationDataInput} data * @param {LH.Artifacts.ComputedContext} context - * @param {Omit=} extras + * @param {Omit=} extras * @return {Promise} */ static async computeMetricWithGraphs(data, context, extras) { diff --git a/core/computed/metrics/lantern-largest-contentful-paint.js b/core/computed/metrics/lantern-largest-contentful-paint.js index dc34f1a22581..77c26c2e3b35 100644 --- a/core/computed/metrics/lantern-largest-contentful-paint.js +++ b/core/computed/metrics/lantern-largest-contentful-paint.js @@ -9,13 +9,11 @@ import {makeComputedArtifact} from '../computed-artifact.js'; import {getComputationDataParams, lanternErrorAdapter} from './lantern-metric.js'; import {LanternFirstContentfulPaint} from './lantern-first-contentful-paint.js'; -/** @typedef {import('../../lib/lantern/Metric.js').Extras} Extras */ - class LanternLargestContentfulPaint extends Lantern.Metrics.LargestContentfulPaint { /** * @param {LH.Artifacts.MetricComputationDataInput} data * @param {LH.Artifacts.ComputedContext} context - * @param {Omit=} extras + * @param {Omit=} extras * @return {Promise} */ static async computeMetricWithGraphs(data, context, extras) { diff --git a/core/computed/metrics/lantern-max-potential-fid.js b/core/computed/metrics/lantern-max-potential-fid.js index d883023d2bd1..3d9580420406 100644 --- a/core/computed/metrics/lantern-max-potential-fid.js +++ b/core/computed/metrics/lantern-max-potential-fid.js @@ -9,13 +9,11 @@ import {makeComputedArtifact} from '../computed-artifact.js'; import {getComputationDataParams, lanternErrorAdapter} from './lantern-metric.js'; import {LanternFirstContentfulPaint} from './lantern-first-contentful-paint.js'; -/** @typedef {import('../../lib/lantern/Metric.js').Extras} Extras */ - class LanternMaxPotentialFID extends Lantern.Metrics.MaxPotentialFID { /** * @param {LH.Artifacts.MetricComputationDataInput} data * @param {LH.Artifacts.ComputedContext} context - * @param {Omit=} extras + * @param {Omit=} extras * @return {Promise} */ static async computeMetricWithGraphs(data, context, extras) { diff --git a/core/computed/metrics/lantern-metric.js b/core/computed/metrics/lantern-metric.js index 76a13693b364..f5af8a37182b 100644 --- a/core/computed/metrics/lantern-metric.js +++ b/core/computed/metrics/lantern-metric.js @@ -50,7 +50,7 @@ async function getComputationDataParamsFromTrace(data, context) { * @return {never} */ function lanternErrorAdapter(err) { - if (!(err instanceof Lantern.Error)) { + if (!(err instanceof Lantern.Core.LanternError)) { throw err; } diff --git a/core/computed/metrics/lantern-speed-index.js b/core/computed/metrics/lantern-speed-index.js index 94241716b9a0..9197fb93cd6c 100644 --- a/core/computed/metrics/lantern-speed-index.js +++ b/core/computed/metrics/lantern-speed-index.js @@ -10,13 +10,11 @@ import {getComputationDataParams, lanternErrorAdapter} from './lantern-metric.js import {Speedline} from '../speedline.js'; import {LanternFirstContentfulPaint} from './lantern-first-contentful-paint.js'; -/** @typedef {import('../../lib/lantern/Metric.js').Extras} Extras */ - class LanternSpeedIndex extends Lantern.Metrics.SpeedIndex { /** * @param {LH.Artifacts.MetricComputationDataInput} data * @param {LH.Artifacts.ComputedContext} context - * @param {Omit=} extras + * @param {Omit=} extras * @return {Promise} */ static async computeMetricWithGraphs(data, context, extras) { diff --git a/core/computed/metrics/lantern-total-blocking-time.js 
b/core/computed/metrics/lantern-total-blocking-time.js index 368d6a40ac74..960f5d502701 100644 --- a/core/computed/metrics/lantern-total-blocking-time.js +++ b/core/computed/metrics/lantern-total-blocking-time.js @@ -10,13 +10,11 @@ import {LanternFirstContentfulPaint} from './lantern-first-contentful-paint.js'; import {LanternInteractive} from './lantern-interactive.js'; import {getComputationDataParams} from './lantern-metric.js'; -/** @typedef {import('../../lib/lantern/Metric.js').Extras} Extras */ - class LanternTotalBlockingTime extends Lantern.Metrics.TotalBlockingTime { /** * @param {LH.Artifacts.MetricComputationDataInput} data * @param {LH.Artifacts.ComputedContext} context - * @param {Omit=} extras + * @param {Omit=} extras * @return {Promise} */ static async computeMetricWithGraphs(data, context, extras) { diff --git a/core/computed/metrics/total-blocking-time.js b/core/computed/metrics/total-blocking-time.js index 77a21ed43aad..63babc977da7 100644 --- a/core/computed/metrics/total-blocking-time.js +++ b/core/computed/metrics/total-blocking-time.js @@ -11,7 +11,7 @@ import {TraceProcessor} from '../../lib/tracehouse/trace-processor.js'; import {LanternTotalBlockingTime} from './lantern-total-blocking-time.js'; import {Interactive} from './interactive.js'; -const {calculateSumOfBlockingTime} = Lantern.TBTUtils; +const {calculateSumOfBlockingTime} = Lantern.Metrics.TBTUtils; /** * @fileoverview This audit determines Total Blocking Time. diff --git a/core/computed/network-analysis.js b/core/computed/network-analysis.js index a486c7b715cb..78c904ae13ab 100644 --- a/core/computed/network-analysis.js +++ b/core/computed/network-analysis.js @@ -16,7 +16,7 @@ class NetworkAnalysis { */ static async compute_(devtoolsLog, context) { const records = await NetworkRecords.request(devtoolsLog, context); - return Lantern.Simulation.NetworkAnalyzer.analyze(records); + return Lantern.Core.NetworkAnalyzer.analyze(records); } } diff --git a/core/computed/page-dependency-graph.js b/core/computed/page-dependency-graph.js index 061b3354921e..3780b3f8fbf1 100644 --- a/core/computed/page-dependency-graph.js +++ b/core/computed/page-dependency-graph.js @@ -11,13 +11,11 @@ import {ProcessedTrace} from './processed-trace.js'; import {NetworkRecords} from './network-records.js'; import {TraceEngineResult} from './trace-engine-result.js'; -/** @typedef {import('../lib/lantern/BaseNode.js').Node} Node */ - class PageDependencyGraph { /** * @param {{trace: LH.Trace, devtoolsLog: LH.DevtoolsLog, URL: LH.Artifacts['URL'], fromTrace?: boolean}} data * @param {LH.Artifacts.ComputedContext} context - * @return {Promise} + * @return {Promise} */ static async compute_(data, context) { const {trace, devtoolsLog, URL} = data; @@ -33,11 +31,12 @@ class PageDependencyGraph { Lantern.TraceEngineComputationData.createNetworkRequests(trace, traceEngineData); const graph = Lantern.TraceEngineComputationData.createGraph(requests, trace, traceEngineData, URL); + // @ts-expect-error for now, ignore that this is a SyntheticNetworkEvent instead of LH's NetworkEvent. 
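// Downstream usage is unchanged by this refactor: consumers still request the computed artifact
// and get back a Lantern graph node they can traverse. A sketch (the import path and surrounding
// wiring are illustrative; `context` is a computed-artifact context):
import {PageDependencyGraph} from '../computed/page-dependency-graph.js';

/**
 * @param {{trace: LH.Trace, devtoolsLog: LH.DevtoolsLog, URL: LH.Artifacts['URL']}} data
 * @param {LH.Artifacts.ComputedContext} context
 */
async function logNetworkUrls(data, context) {
  const graph = await PageDependencyGraph.request(data, context);
  graph.traverse(node => {
    if (node.type === 'network') console.log(node.request.url);
  });
}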
return graph; } const lanternRequests = networkRecords.map(NetworkRequest.asLanternNetworkRequest); - return Lantern.PageDependencyGraph.createGraph(mainThreadEvents, lanternRequests, URL); + return Lantern.Graph.PageDependencyGraph.createGraph(mainThreadEvents, lanternRequests, URL); } } diff --git a/core/computed/tbt-impact-tasks.js b/core/computed/tbt-impact-tasks.js index 61f0040acfac..1a8097cff1f7 100644 --- a/core/computed/tbt-impact-tasks.js +++ b/core/computed/tbt-impact-tasks.js @@ -12,7 +12,7 @@ import {Interactive} from './metrics/interactive.js'; import {TotalBlockingTime} from './metrics/total-blocking-time.js'; import {ProcessedTrace} from './processed-trace.js'; -const {calculateTbtImpactForEvent} = Lantern.TBTUtils; +const {calculateTbtImpactForEvent} = Lantern.Metrics.TBTUtils; class TBTImpactTasks { /** @@ -134,7 +134,7 @@ class TBTImpactTasks { /** @type {Map} */ const topLevelTaskToEvent = new Map(); - /** @type {Map} */ + /** @type {Map} */ const traceEventToTask = new Map(); for (const task of tasks) { traceEventToTask.set(task.event, task); diff --git a/core/lib/asset-saver.js b/core/lib/asset-saver.js index 55512b49bc05..89322bb6701b 100644 --- a/core/lib/asset-saver.js +++ b/core/lib/asset-saver.js @@ -449,7 +449,7 @@ function saveDevtoolsLog(devtoolsLog, devtoolLogFilename, options = {}) { async function saveLanternDebugTraces(pathWithBasename) { if (!process.env.LANTERN_DEBUG) return; - for (const [label, nodeTimings] of Lantern.Simulation.Simulator.ALL_NODE_TIMINGS) { + for (const [label, nodeTimings] of Lantern.Simulation.Simulator.allNodeTimings) { if (lanternTraceSaver.simulationNamesToIgnore.includes(label)) continue; const traceFilename = `${pathWithBasename}-${label}${traceSuffix}`; diff --git a/core/lib/lantern-trace-saver.js b/core/lib/lantern-trace-saver.js index a9a10cdd3e5f..84544c9e27bd 100644 --- a/core/lib/lantern-trace-saver.js +++ b/core/lib/lantern-trace-saver.js @@ -4,11 +4,10 @@ * SPDX-License-Identifier: Apache-2.0 */ -/** @typedef {import('./lantern/BaseNode.js').Node} Node */ -/** @typedef {import('./lantern/simulation/Simulator.js').CompleteNodeTiming} CompleteNodeTiming */ +/** @typedef {import('./lantern/lantern.js').Simulation.CompleteNodeTiming} CompleteNodeTiming */ /** - * @param {Map} nodeTimings + * @param {Map} nodeTimings * @return {LH.Trace} */ function convertNodeTimingsToTrace(nodeTimings) { diff --git a/core/lib/lantern/BaseNode.js b/core/lib/lantern/BaseNode.js deleted file mode 100644 index cecd085c0ff2..000000000000 --- a/core/lib/lantern/BaseNode.js +++ /dev/null @@ -1,369 +0,0 @@ -/** - * @license - * Copyright 2017 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -/** - * A union of all types derived from BaseNode, allowing type check discrimination - * based on `node.type`. If a new node type is created, it should be added here. - * @template [T=any] - * @typedef {import('./CpuNode.js').CPUNode | import('./NetworkNode.js').NetworkNode} Node - */ - -/** - * @fileoverview This class encapsulates logic for handling resources and tasks used to model the - * execution dependency graph of the page. A node has a unique identifier and can depend on other - * nodes/be depended on. The construction of the graph maintains some important invariants that are - * inherent to the model: - * - * 1. The graph is a DAG, there are no cycles. - * 2. There is always a root node upon which all other nodes eventually depend. 
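// A tiny example of the invariants described in this (now removed) fileoverview; after this PR
// the class ships from the Lantern library instead, e.g. as Lantern.Graph.BaseNode. Sketch only;
// the import path is illustrative:
import * as Lantern from './lantern.js';

const root = new Lantern.Graph.BaseNode('root');
const child = new Lantern.Graph.BaseNode('child');
root.addDependent(child); // adds the edge root -> child; the graph stays acyclic

child.getRootNode(); // -> root, the single node everything ultimately depends on
root.traverse(node => console.log(node.id)); // BFS from the root: 'root', then 'child'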
- * - * This allows particular optimizations in this class so that we do no need to check for cycles as - * these methods are called and we can always start traversal at the root node. - */ - -/** - * @template [T=any] - */ -class BaseNode { - static TYPES = /** @type {{NETWORK: 'network', CPU: 'cpu'}} */({ - NETWORK: 'network', - CPU: 'cpu', - }); - - /** - * @param {string} id - */ - constructor(id) { - this._id = id; - this._isMainDocument = false; - /** @type {Node[]} */ - this._dependents = []; - /** @type {Node[]} */ - this._dependencies = []; - } - - /** - * @return {string} - */ - get id() { - return this._id; - } - - /** - * @return {typeof BaseNode.TYPES[keyof typeof BaseNode.TYPES]} - */ - get type() { - throw new Error('Unimplemented'); - } - - /** - * In microseconds - * @return {number} - */ - get startTime() { - throw new Error('Unimplemented'); - } - - /** - * In microseconds - * @return {number} - */ - get endTime() { - throw new Error('Unimplemented'); - } - - /** - * @param {boolean} value - */ - setIsMainDocument(value) { - this._isMainDocument = value; - } - - /** - * @return {boolean} - */ - isMainDocument() { - return this._isMainDocument; - } - - /** - * @return {Node[]} - */ - getDependents() { - return this._dependents.slice(); - } - - /** - * @return {number} - */ - getNumberOfDependents() { - return this._dependents.length; - } - - /** - * @return {Node[]} - */ - getDependencies() { - return this._dependencies.slice(); - } - - /** - * @return {number} - */ - getNumberOfDependencies() { - return this._dependencies.length; - } - - /** - * @return {Node} - */ - getRootNode() { - let rootNode = /** @type {Node} */ (/** @type {BaseNode} */ (this)); - while (rootNode._dependencies.length) { - rootNode = rootNode._dependencies[0]; - } - - return rootNode; - } - - /** - * @param {Node} node - */ - addDependent(node) { - node.addDependency(/** @type {Node} */ (/** @type {BaseNode} */ (this))); - } - - /** - * @param {Node} node - */ - addDependency(node) { - // @ts-expect-error - in checkJs, ts doesn't know that CPUNode and NetworkNode *are* BaseNodes. - if (node === this) throw new Error('Cannot add dependency on itself'); - - if (this._dependencies.includes(node)) { - return; - } - - node._dependents.push(/** @type {Node} */ (/** @type {BaseNode} */ (this))); - this._dependencies.push(node); - } - - /** - * @param {Node} node - */ - removeDependent(node) { - node.removeDependency(/** @type {Node} */ (/** @type {BaseNode} */ (this))); - } - - /** - * @param {Node} node - */ - removeDependency(node) { - if (!this._dependencies.includes(node)) { - return; - } - - const thisIndex = node._dependents.indexOf(/** @type {Node} */ (/** @type {BaseNode} */(this))); - node._dependents.splice(thisIndex, 1); - this._dependencies.splice(this._dependencies.indexOf(node), 1); - } - - removeAllDependencies() { - for (const node of this._dependencies.slice()) { - this.removeDependency(node); - } - } - - /** - * Computes whether the given node is anywhere in the dependency graph of this node. - * While this method can prevent cycles, it walks the graph and should be used sparingly. - * Nodes are always considered dependent on themselves for the purposes of cycle detection. - * @param {BaseNode} node - * @return {boolean} - */ - isDependentOn(node) { - let isDependentOnNode = false; - this.traverse(currentNode => { - if (isDependentOnNode) return; - isDependentOnNode = currentNode === node; - }, currentNode => { - // If we've already found the dependency, don't traverse further. 
- if (isDependentOnNode) return []; - // Otherwise, traverse the dependencies. - return currentNode.getDependencies(); - }); - - return isDependentOnNode; - } - - /** - * Clones the node's information without adding any dependencies/dependents. - * @return {Node} - */ - cloneWithoutRelationships() { - const node = /** @type {Node} */ (new BaseNode(this.id)); - node.setIsMainDocument(this._isMainDocument); - return node; - } - - /** - * Clones the entire graph connected to this node filtered by the optional predicate. If a node is - * included by the predicate, all nodes along the paths between the node and the root will be included. If the - * node this was called on is not included in the resulting filtered graph, the method will throw. - * @param {function(Node):boolean} [predicate] - * @return {Node} - */ - cloneWithRelationships(predicate) { - const rootNode = this.getRootNode(); - - /** @type {Map} */ - const idsToIncludedClones = new Map(); - - // Walk down dependents. - rootNode.traverse(node => { - if (idsToIncludedClones.has(node.id)) return; - - if (predicate === undefined) { - // No condition for entry, so clone every node. - idsToIncludedClones.set(node.id, node.cloneWithoutRelationships()); - return; - } - - if (predicate(node)) { - // Node included, so walk back up dependencies, cloning nodes from here back to the root. - node.traverse( - node => idsToIncludedClones.set(node.id, node.cloneWithoutRelationships()), - // Dependencies already cloned have already cloned ancestors, so no need to visit again. - node => node._dependencies.filter(parent => !idsToIncludedClones.has(parent.id)) - ); - } - }); - - // Copy dependencies between nodes. - rootNode.traverse(originalNode => { - const clonedNode = idsToIncludedClones.get(originalNode.id); - if (!clonedNode) return; - - for (const dependency of originalNode._dependencies) { - const clonedDependency = idsToIncludedClones.get(dependency.id); - if (!clonedDependency) throw new Error('Dependency somehow not cloned'); - clonedNode.addDependency(clonedDependency); - } - }); - - const clonedThisNode = idsToIncludedClones.get(this.id); - if (!clonedThisNode) throw new Error('Cloned graph missing node'); - return clonedThisNode; - } - - /** - * Traverses all connected nodes in BFS order, calling `callback` exactly once - * on each. `traversalPath` is the shortest (though not necessarily unique) - * path from `node` to the root of the iteration. - * - * The `getNextNodes` function takes a visited node and returns which nodes to - * visit next. It defaults to returning the node's dependents. 
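// Example of passing `getNextNodes` to walk toward dependencies instead of the default
// dependents, the same pattern isDependentOn() uses above. `someNode` stands in for any graph
// node:
const visitedIds = [];
someNode.traverse(
  node => visitedIds.push(node.id),
  node => node.getDependencies() // overrides the default of node.getDependents()
);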
- * @param {(node: Node, traversalPath: Node[]) => void} callback - * @param {function(Node): Node[]} [getNextNodes] - */ - traverse(callback, getNextNodes) { - for (const {node, traversalPath} of this.traverseGenerator(getNextNodes)) { - callback(node, traversalPath); - } - } - - /** - * @see BaseNode.traverse - * @param {function(Node): Node[]} [getNextNodes] - */ - * traverseGenerator(getNextNodes) { - if (!getNextNodes) { - getNextNodes = node => node.getDependents(); - } - - /** @type {Node[][]} */ - // @ts-expect-error - only traverses graphs of Node, so force tsc to treat `this` as one - const queue = [[this]]; - const visited = new Set([this.id]); - - while (queue.length) { - /** @type {Node[]} */ - // @ts-expect-error - queue has length so it's guaranteed to have an item - const traversalPath = queue.shift(); - const node = traversalPath[0]; - yield {node, traversalPath}; - - for (const nextNode of getNextNodes(node)) { - if (visited.has(nextNode.id)) continue; - visited.add(nextNode.id); - - queue.push([nextNode, ...traversalPath]); - } - } - } - - /** - * Returns whether the given node has a cycle in its dependent graph by performing a DFS. - * @param {Node} node - * @param {'dependents'|'dependencies'|'both'} [direction] - * @return {boolean} - */ - static hasCycle(node, direction = 'both') { - // Checking 'both' is the default entrypoint to recursively check both directions - if (direction === 'both') { - return BaseNode.hasCycle(node, 'dependents') || BaseNode.hasCycle(node, 'dependencies'); - } - - const visited = new Set(); - /** @type {Node[]} */ - const currentPath = []; - const toVisit = [node]; - const depthAdded = new Map([[node, 0]]); - - // Keep going while we have nodes to visit in the stack - while (toVisit.length) { - // Get the last node in the stack (DFS uses stack, not queue) - /** @type {Node} */ - // @ts-expect-error - toVisit has length so it's guaranteed to have an item - const currentNode = toVisit.pop(); - - // We've hit a cycle if the node we're visiting is in our current dependency path - if (currentPath.includes(currentNode)) return true; - // If we've already visited the node, no need to revisit it - if (visited.has(currentNode)) continue; - - // Since we're visiting this node, clear out any nodes in our path that we had to backtrack - // @ts-expect-error - while (currentPath.length > depthAdded.get(currentNode)) currentPath.pop(); - - // Update our data structures to reflect that we're adding this node to our path - visited.add(currentNode); - currentPath.push(currentNode); - - // Add all of its dependents to our toVisit stack - const nodesToExplore = direction === 'dependents' ? 
- currentNode._dependents : - currentNode._dependencies; - for (const nextNode of nodesToExplore) { - if (toVisit.includes(nextNode)) continue; - toVisit.push(nextNode); - depthAdded.set(nextNode, currentPath.length); - } - } - - return false; - } - - /** - * @param {Node} node - * @return {boolean} - */ - canDependOn(node) { - return node.startTime <= this.startTime; - } -} - -export {BaseNode}; diff --git a/core/lib/lantern/BaseNode.test.js b/core/lib/lantern/BaseNode.test.js deleted file mode 100644 index 9f3bf1950b65..000000000000 --- a/core/lib/lantern/BaseNode.test.js +++ /dev/null @@ -1,385 +0,0 @@ -/** - * @license - * Copyright 2017 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import assert from 'assert/strict'; - -import * as Lantern from './lantern.js'; - -const {BaseNode, NetworkNode} = Lantern; - -function sortedById(nodeArray) { - return nodeArray.sort((node1, node2) => node1.id.localeCompare(node2.id)); -} - -function createComplexGraph() { - // B F - // / \ / - // A D - E - // \ / \ - // C G - H - - const nodeA = new BaseNode('A'); - const nodeB = new BaseNode('B'); - const nodeC = new BaseNode('C'); - const nodeD = new BaseNode('D'); - const nodeE = new BaseNode('E'); - const nodeF = new BaseNode('F'); - const nodeG = new BaseNode('G'); - const nodeH = new BaseNode('H'); - - nodeA.addDependent(nodeB); - nodeA.addDependent(nodeC); - nodeB.addDependent(nodeD); - nodeC.addDependent(nodeD); - nodeD.addDependent(nodeE); - nodeE.addDependent(nodeF); - nodeE.addDependent(nodeG); - nodeG.addDependent(nodeH); - - return { - nodeA, - nodeB, - nodeC, - nodeD, - nodeE, - nodeF, - nodeG, - nodeH, - }; -} -describe('DependencyGraph/Node', () => { - describe('#constructor', () => { - it('should set the ID', () => { - const node = new BaseNode('foo'); - assert.equal(node.id, 'foo'); - }); - }); - - describe('.addDependent', () => { - it('should add the correct edge', () => { - const nodeA = new BaseNode(1); - const nodeB = new BaseNode(2); - nodeA.addDependent(nodeB); - - assert.deepEqual(nodeA.getDependents(), [nodeB]); - assert.deepEqual(nodeB.getDependencies(), [nodeA]); - }); - }); - - describe('.addDependency', () => { - it('should add the correct edge', () => { - const nodeA = new BaseNode(1); - const nodeB = new BaseNode(2); - nodeA.addDependency(nodeB); - - assert.deepEqual(nodeA.getDependencies(), [nodeB]); - assert.deepEqual(nodeB.getDependents(), [nodeA]); - }); - - it('throw when trying to add a dependency on itself', () => { - const nodeA = new BaseNode(1); - expect(() => nodeA.addDependency(nodeA)).toThrow(); - }); - }); - - describe('.isDependentOn', () => { - it('should identify the dependency relationships', () => { - const graph = createComplexGraph(); - const nodes = Object.values(graph); - const {nodeA, nodeB, nodeD, nodeF, nodeH} = graph; - - for (const node of nodes) { - expect(nodeA.isDependentOn(node)).toBe(node === nodeA); - expect(nodeB.isDependentOn(node)).toBe(node === nodeA || node === nodeB); - expect(nodeH.isDependentOn(node)).toBe(node !== nodeF); - } - - expect(nodeD.isDependentOn(nodeA)).toBe(true); - expect(nodeD.isDependentOn(nodeB)).toBe(true); - expect(nodeD.isDependentOn(nodeD)).toBe(true); - - expect(nodeD.isDependentOn(nodeH)).toBe(false); - expect(nodeH.isDependentOn(nodeD)).toBe(true); - - expect(nodeF.isDependentOn(nodeH)).toBe(false); - expect(nodeH.isDependentOn(nodeF)).toBe(false); - }); - }); - - describe('.getRootNode', () => { - it('should return the root node', () => { - const graph = createComplexGraph(); - - 
assert.equal(graph.nodeA.getRootNode(), graph.nodeA); - assert.equal(graph.nodeB.getRootNode(), graph.nodeA); - assert.equal(graph.nodeD.getRootNode(), graph.nodeA); - assert.equal(graph.nodeF.getRootNode(), graph.nodeA); - }); - }); - - describe('.cloneWithoutRelationships', () => { - it('should create a copy', () => { - const node = new BaseNode(1); - const neighbor = new BaseNode(2); - node.addDependency(neighbor); - const clone = node.cloneWithoutRelationships(); - - assert.equal(clone.id, 1); - assert.notEqual(node, clone); - assert.equal(clone.getDependencies().length, 0); - }); - - it('should copy isMainDocument', () => { - const node = new BaseNode(1); - node.setIsMainDocument(true); - const networkNode = new NetworkNode({}); - networkNode.setIsMainDocument(true); - - assert.ok(node.cloneWithoutRelationships().isMainDocument()); - assert.ok(networkNode.cloneWithoutRelationships().isMainDocument()); - }); - }); - - describe('.cloneWithRelationships', () => { - it('should create a copy of a basic graph', () => { - const node = new BaseNode(1); - const neighbor = new BaseNode(2); - node.addDependency(neighbor); - const clone = node.cloneWithRelationships(); - - assert.equal(clone.id, 1); - assert.notEqual(node, clone); - - const dependencies = clone.getDependencies(); - assert.equal(dependencies.length, 1); - - const neighborClone = dependencies[0]; - assert.equal(neighborClone.id, neighbor.id); - assert.notEqual(neighborClone, neighbor); - assert.equal(neighborClone.getDependents()[0], clone); - }); - - it('should create a copy of a complex graph', () => { - const graph = createComplexGraph(); - const clone = graph.nodeA.cloneWithRelationships(); - - const clonedIdMap = new Map(); - clone.traverse(node => clonedIdMap.set(node.id, node)); - assert.equal(clonedIdMap.size, 8); - - graph.nodeA.traverse(node => { - const clone = clonedIdMap.get(node.id); - assert.equal(clone.id, node.id); - assert.notEqual(clone, node); - - const actualDependents = sortedById(clone.getDependents()); - const expectedDependents = sortedById(node.getDependents()); - actualDependents.forEach((cloneDependent, index) => { - const originalDependent = expectedDependents[index]; - assert.equal(cloneDependent.id, originalDependent.id); - assert.notEqual(cloneDependent, originalDependent); - }); - }); - }); - - it('should create a copy of a graph with long dependency chains', () => { - // C - D - E - F - // / \ - // A - - - - - - - B - const nodeA = new BaseNode('A'); - const nodeB = new BaseNode('B'); - const nodeC = new BaseNode('C'); - const nodeD = new BaseNode('D'); - const nodeE = new BaseNode('E'); - const nodeF = new BaseNode('F'); - - nodeA.addDependent(nodeB); - nodeF.addDependent(nodeB); - - nodeA.addDependent(nodeC); - nodeC.addDependent(nodeD); - nodeD.addDependent(nodeE); - nodeE.addDependent(nodeF); - - const clone = nodeA.cloneWithRelationships(); - - const clonedIdMap = new Map(); - clone.traverse(node => clonedIdMap.set(node.id, node)); - assert.equal(clonedIdMap.size, 6); - }); - - it('should create a copy when not starting at root node', () => { - const graph = createComplexGraph(); - const cloneD = graph.nodeD.cloneWithRelationships(); - assert.equal(cloneD.id, 'D'); - assert.equal(cloneD.getRootNode().id, 'A'); - }); - - it('should create a partial copy of a complex graph', () => { - const graph = createComplexGraph(); - // create a clone with F and all its dependencies - const clone = graph.nodeA.cloneWithRelationships(node => node.id === 'F'); - - const clonedIdMap = new Map(); - 
clone.traverse(node => clonedIdMap.set(node.id, node)); - - assert.equal(clonedIdMap.size, 6); - assert.ok(clonedIdMap.has('F'), 'did not include target node'); - assert.ok(clonedIdMap.has('E'), 'did not include dependency'); - assert.ok(clonedIdMap.has('B'), 'did not include branched dependency'); - assert.ok(clonedIdMap.has('C'), 'did not include branched dependency'); - assert.equal(clonedIdMap.get('G'), undefined); - assert.equal(clonedIdMap.get('H'), undefined); - }); - - it('should throw if original node is not in cloned graph', () => { - const graph = createComplexGraph(); - assert.throws( - // clone from root to nodeB, but called on nodeD - _ => graph.nodeD.cloneWithRelationships(node => node.id === 'B'), - /^Error: Cloned graph missing node$/ - ); - }); - }); - - describe('.traverse', () => { - it('should visit every dependent node', () => { - const graph = createComplexGraph(); - const ids = []; - graph.nodeA.traverse(node => ids.push(node.id)); - - assert.deepEqual(ids, ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']); - }); - - it('should include a shortest traversal path to every dependent node', () => { - const graph = createComplexGraph(); - const paths = []; - graph.nodeA.traverse((node, traversalPath) => { - assert.strictEqual(node.id, traversalPath[0].id); - paths.push(traversalPath.map(node => node.id)); - }); - - assert.deepStrictEqual(paths, [ - ['A'], - ['B', 'A'], - ['C', 'A'], - ['D', 'B', 'A'], - ['E', 'D', 'B', 'A'], - ['F', 'E', 'D', 'B', 'A'], - ['G', 'E', 'D', 'B', 'A'], - ['H', 'G', 'E', 'D', 'B', 'A'], - ]); - }); - - it('should respect getNext', () => { - const graph = createComplexGraph(); - const ids = []; - graph.nodeF.traverse( - node => ids.push(node.id), - node => node.getDependencies() - ); - - assert.deepEqual(ids, ['F', 'E', 'D', 'B', 'C', 'A']); - }); - }); - - describe('#hasCycle', () => { - it('should return false for DAGs', () => { - const graph = createComplexGraph(); - assert.equal(BaseNode.hasCycle(graph.nodeA), false); - }); - - it('should return false for triangular DAGs', () => { - // B - // / \ - // A - C - const nodeA = new BaseNode('A'); - const nodeB = new BaseNode('B'); - const nodeC = new BaseNode('C'); - - nodeA.addDependent(nodeC); - nodeA.addDependent(nodeB); - nodeB.addDependent(nodeC); - - assert.equal(BaseNode.hasCycle(nodeA), false); - }); - - it('should return true for basic cycles', () => { - // A - B - C - A! - const nodeA = new BaseNode('A'); - const nodeB = new BaseNode('B'); - const nodeC = new BaseNode('C'); - - nodeA.addDependent(nodeB); - nodeB.addDependent(nodeC); - nodeC.addDependent(nodeA); - - assert.equal(BaseNode.hasCycle(nodeA), true); - }); - - it('should return true for children', () => { - // A! - // / - // A - B - C - const nodeA = new BaseNode('A'); - const nodeB = new BaseNode('B'); - const nodeC = new BaseNode('C'); - - nodeA.addDependent(nodeB); - nodeB.addDependent(nodeC); - nodeB.addDependent(nodeA); - - assert.equal(BaseNode.hasCycle(nodeC), true); - }); - - it('should return true for complex cycles', () => { - // B - D - F - G - C! 
- // / / - // A - - C - E - H - const nodeA = new BaseNode('A'); - const nodeB = new BaseNode('B'); - const nodeC = new BaseNode('C'); - const nodeD = new BaseNode('D'); - const nodeE = new BaseNode('E'); - const nodeF = new BaseNode('F'); - const nodeG = new BaseNode('G'); - const nodeH = new BaseNode('H'); - - nodeA.addDependent(nodeB); - nodeA.addDependent(nodeC); - nodeB.addDependent(nodeD); - nodeC.addDependent(nodeE); - nodeC.addDependent(nodeF); - nodeD.addDependent(nodeF); - nodeE.addDependent(nodeH); - nodeF.addDependent(nodeG); - nodeG.addDependent(nodeC); - - assert.equal(BaseNode.hasCycle(nodeA), true); - assert.equal(BaseNode.hasCycle(nodeB), true); - assert.equal(BaseNode.hasCycle(nodeC), true); - assert.equal(BaseNode.hasCycle(nodeD), true); - assert.equal(BaseNode.hasCycle(nodeE), true); - assert.equal(BaseNode.hasCycle(nodeF), true); - assert.equal(BaseNode.hasCycle(nodeG), true); - assert.equal(BaseNode.hasCycle(nodeH), true); - }); - - it('works for very large graphs', () => { - const root = new BaseNode('root'); - - let lastNode = root; - for (let i = 0; i < 10000; i++) { - const nextNode = new BaseNode(`child${i}`); - lastNode.addDependent(nextNode); - lastNode = nextNode; - } - - lastNode.addDependent(root); - assert.equal(BaseNode.hasCycle(root), true); - }); - }); -}); diff --git a/core/lib/lantern/CpuNode.js b/core/lib/lantern/CpuNode.js deleted file mode 100644 index 749626abf13f..000000000000 --- a/core/lib/lantern/CpuNode.js +++ /dev/null @@ -1,99 +0,0 @@ -/** - * @license - * Copyright 2017 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from './lantern.js'; - -/** - * @template [T=any] - * @extends {Lantern.BaseNode} - */ -class CPUNode extends Lantern.BaseNode { - /** - * @param {Lantern.TraceEvent} parentEvent - * @param {Lantern.TraceEvent[]=} childEvents - * @param {number=} correctedEndTs - */ - constructor(parentEvent, childEvents = [], correctedEndTs) { - const nodeId = `${parentEvent.tid}.${parentEvent.ts}`; - super(nodeId); - - this._event = parentEvent; - this._childEvents = childEvents; - this._correctedEndTs = correctedEndTs; - } - - get type() { - return Lantern.BaseNode.TYPES.CPU; - } - - /** - * @return {number} - */ - get startTime() { - return this._event.ts; - } - - /** - * @return {number} - */ - get endTime() { - if (this._correctedEndTs) return this._correctedEndTs; - return this._event.ts + this._event.dur; - } - - /** - * @return {number} - */ - get duration() { - return this.endTime - this.startTime; - } - - /** - * @return {Lantern.TraceEvent} - */ - get event() { - return this._event; - } - - /** - * @return {Lantern.TraceEvent[]} - */ - get childEvents() { - return this._childEvents; - } - - /** - * Returns true if this node contains a Layout task. - * @return {boolean} - */ - didPerformLayout() { - return this._childEvents.some(evt => evt.name === 'Layout'); - } - - /** - * Returns the script URLs that had their EvaluateScript events occur in this task. 
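// Example use of the accessor documented above: collect every script URL evaluated on the main
// thread across a dependency graph. Sketch; `graph` is assumed to be any Lantern graph node:
/** @type {Set<string>} */
const evaluatedScriptUrls = new Set();
graph.traverse(node => {
  if (node.type !== 'cpu') return;
  for (const url of node.getEvaluateScriptURLs()) evaluatedScriptUrls.add(url);
});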
- */ - getEvaluateScriptURLs() { - /** @type {Set} */ - const urls = new Set(); - for (const event of this._childEvents) { - if (event.name !== 'EvaluateScript') continue; - if (!event.args.data || !event.args.data.url) continue; - urls.add(event.args.data.url); - } - - return urls; - } - - /** - * @return {CPUNode} - */ - cloneWithoutRelationships() { - return new CPUNode(this._event, this._childEvents, this._correctedEndTs); - } -} - -export {CPUNode}; diff --git a/core/lib/lantern/LanternError.js b/core/lib/lantern/LanternError.js deleted file mode 100644 index 54fa2116ddc5..000000000000 --- a/core/lib/lantern/LanternError.js +++ /dev/null @@ -1,9 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -class LanternError extends Error {} - -export {LanternError}; diff --git a/core/lib/lantern/Metric.js b/core/lib/lantern/Metric.js deleted file mode 100644 index e0497b047362..000000000000 --- a/core/lib/lantern/Metric.js +++ /dev/null @@ -1,137 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from './lantern.js'; - -/** @typedef {import('./BaseNode.js').Node} Node */ -/** @typedef {import('./NetworkNode.js').NetworkNode} NetworkNode */ -/** @typedef {import('./simulation/Simulator.js').Simulator} Simulator */ - -/** - * @typedef Extras - * @property {boolean} optimistic - * @property {Lantern.Metrics.Result=} fcpResult - * @property {Lantern.Metrics.Result=} lcpResult - * @property {Lantern.Metrics.Result=} interactiveResult - * @property {number=} observedSpeedIndex - */ - -class Metric { - /** - * @param {Node} dependencyGraph - * @param {function(NetworkNode):boolean=} treatNodeAsRenderBlocking - * @return {Set} - */ - static getScriptUrls(dependencyGraph, treatNodeAsRenderBlocking) { - /** @type {Set} */ - const scriptUrls = new Set(); - - dependencyGraph.traverse(node => { - if (node.type !== Lantern.BaseNode.TYPES.NETWORK) return; - if (node.request.resourceType !== Lantern.NetworkRequestTypes.Script) return; - if (treatNodeAsRenderBlocking?.(node)) { - scriptUrls.add(node.request.url); - } - }); - - return scriptUrls; - } - - /** - * @return {Lantern.Simulation.MetricCoefficients} - */ - static get COEFFICIENTS() { - throw new Error('COEFFICIENTS unimplemented!'); - } - - /** - * Returns the coefficients, scaled by the throttling settings if needed by the metric. - * Some lantern metrics (speed-index) use components in their estimate that are not - * from the simulator. In this case, we need to adjust the coefficients as the target throttling - * settings change. 
- * - * @param {number} rttMs - * @return {Lantern.Simulation.MetricCoefficients} - */ - static getScaledCoefficients(rttMs) { // eslint-disable-line no-unused-vars - return this.COEFFICIENTS; - } - - /** - * @param {Node} dependencyGraph - * @param {Lantern.Simulation.ProcessedNavigation} processedNavigation - * @return {Node} - */ - static getOptimisticGraph(dependencyGraph, processedNavigation) { // eslint-disable-line no-unused-vars - throw new Error('Optimistic graph unimplemented!'); - } - - /** - * @param {Node} dependencyGraph - * @param {Lantern.Simulation.ProcessedNavigation} processedNavigation - * @return {Node} - */ - static getPessimisticGraph(dependencyGraph, processedNavigation) { // eslint-disable-line no-unused-vars - throw new Error('Pessmistic graph unimplemented!'); - } - - /** - * @param {Lantern.Simulation.Result} simulationResult - * @param {Extras} extras - * @return {Lantern.Simulation.Result} - */ - static getEstimateFromSimulation(simulationResult, extras) { // eslint-disable-line no-unused-vars - return simulationResult; - } - - /** - * @param {Lantern.Simulation.MetricComputationDataInput} data - * @param {Omit=} extras - * @return {Promise} - */ - static async compute(data, extras) { - const {simulator, graph, processedNavigation} = data; - - const metricName = this.name.replace('Lantern', ''); - const optimisticGraph = this.getOptimisticGraph(graph, processedNavigation); - const pessimisticGraph = this.getPessimisticGraph(graph, processedNavigation); - - let simulateOptions = {label: `optimistic${metricName}`}; - const optimisticSimulation = simulator.simulate(optimisticGraph, simulateOptions); - - simulateOptions = {label: `pessimistic${metricName}`}; - const pessimisticSimulation = simulator.simulate(pessimisticGraph, simulateOptions); - - const optimisticEstimate = this.getEstimateFromSimulation( - optimisticSimulation, - {...extras, optimistic: true} - ); - - const pessimisticEstimate = this.getEstimateFromSimulation( - pessimisticSimulation, - {...extras, optimistic: false} - ); - - const coefficients = this.getScaledCoefficients(simulator.rtt); - // Estimates under 1s don't really follow the normal curve fit, minimize the impact of the intercept - const interceptMultiplier = coefficients.intercept > 0 ? 
- Math.min(1, optimisticEstimate.timeInMs / 1000) : 1; - const timing = - coefficients.intercept * interceptMultiplier + - coefficients.optimistic * optimisticEstimate.timeInMs + - coefficients.pessimistic * pessimisticEstimate.timeInMs; - - return { - timing, - optimisticEstimate, - pessimisticEstimate, - optimisticGraph, - pessimisticGraph, - }; - } -} - -export {Metric}; diff --git a/core/lib/lantern/NetworkNode.js b/core/lib/lantern/NetworkNode.js deleted file mode 100644 index 6632726fa5be..000000000000 --- a/core/lib/lantern/NetworkNode.js +++ /dev/null @@ -1,136 +0,0 @@ -/** - * @license - * Copyright 2017 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from './lantern.js'; -import {NetworkRequestTypes} from './lantern.js'; - -const NON_NETWORK_SCHEMES = [ - 'blob', // @see https://developer.mozilla.org/en-US/docs/Web/API/URL/createObjectURL - 'data', // @see https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs - 'intent', // @see https://developer.chrome.com/docs/multidevice/android/intents/ - 'file', // @see https://en.wikipedia.org/wiki/File_URI_scheme - 'filesystem', // @see https://developer.mozilla.org/en-US/docs/Web/API/FileSystem - 'chrome-extension', -]; - -/** - * Note: the `protocol` field from CDP can be 'h2', 'http', (not 'https'!) or it'll be url's scheme. - * https://source.chromium.org/chromium/chromium/src/+/main:content/browser/devtools/protocol/network_handler.cc;l=598-611;drc=56d4a9a9deb30be73adcee8737c73bcb2a5ab64f - * However, a `new URL(href).protocol` has a colon suffix. - * https://url.spec.whatwg.org/#dom-url-protocol - * A URL's `scheme` is specced as the `protocol` sans-colon, but isn't exposed on a URL object. - * This method can take all 3 of these string types as a parameter. - * @param {string} protocol Either a networkRequest's `protocol` per CDP or a `new URL(href).protocol` - * @return {boolean} - */ -function isNonNetworkProtocol(protocol) { - // Strip off any colon - const urlScheme = protocol.includes(':') ? protocol.slice(0, protocol.indexOf(':')) : protocol; - return NON_NETWORK_SCHEMES.includes(urlScheme); -} - -/** - * @template [T=any] - * @extends {Lantern.BaseNode} - */ -class NetworkNode extends Lantern.BaseNode { - /** - * @param {Lantern.NetworkRequest} networkRequest - */ - constructor(networkRequest) { - super(networkRequest.requestId); - /** @private */ - this._request = networkRequest; - } - - get type() { - return Lantern.BaseNode.TYPES.NETWORK; - } - - /** - * @return {number} - */ - get startTime() { - return this._request.rendererStartTime * 1000; - } - - /** - * @return {number} - */ - get endTime() { - return this._request.networkEndTime * 1000; - } - - /** - * @return {Readonly} - */ - get rawRequest() { - return /** @type {Required} */ (this._request.rawRequest); - } - - /** - * @return {Lantern.NetworkRequest} - */ - get request() { - return this._request; - } - - /** - * @return {string} - */ - get initiatorType() { - return this._request.initiator && this._request.initiator.type; - } - - /** - * @return {boolean} - */ - get fromDiskCache() { - return !!this._request.fromDiskCache; - } - - /** - * @return {boolean} - */ - get isNonNetworkProtocol() { - // The 'protocol' field in devtools a string more like a `scheme` - return isNonNetworkProtocol(this.request.protocol) || - // But `protocol` can fail to be populated if the request fails, so fallback to scheme. 
- isNonNetworkProtocol(this.request.parsedURL.scheme); - } - - /** - * Returns whether this network request can be downloaded without a TCP connection. - * During simulation we treat data coming in over a network connection separately from on-device data. - * @return {boolean} - */ - get isConnectionless() { - return this.fromDiskCache || this.isNonNetworkProtocol; - } - - /** - * @return {boolean} - */ - hasRenderBlockingPriority() { - const priority = this._request.priority; - const isScript = this._request.resourceType === NetworkRequestTypes.Script; - const isDocument = this._request.resourceType === NetworkRequestTypes.Document; - const isBlockingScript = priority === 'High' && isScript; - const isBlockingHtmlImport = priority === 'High' && isDocument; - return priority === 'VeryHigh' || isBlockingScript || isBlockingHtmlImport; - } - - /** - * @return {NetworkNode} - */ - cloneWithoutRelationships() { - const node = new NetworkNode(this._request); - node.setIsMainDocument(this._isMainDocument); - return node; - } -} - -export {NetworkNode}; diff --git a/core/lib/lantern/PageDependencyGraph.js b/core/lib/lantern/PageDependencyGraph.js deleted file mode 100644 index b3a7ea609361..000000000000 --- a/core/lib/lantern/PageDependencyGraph.js +++ /dev/null @@ -1,610 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from './lantern.js'; -import {NetworkNode} from './NetworkNode.js'; -import {CPUNode} from './CpuNode.js'; - -// COMPAT: m71+ We added RunTask to `disabled-by-default-lighthouse` -const SCHEDULABLE_TASK_TITLE_LH = 'RunTask'; -// m69-70 DoWork is different and we now need RunTask, see https://bugs.chromium.org/p/chromium/issues/detail?id=871204#c11 -const SCHEDULABLE_TASK_TITLE_ALT1 = 'ThreadControllerImpl::RunTask'; -// In m66-68 refactored to this task title, https://crrev.com/c/883346 -const SCHEDULABLE_TASK_TITLE_ALT2 = 'ThreadControllerImpl::DoWork'; -// m65 and earlier -const SCHEDULABLE_TASK_TITLE_ALT3 = 'TaskQueueManager::ProcessTaskFromWorkQueue'; - -/** @typedef {import('./BaseNode.js').Node} Node */ - -/** - * @typedef {Object} NetworkNodeOutput - * @property {Array} nodes - * @property {Map} idToNodeMap - * @property {Map>} urlToNodeMap - * @property {Map} frameIdToNodeMap - */ - -// Shorter tasks have negligible impact on simulation results. -const SIGNIFICANT_DUR_THRESHOLD_MS = 10; - -// TODO: video files tend to be enormous and throw off all graph traversals, move this ignore -// into estimation logic when we use the dependency graph for other purposes. -const IGNORED_MIME_TYPES_REGEX = /^video/; - -class PageDependencyGraph { - /** - * @param {Lantern.NetworkRequest} request - * @return {Array} - */ - static getNetworkInitiators(request) { - if (!request.initiator) return []; - if (request.initiator.url) return [request.initiator.url]; - if (request.initiator.type === 'script') { - // Script initiators have the stack of callFrames from all functions that led to this request. - // If async stacks are enabled, then the stack will also have the parent functions that asynchronously - // led to this request chained in the `parent` property. 
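For readers tracing the stack-walking loop that follows, this is roughly the shape of a script initiator with an async parent chain. It is an illustrative literal only (the URLs are made up); it mirrors the fixtures that the removed PageDependencyGraph tests construct further down.

```js
// Illustrative only: a script-initiated request whose synchronous stack has one frame,
// plus an async parent stack chained via `parent` (present when async stacks are enabled).
const initiator = {
  type: 'script',
  stack: {
    callFrames: [{url: 'https://example.com/app.js'}],
    parent: {
      callFrames: [{url: 'https://example.com/vendor.js'}],
    },
  },
};

// Walking `stack` and then each `parent` in turn, as the loop below does, collects both
// URLs: ['https://example.com/app.js', 'https://example.com/vendor.js'].
```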
- /** @type {Set} */ - const scriptURLs = new Set(); - let stack = request.initiator.stack; - while (stack) { - const callFrames = stack.callFrames || []; - for (const frame of callFrames) { - if (frame.url) scriptURLs.add(frame.url); - } - - stack = stack.parent; - } - - return Array.from(scriptURLs); - } - - return []; - } - - /** - * @param {Array} networkRequests - * @return {NetworkNodeOutput} - */ - static getNetworkNodeOutput(networkRequests) { - /** @type {Array} */ - const nodes = []; - /** @type {Map} */ - const idToNodeMap = new Map(); - /** @type {Map>} */ - const urlToNodeMap = new Map(); - /** @type {Map} */ - const frameIdToNodeMap = new Map(); - - networkRequests.forEach(request => { - if (IGNORED_MIME_TYPES_REGEX.test(request.mimeType)) return; - if (request.fromWorker) return; - - // Network requestIds can be duplicated for an unknown reason - // Suffix all subsequent requests with `:duplicate` until it's unique - // NOTE: This should never happen with modern NetworkRequest library, but old fixtures - // might still have this issue. - while (idToNodeMap.has(request.requestId)) { - request.requestId += ':duplicate'; - } - - const node = new NetworkNode(request); - nodes.push(node); - - const urlList = urlToNodeMap.get(request.url) || []; - urlList.push(node); - - idToNodeMap.set(request.requestId, node); - urlToNodeMap.set(request.url, urlList); - - // If the request was for the root document of an iframe, save an entry in our - // map so we can link up the task `args.data.frame` dependencies later in graph creation. - if (request.frameId && - request.resourceType === Lantern.NetworkRequestTypes.Document && - request.documentURL === request.url) { - // If there's ever any ambiguity, permanently set the value to `false` to avoid loops in the graph. - const value = frameIdToNodeMap.has(request.frameId) ? null : node; - frameIdToNodeMap.set(request.frameId, value); - } - }); - - return {nodes, idToNodeMap, urlToNodeMap, frameIdToNodeMap}; - } - - /** - * @param {Lantern.TraceEvent} evt - * @return {boolean} - */ - static isScheduleableTask(evt) { - return evt.name === SCHEDULABLE_TASK_TITLE_LH || - evt.name === SCHEDULABLE_TASK_TITLE_ALT1 || - evt.name === SCHEDULABLE_TASK_TITLE_ALT2 || - evt.name === SCHEDULABLE_TASK_TITLE_ALT3; - } - - /** - * There should *always* be at least one top level event, having 0 typically means something is - * drastically wrong with the trace and we should just give up early and loudly. 
- * - * @param {Lantern.TraceEvent[]} events - */ - static assertHasToplevelEvents(events) { - const hasToplevelTask = events.some(this.isScheduleableTask); - if (!hasToplevelTask) { - throw new Error('Could not find any top level events'); - } - } - - /** - * @param {Lantern.TraceEvent[]} mainThreadEvents - * @return {Array} - */ - static getCPUNodes(mainThreadEvents) { - /** @type {Array} */ - const nodes = []; - let i = 0; - - PageDependencyGraph.assertHasToplevelEvents(mainThreadEvents); - - while (i < mainThreadEvents.length) { - const evt = mainThreadEvents[i]; - i++; - - // Skip all trace events that aren't schedulable tasks with sizable duration - if (!PageDependencyGraph.isScheduleableTask(evt) || !evt.dur) { - continue; - } - - /** @type {number|undefined} */ - let correctedEndTs = undefined; - - // Capture all events that occurred within the task - /** @type {Array} */ - const children = []; - for ( - const endTime = evt.ts + evt.dur; - i < mainThreadEvents.length && mainThreadEvents[i].ts < endTime; - i++ - ) { - const event = mainThreadEvents[i]; - - // Temporary fix for a Chrome bug where some RunTask events can be overlapping. - // We correct that here be ensuring each RunTask ends at least 1 microsecond before the next - // https://github.com/GoogleChrome/lighthouse/issues/15896 - // https://issues.chromium.org/issues/329678173 - if (PageDependencyGraph.isScheduleableTask(event) && event.dur) { - correctedEndTs = event.ts - 1; - break; - } - - children.push(event); - } - - nodes.push(new CPUNode(evt, children, correctedEndTs)); - } - - return nodes; - } - - /** - * @param {NetworkNode} rootNode - * @param {NetworkNodeOutput} networkNodeOutput - */ - static linkNetworkNodes(rootNode, networkNodeOutput) { - networkNodeOutput.nodes.forEach(node => { - const directInitiatorRequest = node.request.initiatorRequest || rootNode.request; - const directInitiatorNode = - networkNodeOutput.idToNodeMap.get(directInitiatorRequest.requestId) || rootNode; - const canDependOnInitiator = - !directInitiatorNode.isDependentOn(node) && - node.canDependOn(directInitiatorNode); - const initiators = PageDependencyGraph.getNetworkInitiators(node.request); - if (initiators.length) { - initiators.forEach(initiator => { - const parentCandidates = networkNodeOutput.urlToNodeMap.get(initiator) || []; - // Only add the edge if the parent is unambiguous with valid timing and isn't circular. - if (parentCandidates.length === 1 && - parentCandidates[0].startTime <= node.startTime && - !parentCandidates[0].isDependentOn(node)) { - node.addDependency(parentCandidates[0]); - } else if (canDependOnInitiator) { - directInitiatorNode.addDependent(node); - } - }); - } else if (canDependOnInitiator) { - directInitiatorNode.addDependent(node); - } - - // Make sure the nodes are attached to the graph if the initiator information was invalid. 
- if (node !== rootNode && node.getDependencies().length === 0 && node.canDependOn(rootNode)) { - node.addDependency(rootNode); - } - - if (!node.request.redirects) return; - - const redirects = [...node.request.redirects, node.request]; - for (let i = 1; i < redirects.length; i++) { - const redirectNode = networkNodeOutput.idToNodeMap.get(redirects[i - 1].requestId); - const actualNode = networkNodeOutput.idToNodeMap.get(redirects[i].requestId); - if (actualNode && redirectNode) { - actualNode.addDependency(redirectNode); - } - } - }); - } - - /** - * @param {Node} rootNode - * @param {NetworkNodeOutput} networkNodeOutput - * @param {Array} cpuNodes - */ - static linkCPUNodes(rootNode, networkNodeOutput, cpuNodes) { - /** @type {Set} */ - const linkableResourceTypes = new Set([ - Lantern.NetworkRequestTypes.XHR, - Lantern.NetworkRequestTypes.Fetch, - Lantern.NetworkRequestTypes.Script, - ]); - - /** @param {CPUNode} cpuNode @param {string} reqId */ - function addDependentNetworkRequest(cpuNode, reqId) { - const networkNode = networkNodeOutput.idToNodeMap.get(reqId); - if (!networkNode || - // Ignore all network nodes that started before this CPU task started - // A network request that started earlier could not possibly have been started by this task - networkNode.startTime <= cpuNode.startTime) return; - const {request} = networkNode; - const resourceType = request.resourceType || - request.redirectDestination?.resourceType; - if (!linkableResourceTypes.has(resourceType)) { - // We only link some resources to CPU nodes because we observe LCP simulation - // regressions when including images, etc. - return; - } - cpuNode.addDependent(networkNode); - } - - /** - * If the node has an associated frameId, then create a dependency on the root document request - * for the frame. The task obviously couldn't have started before the frame was even downloaded. 
- * - * @param {CPUNode} cpuNode - * @param {string|undefined} frameId - */ - function addDependencyOnFrame(cpuNode, frameId) { - if (!frameId) return; - const networkNode = networkNodeOutput.frameIdToNodeMap.get(frameId); - if (!networkNode) return; - // Ignore all network nodes that started after this CPU task started - // A network request that started after could not possibly be required this task - if (networkNode.startTime >= cpuNode.startTime) return; - cpuNode.addDependency(networkNode); - } - - /** @param {CPUNode} cpuNode @param {string} url */ - function addDependencyOnUrl(cpuNode, url) { - if (!url) return; - // Allow network requests that end up to 100ms before the task started - // Some script evaluations can start before the script finishes downloading - const minimumAllowableTimeSinceNetworkNodeEnd = -100 * 1000; - const candidates = networkNodeOutput.urlToNodeMap.get(url) || []; - - let minCandidate = null; - let minDistance = Infinity; - // Find the closest request that finished before this CPU task started - for (const candidate of candidates) { - // Explicitly ignore all requests that started after this CPU node - // A network request that started after this task started cannot possibly be a dependency - if (cpuNode.startTime <= candidate.startTime) return; - - const distance = cpuNode.startTime - candidate.endTime; - if (distance >= minimumAllowableTimeSinceNetworkNodeEnd && distance < minDistance) { - minCandidate = candidate; - minDistance = distance; - } - } - - if (!minCandidate) return; - cpuNode.addDependency(minCandidate); - } - - /** @type {Map} */ - const timers = new Map(); - for (const node of cpuNodes) { - for (const evt of node.childEvents) { - if (!evt.args.data) continue; - - const argsUrl = evt.args.data.url; - const stackTraceUrls = (evt.args.data.stackTrace || []).map(l => l.url).filter(Boolean); - - switch (evt.name) { - case 'TimerInstall': - // @ts-expect-error - 'TimerInstall' event means timerId exists. - timers.set(evt.args.data.timerId, node); - stackTraceUrls.forEach(url => addDependencyOnUrl(node, url)); - break; - case 'TimerFire': { - // @ts-expect-error - 'TimerFire' event means timerId exists. - const installer = timers.get(evt.args.data.timerId); - if (!installer || installer.endTime > node.startTime) break; - installer.addDependent(node); - break; - } - - case 'InvalidateLayout': - case 'ScheduleStyleRecalculation': - addDependencyOnFrame(node, evt.args.data.frame); - stackTraceUrls.forEach(url => addDependencyOnUrl(node, url)); - break; - - case 'EvaluateScript': - addDependencyOnFrame(node, evt.args.data.frame); - // @ts-expect-error - 'EvaluateScript' event means argsUrl is defined. - addDependencyOnUrl(node, argsUrl); - stackTraceUrls.forEach(url => addDependencyOnUrl(node, url)); - break; - - case 'XHRReadyStateChange': - // Only create the dependency if the request was completed - // 'XHRReadyStateChange' event means readyState is defined. - if (evt.args.data.readyState !== 4) break; - - // @ts-expect-error - 'XHRReadyStateChange' event means argsUrl is defined. - addDependencyOnUrl(node, argsUrl); - stackTraceUrls.forEach(url => addDependencyOnUrl(node, url)); - break; - - case 'FunctionCall': - case 'v8.compile': - addDependencyOnFrame(node, evt.args.data.frame); - // @ts-expect-error - events mean argsUrl is defined. 
- addDependencyOnUrl(node, argsUrl); - break; - - case 'ParseAuthorStyleSheet': - addDependencyOnFrame(node, evt.args.data.frame); - // @ts-expect-error - 'ParseAuthorStyleSheet' event means styleSheetUrl is defined. - addDependencyOnUrl(node, evt.args.data.styleSheetUrl); - break; - - case 'ResourceSendRequest': - addDependencyOnFrame(node, evt.args.data.frame); - // @ts-expect-error - 'ResourceSendRequest' event means requestId is defined. - addDependentNetworkRequest(node, evt.args.data.requestId); - stackTraceUrls.forEach(url => addDependencyOnUrl(node, url)); - break; - } - } - - // Nodes starting before the root node cannot depend on it. - if (node.getNumberOfDependencies() === 0 && node.canDependOn(rootNode)) { - node.addDependency(rootNode); - } - } - - // Second pass to prune the graph of short tasks. - const minimumEvtDur = SIGNIFICANT_DUR_THRESHOLD_MS * 1000; - let foundFirstLayout = false; - let foundFirstPaint = false; - let foundFirstParse = false; - - for (const node of cpuNodes) { - // Don't prune if event is the first ParseHTML/Layout/Paint. - // See https://github.com/GoogleChrome/lighthouse/issues/9627#issuecomment-526699524 for more. - let isFirst = false; - if (!foundFirstLayout && node.childEvents.some(evt => evt.name === 'Layout')) { - isFirst = foundFirstLayout = true; - } - if (!foundFirstPaint && node.childEvents.some(evt => evt.name === 'Paint')) { - isFirst = foundFirstPaint = true; - } - if (!foundFirstParse && node.childEvents.some(evt => evt.name === 'ParseHTML')) { - isFirst = foundFirstParse = true; - } - - if (isFirst || node.duration >= minimumEvtDur) { - // Don't prune this node. The task is long / important so it will impact simulation. - continue; - } - - // Prune the node if it isn't highly connected to minimize graph size. Rewiring the graph - // here replaces O(M + N) edges with (M * N) edges, which is fine if either M or N is at - // most 1. - if (node.getNumberOfDependencies() === 1 || node.getNumberOfDependents() <= 1) { - PageDependencyGraph._pruneNode(node); - } - } - } - - /** - * Removes the given node from the graph, but retains all paths between its dependencies and - * dependents. - * @param {Node} node - */ - static _pruneNode(node) { - const dependencies = node.getDependencies(); - const dependents = node.getDependents(); - for (const dependency of dependencies) { - node.removeDependency(dependency); - for (const dependent of dependents) { - dependency.addDependent(dependent); - } - } - for (const dependent of dependents) { - node.removeDependent(dependent); - } - } - - /** - * TODO(15841): remove when CDT backend is gone. until then, this is a useful debugging tool - * to find delta between using CDP or the trace to create the network requests. - * - * When a test fails using the trace backend, I enabled this debug method and copied the network - * requests when CDP was used, then when trace is used, and diff'd them. This method helped - * remove non-logical differences from the comparison (order of properties, slight rounding - * discrepancies, removing object cycles, etc). - * - * When using for a unit test, make sure to do `.only` so you are getting what you expect. 
- * @param {Lantern.NetworkRequest[]} lanternRequests - * @return {never} - */ - static _debugNormalizeRequests(lanternRequests) { - for (const request of lanternRequests) { - request.rendererStartTime = Math.round(request.rendererStartTime * 1000) / 1000; - request.networkRequestTime = Math.round(request.networkRequestTime * 1000) / 1000; - request.responseHeadersEndTime = Math.round(request.responseHeadersEndTime * 1000) / 1000; - request.networkEndTime = Math.round(request.networkEndTime * 1000) / 1000; - } - - for (const r of lanternRequests) { - delete r.rawRequest; - if (r.initiatorRequest) { - // @ts-expect-error - r.initiatorRequest = {id: r.initiatorRequest.requestId}; - } - if (r.redirectDestination) { - // @ts-expect-error - r.redirectDestination = {id: r.redirectDestination.requestId}; - } - if (r.redirectSource) { - // @ts-expect-error - r.redirectSource = {id: r.redirectSource.requestId}; - } - if (r.redirects) { - // @ts-expect-error - r.redirects = r.redirects.map(r2 => r2.requestId); - } - } - /** @type {Lantern.NetworkRequest[]} */ - const requests = lanternRequests.map(r => ({ - requestId: r.requestId, - connectionId: r.connectionId, - connectionReused: r.connectionReused, - url: r.url, - protocol: r.protocol, - parsedURL: r.parsedURL, - documentURL: r.documentURL, - rendererStartTime: r.rendererStartTime, - networkRequestTime: r.networkRequestTime, - responseHeadersEndTime: r.responseHeadersEndTime, - networkEndTime: r.networkEndTime, - transferSize: r.transferSize, - resourceSize: r.resourceSize, - fromDiskCache: r.fromDiskCache, - fromMemoryCache: r.fromMemoryCache, - finished: r.finished, - statusCode: r.statusCode, - redirectSource: r.redirectSource, - redirectDestination: r.redirectDestination, - redirects: r.redirects, - failed: r.failed, - initiator: r.initiator, - timing: r.timing ? { - requestTime: r.timing.requestTime, - proxyStart: r.timing.proxyStart, - proxyEnd: r.timing.proxyEnd, - dnsStart: r.timing.dnsStart, - dnsEnd: r.timing.dnsEnd, - connectStart: r.timing.connectStart, - connectEnd: r.timing.connectEnd, - sslStart: r.timing.sslStart, - sslEnd: r.timing.sslEnd, - workerStart: r.timing.workerStart, - workerReady: r.timing.workerReady, - workerFetchStart: r.timing.workerFetchStart, - workerRespondWithSettled: r.timing.workerRespondWithSettled, - sendStart: r.timing.sendStart, - sendEnd: r.timing.sendEnd, - pushStart: r.timing.pushStart, - pushEnd: r.timing.pushEnd, - receiveHeadersStart: r.timing.receiveHeadersStart, - receiveHeadersEnd: r.timing.receiveHeadersEnd, - } : r.timing, - resourceType: r.resourceType, - mimeType: r.mimeType, - priority: r.priority, - initiatorRequest: r.initiatorRequest, - frameId: r.frameId, - fromWorker: r.fromWorker, - isLinkPreload: r.isLinkPreload, - serverResponseTime: r.serverResponseTime, - })).filter(r => !r.fromWorker); - // eslint-disable-next-line no-unused-vars - const debug = requests; - // Set breakpoint here. - // Copy `debug` and compare with https://www.diffchecker.com/text-compare/ - process.exit(1); - } - - /** - * @param {Lantern.TraceEvent[]} mainThreadEvents - * @param {Lantern.NetworkRequest[]} networkRequests - * @param {Lantern.Simulation.URL} URL - * @return {Node} - */ - static createGraph(mainThreadEvents, networkRequests, URL) { - // This is for debugging trace/devtoolslog network records. 
- // const debug = PageDependencyGraph._debugNormalizeRequests(networkRequests); - const networkNodeOutput = PageDependencyGraph.getNetworkNodeOutput(networkRequests); - const cpuNodes = PageDependencyGraph.getCPUNodes(mainThreadEvents); - const {requestedUrl, mainDocumentUrl} = URL; - if (!requestedUrl) throw new Error('requestedUrl is required to get the root request'); - if (!mainDocumentUrl) throw new Error('mainDocumentUrl is required to get the main resource'); - - const rootRequest = - Lantern.Simulation.NetworkAnalyzer.findResourceForUrl(networkRequests, requestedUrl); - if (!rootRequest) throw new Error('rootRequest not found'); - const rootNode = networkNodeOutput.idToNodeMap.get(rootRequest.requestId); - if (!rootNode) throw new Error('rootNode not found'); - const mainDocumentRequest = - Lantern.Simulation.NetworkAnalyzer.findLastDocumentForUrl(networkRequests, mainDocumentUrl); - if (!mainDocumentRequest) throw new Error('mainDocumentRequest not found'); - const mainDocumentNode = networkNodeOutput.idToNodeMap.get(mainDocumentRequest.requestId); - if (!mainDocumentNode) throw new Error('mainDocumentNode not found'); - - PageDependencyGraph.linkNetworkNodes(rootNode, networkNodeOutput); - PageDependencyGraph.linkCPUNodes(rootNode, networkNodeOutput, cpuNodes); - mainDocumentNode.setIsMainDocument(true); - - if (NetworkNode.hasCycle(rootNode)) { - throw new Error('Invalid dependency graph created, cycle detected'); - } - - return rootNode; - } - - /** - * @param {Node} rootNode - */ - static printGraph(rootNode, widthInCharacters = 100) { - /** @param {string} str @param {number} target */ - function padRight(str, target, padChar = ' ') { - return str + padChar.repeat(Math.max(target - str.length, 0)); - } - - /** @type {Array} */ - const nodes = []; - rootNode.traverse(node => nodes.push(node)); - nodes.sort((a, b) => a.startTime - b.startTime); - - const min = nodes[0].startTime; - const max = nodes.reduce((max, node) => Math.max(max, node.endTime), 0); - - const totalTime = max - min; - const timePerCharacter = totalTime / widthInCharacters; - nodes.forEach(node => { - const offset = Math.round((node.startTime - min) / timePerCharacter); - const length = Math.ceil((node.endTime - node.startTime) / timePerCharacter); - const bar = padRight('', offset) + padRight('', length, '='); - - // @ts-expect-error -- disambiguate displayName from across possible Node types. - const displayName = node.request ? 
node.request.url : node.type; - // eslint-disable-next-line - console.log(padRight(bar, widthInCharacters), `| ${displayName.slice(0, 30)}`); - }); - } -} - -export {PageDependencyGraph}; diff --git a/core/lib/lantern/PageDependencyGraph.test.js b/core/lib/lantern/PageDependencyGraph.test.js deleted file mode 100644 index efcf22883318..000000000000 --- a/core/lib/lantern/PageDependencyGraph.test.js +++ /dev/null @@ -1,654 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import assert from 'assert/strict'; - -import * as Lantern from './lantern.js'; - -const {PageDependencyGraph, NetworkRequestTypes} = Lantern; - -function createRequest( - requestId, - url, - rendererStartTime = 0, - initiator = null, - resourceType = NetworkRequestTypes.Document, - fromWorker = false -) { - const networkEndTime = rendererStartTime + 50; - return { - requestId, - url, - rendererStartTime, - networkEndTime, - initiator, - resourceType, - fromWorker, - }; -} - -const TOPLEVEL_TASK_NAME = 'TaskQueueManager::ProcessTaskFromWorkQueue'; -describe('PageDependencyGraph', () => { - let traceEvents; - let URL; - - function addTaskEvents(startTs, duration, evts) { - const mainEvent = { - name: TOPLEVEL_TASK_NAME, - tid: 1, - ts: startTs * 1000, - dur: duration * 1000, - args: {}, - }; - - traceEvents.push(mainEvent); - - let i = 0; - for (const evt of evts) { - i++; - traceEvents.push({ - name: evt.name, - ts: (evt.ts * 1000) || (startTs * 1000 + i), - args: {data: evt.data}, - }); - } - } - - beforeEach(() => { - traceEvents = []; - URL = {requestedUrl: 'https://example.com/', mainDocumentUrl: 'https://example.com/'}; - }); - - describe('#getNetworkNodeOutput', () => { - const request1 = createRequest(1, 'https://example.com/'); - const request2 = createRequest(2, 'https://example.com/page'); - const request3 = createRequest(3, 'https://example.com/page'); - const networkRequests = [request1, request2, request3]; - - it('should create network nodes', () => { - const networkNodeOutput = PageDependencyGraph.getNetworkNodeOutput(networkRequests); - for (let i = 0; i < networkRequests.length; i++) { - const node = networkNodeOutput.nodes[i]; - assert.ok(node, `did not create node at index ${i}`); - assert.equal(node.id, i + 1); - assert.equal(node.type, 'network'); - assert.equal(node.request, networkRequests[i]); - } - }); - - it('should ignore worker requests', () => { - const workerRequest = createRequest(4, 'https://example.com/worker.js', 0, null, 'Script', true); - const recordsWithWorker = [ - ...networkRequests, - workerRequest, - ]; - - const networkNodeOutput = PageDependencyGraph.getNetworkNodeOutput(recordsWithWorker); - - expect(networkNodeOutput.nodes).toHaveLength(3); - expect(networkNodeOutput.nodes.map(node => node.request)).not.toContain(workerRequest); - }); - - it('should index nodes by ID', () => { - const networkNodeOutput = PageDependencyGraph.getNetworkNodeOutput(networkRequests); - const indexedById = networkNodeOutput.idToNodeMap; - for (const request of networkRequests) { - assert.equal(indexedById.get(request.requestId).request, request); - } - }); - - it('should index nodes by URL', () => { - const networkNodeOutput = PageDependencyGraph.getNetworkNodeOutput(networkRequests); - const nodes = networkNodeOutput.nodes; - const indexedByUrl = networkNodeOutput.urlToNodeMap; - assert.deepEqual(indexedByUrl.get('https://example.com/'), [nodes[0]]); - assert.deepEqual(indexedByUrl.get('https://example.com/page'), [nodes[1], nodes[2]]); - }); - - 
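As a companion to the indexing assertions above, here is a minimal standalone sketch of the URL bucketing they rely on. It is illustrative only; the removed getNetworkNodeOutput built the same kind of map, but with NetworkNode instances rather than plain request objects.

```js
// Minimal sketch: group requests by URL so that duplicate URLs share one bucket.
/**
 * @param {Array<{requestId: number, url: string}>} requests
 * @return {Map<string, Array<{requestId: number, url: string}>>}
 */
function indexByUrl(requests) {
  const urlToRequests = new Map();
  for (const request of requests) {
    const list = urlToRequests.get(request.url) || [];
    list.push(request);
    urlToRequests.set(request.url, list);
  }
  return urlToRequests;
}

// With the three fixture requests above, 'https://example.com/page' maps to two entries,
// matching the deepEqual assertion against [nodes[1], nodes[2]].
```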
it('should index nodes by frame', () => { - const networkNodeOutput = PageDependencyGraph.getNetworkNodeOutput([ - {...createRequest(1, 'https://example.com/'), documentURL: 'https://example.com/', frameId: 'A'}, - {...createRequest(2, 'https://example.com/page'), documentURL: 'https://example.com/', frameId: 'A'}, - {...createRequest(3, 'https://example.com/page2'), documentURL: 'https://example.com/page2', frameId: 'C', - resourceType: NetworkRequestTypes.XHR}, - {...createRequest(4, 'https://example.com/page3'), documentURL: 'https://example.com/page3', frameId: 'D'}, - {...createRequest(4, 'https://example.com/page4'), documentURL: 'https://example.com/page4', frameId: undefined}, - {...createRequest(4, 'https://example.com/page5'), documentURL: 'https://example.com/page5', frameId: 'collision'}, - {...createRequest(4, 'https://example.com/page6'), documentURL: 'https://example.com/page6', frameId: 'collision'}, - ]); - - const nodes = networkNodeOutput.nodes; - const indexedByFrame = networkNodeOutput.frameIdToNodeMap; - expect([...indexedByFrame.entries()]).toEqual([ - ['A', nodes[0]], - ['D', nodes[3]], - ['collision', null], - ]); - }); - }); - - describe('#getCPUNodes', () => { - it('should create CPU nodes', () => { - addTaskEvents(0, 100, [ - {name: 'MyCustomEvent'}, - {name: 'OtherEvent'}, - {name: 'OutsideTheWindow', ts: 200}, - {name: 'OrphanedEvent'}, // should be ignored since we stopped at OutsideTheWindow - ]); - - addTaskEvents(250, 50, [ - {name: 'LaterEvent'}, - ]); - - assert.equal(traceEvents.length, 7); - const nodes = PageDependencyGraph.getCPUNodes(traceEvents); - assert.equal(nodes.length, 2); - - const node1 = nodes[0]; - assert.equal(node1.id, '1.0'); - assert.equal(node1.type, 'cpu'); - assert.equal(node1.event, traceEvents[0]); - assert.equal(node1.childEvents.length, 2); - assert.equal(node1.childEvents[1].name, 'OtherEvent'); - - const node2 = nodes[1]; - assert.equal(node2.id, '1.250000'); - assert.equal(node2.type, 'cpu'); - assert.equal(node2.event, traceEvents[5]); - assert.equal(node2.childEvents.length, 1); - assert.equal(node2.childEvents[0].name, 'LaterEvent'); - }); - - it('should correct overlapping tasks', () => { - addTaskEvents(0, 500, [ - {name: 'MyCustomEvent'}, - {name: 'OtherEvent'}, - ]); - - addTaskEvents(400, 50, [ - {name: 'OverlappingEvent'}, - ]); - - assert.equal(traceEvents.length, 5); - const nodes = PageDependencyGraph.getCPUNodes(traceEvents); - assert.equal(nodes.length, 2); - - const node1 = nodes[0]; - assert.equal(node1.id, '1.0'); - assert.equal(node1.type, 'cpu'); - assert.equal(node1.event, traceEvents[0]); - assert.equal(node1.childEvents.length, 2); - assert.equal(node1.childEvents[0].name, 'MyCustomEvent'); - assert.equal(node1.childEvents[1].name, 'OtherEvent'); - - const node2 = nodes[1]; - assert.equal(node2.id, '1.400000'); - assert.equal(node2.type, 'cpu'); - assert.equal(node2.event, traceEvents[3]); - assert.equal(node2.childEvents.length, 1); - assert.equal(node2.childEvents[0].name, 'OverlappingEvent'); - }); - }); - - describe('#createGraph', () => { - it('should compute a simple network graph', () => { - const request1 = createRequest(1, 'https://example.com/', 0); - const request2 = createRequest(2, 'https://example.com/page', 5); - const request3 = createRequest(3, 'https://example.com/page2', 5); - const request4 = createRequest(4, 'https://example.com/page3', 10, {url: 'https://example.com/page'}); - const networkRequests = [request1, request2, request3, request4]; - - addTaskEvents(0, 0, []); - - const 
graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, URL); - const nodes = []; - graph.traverse(node => nodes.push(node)); - - assert.equal(nodes.length, 4); - assert.deepEqual(nodes.map(node => node.id), [1, 2, 3, 4]); - assert.deepEqual(nodes[0].getDependencies(), []); - assert.deepEqual(nodes[1].getDependencies(), [nodes[0]]); - assert.deepEqual(nodes[2].getDependencies(), [nodes[0]]); - assert.deepEqual(nodes[3].getDependencies(), [nodes[1]]); - }); - - it('should compute a simple network and CPU graph', () => { - const request1 = createRequest(1, 'https://example.com/', 0); - const request2 = createRequest(2, 'https://example.com/page', 50); - const request3 = createRequest(3, 'https://example.com/page2', 50); - const request4 = createRequest(4, 'https://example.com/page3', 300, null, NetworkRequestTypes.XHR); - const networkRequests = [request1, request2, request3, request4]; - - addTaskEvents(200, 200, [ - {name: 'EvaluateScript', data: {url: 'https://example.com/page'}}, - {name: 'ResourceSendRequest', data: {requestId: 4}}, - ]); - - addTaskEvents(700, 50, [ - {name: 'InvalidateLayout', data: {stackTrace: [{url: 'https://example.com/page2'}]}}, - {name: 'XHRReadyStateChange', data: {readyState: 4, url: 'https://example.com/page3'}}, - ]); - - const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, URL); - const nodes = []; - graph.traverse(node => nodes.push(node)); - - const getIds = nodes => nodes.map(node => node.id); - const getDependencyIds = node => getIds(node.getDependencies()); - - assert.equal(nodes.length, 6); - assert.deepEqual(getIds(nodes), [1, 2, 3, 4, '1.200000', '1.700000']); - assert.deepEqual(getDependencyIds(nodes[0]), []); - assert.deepEqual(getDependencyIds(nodes[1]), [1]); - assert.deepEqual(getDependencyIds(nodes[2]), [1]); - assert.deepEqual(getDependencyIds(nodes[3]), [1, '1.200000']); - assert.deepEqual(getDependencyIds(nodes[4]), [2]); - assert.deepEqual(getDependencyIds(nodes[5]), [3, 4]); - }); - - it('should compute a network graph with duplicate URLs', () => { - const request1 = createRequest(1, 'https://example.com/', 0); - const request2 = createRequest(2, 'https://example.com/page', 5); - const request3 = createRequest(3, 'https://example.com/page', 5); // duplicate URL - const request4 = createRequest(4, 'https://example.com/page3', 10, {url: 'https://example.com/page'}); - const networkRequests = [request1, request2, request3, request4]; - - addTaskEvents(0, 0, []); - - const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, URL); - const nodes = []; - graph.traverse(node => nodes.push(node)); - - assert.equal(nodes.length, 4); - assert.deepEqual(nodes.map(node => node.id), [1, 2, 3, 4]); - assert.deepEqual(nodes[0].getDependencies(), []); - assert.deepEqual(nodes[1].getDependencies(), [nodes[0]]); - assert.deepEqual(nodes[2].getDependencies(), [nodes[0]]); - assert.deepEqual(nodes[3].getDependencies(), [nodes[0]]); // should depend on rootNode instead - }); - - it('should be forgiving without cyclic dependencies', () => { - const request1 = createRequest(1, 'https://example.com/', 0); - const request2 = createRequest(2, 'https://example.com/page', 250, null, NetworkRequestTypes.XHR); - const request3 = createRequest(3, 'https://example.com/page2', 210); - const request4 = createRequest(4, 'https://example.com/page3', 590); - const request5 = createRequest(5, 'https://example.com/page4', 595, null, NetworkRequestTypes.XHR); - const networkRequests = [request1, request2, request3, 
request4, request5]; - - addTaskEvents(200, 200, [ - // CPU 1.2 should depend on Network 1 - {name: 'EvaluateScript', data: {url: 'https://example.com/'}}, - - // Network 2 should depend on CPU 1.2, but 1.2 should not depend on Network 1 - {name: 'ResourceSendRequest', data: {requestId: 2}}, - {name: 'XHRReadyStateChange', data: {readyState: 4, url: 'https://example.com/page'}}, - - // CPU 1.2 should not depend on Network 3 because it starts after CPU 1.2 - {name: 'EvaluateScript', data: {url: 'https://example.com/page2'}}, - ]); - - addTaskEvents(600, 150, [ - // CPU 1.6 should depend on Network 4 even though it ends at 410ms - {name: 'InvalidateLayout', data: {stackTrace: [{url: 'https://example.com/page3'}]}}, - // Network 5 should not depend on CPU 1.6 because it started before CPU 1.6 - {name: 'ResourceSendRequest', data: {requestId: 5}}, - ]); - - const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, URL); - const nodes = []; - graph.traverse(node => nodes.push(node)); - - const getDependencyIds = node => node.getDependencies().map(node => node.id); - - assert.equal(nodes.length, 7); - assert.deepEqual(getDependencyIds(nodes[0]), []); - assert.deepEqual(getDependencyIds(nodes[1]), [1, '1.200000']); - assert.deepEqual(getDependencyIds(nodes[2]), [1]); - assert.deepEqual(getDependencyIds(nodes[3]), [1]); - assert.deepEqual(getDependencyIds(nodes[4]), [1]); - assert.deepEqual(getDependencyIds(nodes[5]), [1]); - assert.deepEqual(getDependencyIds(nodes[6]), [4]); - }); - - it('should not install timer dependency on itself', () => { - const request1 = createRequest(1, 'https://example.com/', 0); - const networkRequests = [request1]; - - addTaskEvents(200, 200, [ - // CPU 1.2 should depend on Network 1 - {name: 'EvaluateScript', data: {url: 'https://example.com/'}}, - // CPU 1.2 will install and fire it's own timer, but should not depend on itself - {name: 'TimerInstall', data: {timerId: 'timer1'}}, - {name: 'TimerFire', data: {timerId: 'timer1'}}, - ]); - - const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, URL); - const nodes = []; - graph.traverse(node => nodes.push(node)); - - const getDependencyIds = node => node.getDependencies().map(node => node.id); - - assert.equal(nodes.length, 2); - assert.deepEqual(getDependencyIds(nodes[0]), []); - assert.deepEqual(getDependencyIds(nodes[1]), [1]); - }); - - it('should prune short tasks', () => { - const request0 = createRequest(0, 'https://example.com/page0', 0); - const request1 = createRequest(1, 'https://example.com/', 100, null, NetworkRequestTypes.Script); - const request2 = createRequest(2, 'https://example.com/page', 200, null, NetworkRequestTypes.XHR); - const request3 = createRequest(3, 'https://example.com/page2', 300, null, NetworkRequestTypes.Script); - const request4 = createRequest(4, 'https://example.com/page3', 400, null, NetworkRequestTypes.XHR); - const networkRequests = [request0, request1, request2, request3, request4]; - URL = {requestedUrl: 'https://example.com/page0', mainDocumentUrl: 'https://example.com/page0'}; - - // Long task, should be kept in the output. 
- addTaskEvents(120, 50, [ - {name: 'EvaluateScript', data: {url: 'https://example.com/'}}, - {name: 'ResourceSendRequest', data: {requestId: 2}}, - {name: 'XHRReadyStateChange', data: {readyState: 4, url: 'https://example.com/page'}}, - ]); - - // Short task, should be pruned, but the 3->4 relationship should be retained - addTaskEvents(350, 5, [ - {name: 'EvaluateScript', data: {url: 'https://example.com/page2'}}, - {name: 'ResourceSendRequest', data: {requestId: 4}}, - {name: 'XHRReadyStateChange', data: {readyState: 4, url: 'https://example.com/page3'}}, - ]); - - const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, URL); - const nodes = []; - graph.traverse(node => nodes.push(node)); - - const getDependencyIds = node => node.getDependencies().map(node => node.id); - - assert.equal(nodes.length, 6); - - assert.deepEqual(getDependencyIds(nodes[0]), []); - assert.deepEqual(getDependencyIds(nodes[1]), [0]); - assert.deepEqual(getDependencyIds(nodes[2]), [0, '1.120000']); - assert.deepEqual(getDependencyIds(nodes[3]), [0]); - assert.deepEqual(getDependencyIds(nodes[4]), [0, 3]); - - assert.equal('1.120000', nodes[5].id); - assert.deepEqual(getDependencyIds(nodes[5]), [1]); - }); - - it('should not prune highly-connected short tasks', () => { - const request0 = createRequest(0, 'https://example.com/page0', 0); - const request1 = { - ...createRequest(1, 'https://example.com/', 100, null, NetworkRequestTypes.Document), - documentURL: 'https://example.com/', - frameId: 'frame1', - }; - const request2 = { - ...createRequest(2, 'https://example.com/page', 200, null, NetworkRequestTypes.Script), - documentURL: 'https://example.com/', - frameId: 'frame1', - }; - const request3 = createRequest(3, 'https://example.com/page2', 300, null, NetworkRequestTypes.XHR); - const request4 = createRequest(4, 'https://example.com/page3', 400, null, NetworkRequestTypes.XHR); - const networkRequests = [request0, request1, request2, request3, request4]; - URL = {requestedUrl: 'https://example.com/page0', mainDocumentUrl: 'https://example.com/page0'}; - - // Short task, evaluates script (2) and sends two XHRs. 
- addTaskEvents(220, 5, [ - {name: 'EvaluateScript', data: {url: 'https://example.com/page', frame: 'frame1'}}, - - {name: 'ResourceSendRequest', data: {requestId: 3}}, - {name: 'XHRReadyStateChange', data: {readyState: 4, url: 'https://example.com/page2'}}, - - {name: 'ResourceSendRequest', data: {requestId: 4}}, - {name: 'XHRReadyStateChange', data: {readyState: 4, url: 'https://example.com/page3'}}, - ]); - - const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, URL); - const nodes = []; - graph.traverse(node => nodes.push(node)); - - const getDependencyIds = node => node.getDependencies().map(node => node.id); - - assert.equal(nodes.length, 6); - - assert.deepEqual(getDependencyIds(nodes[0]), []); - assert.deepEqual(getDependencyIds(nodes[1]), [0]); - assert.deepEqual(getDependencyIds(nodes[2]), [0]); - assert.deepEqual(getDependencyIds(nodes[3]), [0, '1.220000']); - assert.deepEqual(getDependencyIds(nodes[4]), [0, '1.220000']); - - assert.equal('1.220000', nodes[5].id); - assert.deepEqual(getDependencyIds(nodes[5]), [1, 2]); - }); - - it('should not prune short, first tasks of critical events', () => { - const request0 = createRequest(0, 'https://example.com/page0', 0); - const networkRequests = [request0]; - URL = {requestedUrl: 'https://example.com/page0', mainDocumentUrl: 'https://example.com/page0'}; - - const makeShortEvent = firstEventName => { - const startTs = traceEvents.length * 100; - addTaskEvents(startTs, 5, [ - {name: firstEventName, data: {url: 'https://example.com/page0'}}, - ]); - }; - - const criticalEventNames = [ - 'Paint', - 'Layout', - 'ParseHTML', - ]; - for (const eventName of criticalEventNames) { - makeShortEvent(eventName); - makeShortEvent(eventName); - } - - const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, URL); - const cpuNodes = []; - graph.traverse(node => node.type === 'cpu' && cpuNodes.push(node)); - - expect(cpuNodes.map(node => { - return { - id: node.id, - name: node.childEvents[0].name, - }; - })).toEqual([ - { - id: '1.0', - name: 'Paint', - }, - { - // ID jumps by 4 between each because each node has 2 CPU tasks and we skip the 2nd of each event type - id: '1.400000', - name: 'Layout', - }, - { - id: '1.800000', - name: 'ParseHTML', - }, - ]); - }); - - it('should set isMainDocument on request with mainDocumentUrl', () => { - const request1 = createRequest(1, 'https://example.com/', 0, null, NetworkRequestTypes.Other); - const request2 = createRequest(2, 'https://example.com/page', 5, null, NetworkRequestTypes.Document); - // Add in another unrelated + early request to make sure we pick the correct chain - const request3 = createRequest(3, 'https://example.com/page2', 0, null, NetworkRequestTypes.Other); - request2.redirects = [request1]; - const networkRequests = [request1, request2, request3]; - URL = {requestedUrl: 'https://example.com/', mainDocumentUrl: 'https://example.com/page'}; - - addTaskEvents(0, 0, []); - - const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, URL); - const nodes = []; - graph.traverse(node => nodes.push(node)); - - assert.equal(nodes.length, 3); - assert.equal(nodes[0].id, 1); - assert.equal(nodes[0].isMainDocument(), false); - assert.equal(nodes[1].isMainDocument(), true); - assert.equal(nodes[2].isMainDocument(), false); - }); - - it('should link up script initiators', () => { - const request1 = createRequest(1, 'https://example.com/', 0); - const request2 = createRequest(2, 'https://example.com/page', 5); - const request3 = createRequest(3, 
'https://example.com/page2', 5); - const request4 = createRequest(4, 'https://example.com/page3', 20); - // Set multiple initiator requests through script stack. - request4.initiator = { - type: 'script', - stack: {callFrames: [{url: 'https://example.com/page'}], parent: {parent: {callFrames: [{url: 'https://example.com/page2'}]}}}, - }; - // Also set the initiatorRequest that Lighthouse's network-recorder.js creates. - // This should be ignored and only used as a fallback. - request4.initiatorRequest = request1; - const networkRequests = [request1, request2, request3, request4]; - - addTaskEvents(0, 0, []); - - const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, URL); - const nodes = []; - graph.traverse(node => nodes.push(node)); - - assert.equal(nodes.length, 4); - assert.deepEqual(nodes.map(node => node.id), [1, 2, 3, 4]); - assert.deepEqual(nodes[0].getDependencies(), []); - assert.deepEqual(nodes[1].getDependencies(), [nodes[0]]); - assert.deepEqual(nodes[2].getDependencies(), [nodes[0]]); - assert.deepEqual(nodes[3].getDependencies(), [nodes[1], nodes[2]]); - }); - - it('should link up script initiators only when timing is valid', () => { - const request1 = createRequest(1, 'https://example.com/', 0); - const request2 = createRequest(2, 'https://example.com/page', 500); - const request3 = createRequest(3, 'https://example.com/page2', 500); - const request4 = createRequest(4, 'https://example.com/page3', 20); - request4.initiator = { - type: 'script', - stack: {callFrames: [{url: 'https://example.com/page'}], parent: {parent: {callFrames: [{url: 'https://example.com/page2'}]}}}, - }; - const networkRequests = [request1, request2, request3, request4]; - - addTaskEvents(0, 0, []); - - const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, URL); - const nodes = []; - graph.traverse(node => nodes.push(node)); - - assert.equal(nodes.length, 4); - assert.deepEqual(nodes.map(node => node.id), [1, 2, 3, 4]); - assert.deepEqual(nodes[0].getDependencies(), []); - assert.deepEqual(nodes[1].getDependencies(), [nodes[0]]); - assert.deepEqual(nodes[2].getDependencies(), [nodes[0]]); - assert.deepEqual(nodes[3].getDependencies(), [nodes[0]]); - }); - - it('should link up script initiators with prefetch requests', () => { - const request1 = createRequest(1, 'https://a.com/1', 0); - const request2Prefetch = createRequest(2, 'https://a.com/js', 5); - const request2Fetch = createRequest(3, 'https://a.com/js', 10); - const request3 = createRequest(4, 'https://a.com/4', 20); - // Set the initiator to an ambiguous URL (there are 2 requests for https://a.com/js) - request3.initiator = { - type: 'script', - stack: {callFrames: [{url: 'https://a.com/js'}], parent: {parent: {callFrames: [{url: 'js'}]}}}, - }; - // Set the initiatorRequest that it should fallback to. 
- request3.initiatorRequest = request2Fetch; - const networkRequests = [request1, request2Prefetch, request2Fetch, request3]; - URL = {requestedUrl: 'https://a.com/1', mainDocumentUrl: 'https://a.com/1'}; - - addTaskEvents(0, 0, []); - - const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, URL); - const nodes = []; - graph.traverse(node => nodes.push(node)); - - assert.equal(nodes.length, 4); - assert.deepEqual(nodes.map(node => node.id), [1, 2, 3, 4]); - assert.deepEqual(nodes[0].getDependencies(), []); - assert.deepEqual(nodes[1].getDependencies(), [nodes[0]]); - assert.deepEqual(nodes[2].getDependencies(), [nodes[0]]); - assert.deepEqual(nodes[3].getDependencies(), [nodes[2]]); - }); - - it('should not link up initiators with circular dependencies', () => { - const rootRequest = createRequest(1, 'https://a.com', 0); - // jsRequest1 initiated by jsRequest2 - // *AND* - // jsRequest2 initiated by jsRequest1 - const jsRequest1 = createRequest(2, 'https://a.com/js1', 1, {url: 'https://a.com/js2'}); - const jsRequest2 = createRequest(3, 'https://a.com/js2', 1, {url: 'https://a.com/js1'}); - const networkRequests = [rootRequest, jsRequest1, jsRequest2]; - URL = {requestedUrl: 'https://a.com', mainDocumentUrl: 'https://a.com'}; - - addTaskEvents(0, 0, []); - - const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, URL); - const nodes = []; - graph.traverse(node => nodes.push(node)); - nodes.sort((a, b) => a.id - b.id); - - assert.equal(nodes.length, 3); - assert.deepEqual(nodes.map(node => node.id), [1, 2, 3]); - assert.deepEqual(nodes[0].getDependencies(), []); - // We don't know which of the initiators to trust in a cycle, so for now we - // trust the earliest one (mostly because it's simplest). - // In the wild so far we've only seen this for self-referential relationships. 
- // If the evidence changes, then feel free to change these expectations :) - assert.deepEqual(nodes[1].getDependencies(), [nodes[2]]); - assert.deepEqual(nodes[2].getDependencies(), [nodes[0]]); - }); - - it('should not link up initiatorRequests with circular dependencies', () => { - const rootRequest = createRequest(1, 'https://a.com', 0); - // jsRequest1 initiated by jsRequest2 - // *AND* - // jsRequest2 initiated by jsRequest1 - const jsRequest1 = createRequest(2, 'https://a.com/js1', 1); - const jsRequest2 = createRequest(3, 'https://a.com/js2', 1); - jsRequest1.initiatorRequest = jsRequest2; - jsRequest2.initiatorRequest = jsRequest1; - const networkRequests = [rootRequest, jsRequest1, jsRequest2]; - URL = {requestedUrl: 'https://a.com', mainDocumentUrl: 'https://a.com'}; - - addTaskEvents(0, 0, []); - - const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, URL); - const nodes = []; - graph.traverse(node => nodes.push(node)); - nodes.sort((a, b) => a.id - b.id); - - assert.equal(nodes.length, 3); - assert.deepEqual(nodes.map(node => node.id), [1, 2, 3]); - assert.deepEqual(nodes[0].getDependencies(), []); - assert.deepEqual(nodes[1].getDependencies(), [nodes[2]]); - assert.deepEqual(nodes[2].getDependencies(), [nodes[0]]); - }); - - it('should find root if it is not the first node', () => { - const request1 = createRequest(1, 'https://example.com/', 0, null, NetworkRequestTypes.Other); - const request2 = createRequest(2, 'https://example.com/page', 5, null, NetworkRequestTypes.Document); - const networkRequests = [request1, request2]; - URL = {requestedUrl: 'https://example.com/page', mainDocumentUrl: 'https://example.com/page'}; - - // Evaluated before root request. - addTaskEvents(0.1, 50, [ - {name: 'EvaluateScript'}, - ]); - - const graph = PageDependencyGraph.createGraph(traceEvents, networkRequests, URL); - const nodes = []; - graph.traverse(node => nodes.push(node)); - - assert.equal(nodes.length, 1); - assert.deepEqual(nodes.map(node => node.id), [2]); - assert.deepEqual(nodes[0].getDependencies(), []); - assert.deepEqual(nodes[0].getDependents(), []); - }); - }); -}); diff --git a/core/lib/lantern/TBTUtils.js b/core/lib/lantern/TBTUtils.js deleted file mode 100644 index 98b24d2c0335..000000000000 --- a/core/lib/lantern/TBTUtils.js +++ /dev/null @@ -1,76 +0,0 @@ -/** - * @license - * Copyright 2021 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -const BLOCKING_TIME_THRESHOLD = 50; - -/** - * For TBT, We only want to consider tasks that fall in our time range - * - FCP and TTI for navigation mode - * - Trace start and trace end for timespan mode - * - * FCP is picked as `startTimeMs` because there is little risk of user input happening - * before FCP so Long Queuing Qelay regions do not harm user experience. Developers should be - * optimizing to reach FCP as fast as possible without having to worry about task lengths. - * - * TTI is picked as `endTimeMs` because we want a well defined end point for page load. - * - * @param {{start: number, end: number, duration: number}} event - * @param {number} startTimeMs Should be FCP in navigation mode and the trace start time in timespan mode - * @param {number} endTimeMs Should be TTI in navigation mode and the trace end time in timespan mode - * @param {{start: number, end: number, duration: number}} [topLevelEvent] Leave unset if `event` is top level. Has no effect if `event` has the same duration as `topLevelEvent`. 
- * @return {number} - */ -function calculateTbtImpactForEvent(event, startTimeMs, endTimeMs, topLevelEvent) { - let threshold = BLOCKING_TIME_THRESHOLD; - - // If a task is not top level, it doesn't make sense to subtract the entire 50ms - // blocking threshold from the event. - // - // e.g. A 80ms top level task with two 40ms children should attribute some blocking - // time to the 40ms tasks even though they do not meet the 50ms threshold. - // - // The solution is to scale the threshold for child events to be considered blocking. - if (topLevelEvent) threshold *= (event.duration / topLevelEvent.duration); - - if (event.duration < threshold) return 0; - if (event.end < startTimeMs) return 0; - if (event.start > endTimeMs) return 0; - - // Perform the clipping and then calculate Blocking Region. So if we have a 150ms task - // [0, 150] and `startTimeMs` is at 50ms, we first clip the task to [50, 150], and then - // calculate the Blocking Region to be [100, 150]. The rational here is that tasks before - // the start time are unimportant, so we care whether the main thread is busy more than - // 50ms at a time only after the start time. - const clippedStart = Math.max(event.start, startTimeMs); - const clippedEnd = Math.min(event.end, endTimeMs); - const clippedDuration = clippedEnd - clippedStart; - if (clippedDuration < threshold) return 0; - - return clippedDuration - threshold; -} - -/** - * @param {Array<{start: number, end: number, duration: number}>} topLevelEvents - * @param {number} startTimeMs - * @param {number} endTimeMs - * @return {number} - */ -function calculateSumOfBlockingTime(topLevelEvents, startTimeMs, endTimeMs) { - if (endTimeMs <= startTimeMs) return 0; - - let sumBlockingTime = 0; - for (const event of topLevelEvents) { - sumBlockingTime += calculateTbtImpactForEvent(event, startTimeMs, endTimeMs); - } - - return sumBlockingTime; -} - -export { - BLOCKING_TIME_THRESHOLD, - calculateSumOfBlockingTime, - calculateTbtImpactForEvent, -}; diff --git a/core/lib/lantern/TBTUtils.test.js b/core/lib/lantern/TBTUtils.test.js deleted file mode 100644 index cc7702e91aac..000000000000 --- a/core/lib/lantern/TBTUtils.test.js +++ /dev/null @@ -1,130 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from './lantern.js'; - -const {calculateSumOfBlockingTime} = Lantern.TBTUtils; - -describe('TotalBlockingTime utils', () => { - it('reports 0 when no task is longer than 50ms', () => { - const events = [ - {start: 1000, end: 1050, duration: 50}, - {start: 2000, end: 2010, duration: 10}, - ]; - - const fcpTimeMs = 500; - const interactiveTimeMs = 4000; - - expect( - calculateSumOfBlockingTime(events, fcpTimeMs, interactiveTimeMs) - ).toBe(0); - }); - - it('only looks at tasks within FCP and TTI', () => { - const events = [ - {start: 1000, end: 1060, duration: 60}, - {start: 2000, end: 2100, duration: 100}, - {start: 2300, end: 2450, duration: 150}, - {start: 2600, end: 2800, duration: 200}, - ]; - - const fcpTimeMs = 1500; - const interactiveTimeMs = 2500; - - expect( - calculateSumOfBlockingTime(events, fcpTimeMs, interactiveTimeMs) - ).toBe(150); - }); - - it('clips before finding blocking regions', () => { - const fcpTimeMs = 150; - const interactiveTimeMs = 300; - - const events = [ - // The clipping is done first, so the task becomes [150, 200] after clipping and contributes - // 0ms of blocking time. 
This is in contrast to first calculating the blocking region ([100, - // 200]) and then clipping at FCP (150ms), which yields 50ms blocking time. - {start: 50, end: 200, duration: 150}, - // Similarly, the task is first clipped above to be [240, 300], and then contributes 10ms - // blocking time. - {start: 240, end: 460, duration: 120}, - ]; - - expect( - calculateSumOfBlockingTime(events, fcpTimeMs, interactiveTimeMs) - ).toBe(10); // 0ms + 10ms. - }); - - // TTI can happen in the middle of a task, for example, if TTI is at FMP which occurs as part - // of a larger task, or in the lantern case where we use estimate TTI using a different graph - // from the one used to estimate TBT. - it('clips properly if TTI falls in the middle of a task', () => { - const fcpTimeMs = 1000; - const interactiveTimeMs = 2000; - - expect( - calculateSumOfBlockingTime( - [{start: 1951, end: 2100, duration: 149}], - fcpTimeMs, - interactiveTimeMs - ) - ).toBe(0); // Duration after clipping is 49, which is < 50. - expect( - calculateSumOfBlockingTime( - [{start: 1950, end: 2100, duration: 150}], - fcpTimeMs, - interactiveTimeMs - ) - ).toBe(0); // Duration after clipping is 50, so time after 50ms is 0ms. - expect( - calculateSumOfBlockingTime( - [{start: 1949, end: 2100, duration: 151}], - fcpTimeMs, - interactiveTimeMs - ) - ).toBe(1); // Duration after clipping is 51, so time after 50ms is 1ms. - }); - - it('clips properly if FCP falls in the middle of a task', () => { - const fcpTimeMs = 1000; - const interactiveTimeMs = 2000; - - expect( - calculateSumOfBlockingTime( - [{start: 900, end: 1049, duration: 149}], - fcpTimeMs, - interactiveTimeMs - ) - ).toBe(0); // Duration after clipping is 49, which is < 50. - expect( - calculateSumOfBlockingTime( - [{start: 900, end: 1050, duration: 150}], - fcpTimeMs, - interactiveTimeMs - ) - ).toBe(0); // Duration after clipping is 50, so time after 50ms is 0ms. - expect( - calculateSumOfBlockingTime( - [{start: 900, end: 1051, duration: 151}], - fcpTimeMs, - interactiveTimeMs - ) - ).toBe(1); // Duration after clipping is 51, so time after 50ms is 1ms. - }); - - // This can happen in the lantern metric case, where we use the optimistic - // TTI and pessimistic FCP. 
- it('returns 0 if interactiveTime is earlier than FCP', () => { - const fcpTimeMs = 2050; - const interactiveTimeMs = 1050; - - const events = [{start: 500, end: 3000, duration: 2500}]; - - expect( - calculateSumOfBlockingTime(events, fcpTimeMs, interactiveTimeMs) - ).toBe(0); - }); -}); diff --git a/core/lib/lantern/TraceEngineComputationData.js b/core/lib/lantern/TraceEngineComputationData.js deleted file mode 100644 index 6c0450ca7dd3..000000000000 --- a/core/lib/lantern/TraceEngineComputationData.js +++ /dev/null @@ -1,466 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as TraceEngine from '@paulirish/trace_engine'; -import * as Protocol from '@paulirish/trace_engine/generated/protocol.js'; - -import * as Lantern from './lantern.js'; - -/** @typedef {import('@paulirish/trace_engine/models/trace/handlers/PageLoadMetricsHandler.js').MetricName} MetricName */ -/** @typedef {import('@paulirish/trace_engine/models/trace/handlers/PageLoadMetricsHandler.js').MetricScore} MetricScore */ - -/** - * @param {TraceEngine.Handlers.Types.TraceParseData} traceEngineData - * @return {Lantern.Simulation.ProcessedNavigation} - */ -function createProcessedNavigation(traceEngineData) { - const Meta = traceEngineData.Meta; - const frameId = Meta.mainFrameId; - const scoresByNav = traceEngineData.PageLoadMetrics.metricScoresByFrameId.get(frameId); - if (!scoresByNav) { - throw new Error('missing metric scores for main frame'); - } - - const lastNavigationId = Meta.mainFrameNavigations.at(-1)?.args.data?.navigationId; - const scores = lastNavigationId && scoresByNav.get(lastNavigationId); - if (!scores) { - throw new Error('missing metric scores for specified navigation'); - } - - /** @param {MetricName} metric */ - const getTimestampOrUndefined = metric => { - const metricScore = scores.get(metric); - if (!metricScore?.event) return; - return metricScore.event.ts; - }; - /** @param {MetricName} metric */ - const getTimestamp = metric => { - const metricScore = scores.get(metric); - if (!metricScore?.event) throw new Error(`missing metric: ${metric}`); - return metricScore.event.ts; - }; - // TODO: should use `MetricName.LCP`, but it is a const enum. - const FCP = /** @type {MetricName} */('FCP'); - const LCP = /** @type {MetricName} */('LCP'); - return { - timestamps: { - firstContentfulPaint: getTimestamp(FCP), - largestContentfulPaint: getTimestampOrUndefined(LCP), - }, - }; -} - -/** - * @param {URL|string} url - */ -function createParsedUrl(url) { - if (typeof url === 'string') { - url = new URL(url); - } - return { - scheme: url.protocol.split(':')[0], - // Intentional, DevTools uses different terminology - host: url.hostname, - securityOrigin: url.origin, - }; -} - -/** - * Returns a map of `pid` -> `tid[]`. - * @param {Lantern.Trace} trace - * @return {Map} - */ -function findWorkerThreads(trace) { - // TODO: WorkersHandler in TraceEngine needs to be updated to also include `pid` (only had `tid`). 
- const workerThreads = new Map(); - const workerCreationEvents = ['ServiceWorker thread', 'DedicatedWorker thread']; - - for (const event of trace.traceEvents) { - if (event.name !== 'thread_name' || !event.args.name) { - continue; - } - if (!workerCreationEvents.includes(event.args.name)) { - continue; - } - - const tids = workerThreads.get(event.pid); - if (tids) { - tids.push(event.tid); - } else { - workerThreads.set(event.pid, [event.tid]); - } - } - - return workerThreads; -} - -/** - * @param {TraceEngine.Handlers.Types.TraceParseData} traceEngineData - * @param {Map} workerThreads - * @param {import('@paulirish/trace_engine/models/trace/types/TraceEvents.js').SyntheticNetworkRequest} request - * @return {Lantern.NetworkRequest=} - */ -function createLanternRequest(traceEngineData, workerThreads, request) { - if (request.args.data.connectionId === undefined || - request.args.data.connectionReused === undefined) { - throw new Error('Trace is too old'); - } - - let url; - try { - url = new URL(request.args.data.url); - } catch (e) { - return; - } - - const timing = request.args.data.timing ? { - // These two timings are not included in the trace. - workerFetchStart: -1, - workerRespondWithSettled: -1, - ...request.args.data.timing, - } : undefined; - - const networkRequestTime = timing ? - timing.requestTime * 1000 : - request.args.data.syntheticData.downloadStart / 1000; - - let fromWorker = false; - const tids = workerThreads.get(request.pid); - if (tids?.includes(request.tid)) { - fromWorker = true; - } - - // TraceEngine collects worker thread ids in a different manner than `workerThreads` does. - // AFAIK these should be equivalent, but in case they are not let's also check this for now. - if (traceEngineData.Workers.workerIdByThread.has(request.tid)) { - fromWorker = true; - } - - // typescript const enums.... gotta stop using those ... - const Other = /** @type {Protocol.Network.InitiatorType.Other} */ ('other'); - - // `initiator` in the trace does not contain the stack trace for JS-initiated - // requests. Instead, that is stored in the `stackTrace` property of the SyntheticNetworkRequest. - // There are some minor differences in the fields, accounted for here. - // Most importantly, there seems to be fewer frames in the trace than the equivalent - // events over the CDP. This results in less accuracy in determining the initiator request, - // which means less edges in the graph, which mean worse results. - // TODO: Should fix in Chromium. - /** @type {Lantern.NetworkRequest['initiator']} */ - const initiator = request.args.data.initiator ?? {type: Other}; - if (request.args.data.stackTrace) { - const callFrames = request.args.data.stackTrace.map(f => { - return { - scriptId: /** @type {Protocol.Runtime.ScriptId} */(String(f.scriptId)), - url: f.url, - lineNumber: f.lineNumber - 1, - columnNumber: f.columnNumber - 1, - functionName: f.functionName, - }; - }); - initiator.stack = {callFrames}; - // Note: there is no `parent` to set ... - } - - let resourceType = request.args.data.resourceType; - if (request.args.data.initiator?.fetchType === 'xmlhttprequest') { - // @ts-expect-error yes XHR is a valid ResourceType. TypeScript const enums are so unhelpful. - resourceType = 'XHR'; - } else if (request.args.data.initiator?.fetchType === 'fetch') { - // @ts-expect-error yes Fetch is a valid ResourceType. TypeScript const enums are so unhelpful. - resourceType = 'Fetch'; - } - - // TODO: set decodedBodyLength for data urls in Trace Engine. 
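As a small orientation aid, here is a hypothetical call to the findWorkerThreads helper above (made-up pids and tids; only the fields the function reads are shown):

const workerThreads = findWorkerThreads({
  traceEvents: [
    {name: 'thread_name', pid: 1234, tid: 7, args: {name: 'DedicatedWorker thread'}},
    {name: 'thread_name', pid: 1234, tid: 9, args: {name: 'ServiceWorker thread'}},
    {name: 'thread_name', pid: 1234, tid: 1, args: {name: 'CrRendererMain'}},
  ],
});
// => Map { 1234 => [7, 9] }
// createLanternRequest then sets fromWorker = true for any request whose pid/tid
// pair falls in this map (or whose tid is known to TraceEngine's Workers handler).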
- let resourceSize = request.args.data.decodedBodyLength ?? 0; - if (url.protocol === 'data:' && resourceSize === 0) { - const needle = 'base64,'; - const index = url.pathname.indexOf(needle); - if (index !== -1) { - resourceSize = atob(url.pathname.substring(index + needle.length)).length; - } - } - - return { - rawRequest: request, - requestId: request.args.data.requestId, - connectionId: request.args.data.connectionId, - connectionReused: request.args.data.connectionReused, - url: request.args.data.url, - protocol: request.args.data.protocol, - parsedURL: createParsedUrl(url), - documentURL: request.args.data.requestingFrameUrl, - rendererStartTime: request.ts / 1000, - networkRequestTime, - responseHeadersEndTime: request.args.data.syntheticData.downloadStart / 1000, - networkEndTime: request.args.data.syntheticData.finishTime / 1000, - transferSize: request.args.data.encodedDataLength, - resourceSize, - fromDiskCache: request.args.data.syntheticData.isDiskCached, - fromMemoryCache: request.args.data.syntheticData.isMemoryCached, - isLinkPreload: request.args.data.isLinkPreload, - finished: request.args.data.finished, - failed: request.args.data.failed, - statusCode: request.args.data.statusCode, - initiator, - timing, - resourceType, - mimeType: request.args.data.mimeType, - priority: request.args.data.priority, - frameId: request.args.data.frame, - fromWorker, - // Set later. - redirects: undefined, - redirectSource: undefined, - redirectDestination: undefined, - initiatorRequest: undefined, - }; -} - -/** - * @param {Lantern.NetworkRequest} request The request to find the initiator of - * @param {Map} requestsByURL - * @return {Lantern.NetworkRequest|null} - */ -function chooseInitiatorRequest(request, requestsByURL) { - if (request.redirectSource) { - return request.redirectSource; - } - - const initiatorURL = Lantern.PageDependencyGraph.getNetworkInitiators(request)[0]; - let candidates = requestsByURL.get(initiatorURL) || []; - // The (valid) initiator must come before the initiated request. - candidates = candidates.filter(c => { - return c.responseHeadersEndTime <= request.rendererStartTime && - c.finished && !c.failed; - }); - if (candidates.length > 1) { - // Disambiguate based on prefetch. Prefetch requests have type 'Other' and cannot - // initiate requests, so we drop them here. - const nonPrefetchCandidates = candidates.filter( - cand => cand.resourceType !== Lantern.NetworkRequestTypes.Other); - if (nonPrefetchCandidates.length) { - candidates = nonPrefetchCandidates; - } - } - if (candidates.length > 1) { - // Disambiguate based on frame. It's likely that the initiator comes from the same frame. - const sameFrameCandidates = candidates.filter(cand => cand.frameId === request.frameId); - if (sameFrameCandidates.length) { - candidates = sameFrameCandidates; - } - } - if (candidates.length > 1 && request.initiator.type === 'parser') { - // Filter to just Documents when initiator type is parser. - const documentCandidates = candidates.filter(cand => - cand.resourceType === Lantern.NetworkRequestTypes.Document); - if (documentCandidates.length) { - candidates = documentCandidates; - } - } - if (candidates.length > 1) { - // If all real loads came from successful preloads (url preloaded and - // loads came from the cache), filter to link rel=preload request(s). 
- const linkPreloadCandidates = candidates.filter(c => c.isLinkPreload); - if (linkPreloadCandidates.length) { - const nonPreloadCandidates = candidates.filter(c => !c.isLinkPreload); - const allPreloaded = nonPreloadCandidates.every(c => c.fromDiskCache || c.fromMemoryCache); - if (nonPreloadCandidates.length && allPreloaded) { - candidates = linkPreloadCandidates; - } - } - } - - // Only return an initiator if the result is unambiguous. - return candidates.length === 1 ? candidates[0] : null; -} - -/** - * @param {Lantern.NetworkRequest[]} lanternRequests - */ -function linkInitiators(lanternRequests) { -/** @type {Map} */ - const requestsByURL = new Map(); - for (const request of lanternRequests) { - const requests = requestsByURL.get(request.url) || []; - requests.push(request); - requestsByURL.set(request.url, requests); - } - - for (const request of lanternRequests) { - const initiatorRequest = chooseInitiatorRequest(request, requestsByURL); - if (initiatorRequest) { - request.initiatorRequest = initiatorRequest; - } - } -} - -/** - * @param {Lantern.Trace} trace - * @param {TraceEngine.Handlers.Types.TraceParseData} traceEngineData - * @return {Lantern.NetworkRequest[]} - */ -function createNetworkRequests(trace, traceEngineData) { - const workerThreads = findWorkerThreads(trace); - - /** @type {Lantern.NetworkRequest[]} */ - const lanternRequests = []; - for (const request of traceEngineData.NetworkRequests.byTime) { - const lanternRequest = createLanternRequest(traceEngineData, workerThreads, request); - if (lanternRequest) { - lanternRequests.push(lanternRequest); - } - } - - // TraceEngine consolidates all redirects into a single request object, but lantern needs - // an entry for each redirected request. - for (const request of [...lanternRequests]) { - if (!request.rawRequest) continue; - - const redirects = request.rawRequest.args.data.redirects; - if (!redirects.length) continue; - - const requestChain = []; - for (const redirect of redirects) { - const redirectedRequest = structuredClone(request); - - redirectedRequest.networkRequestTime = redirect.ts / 1000; - redirectedRequest.rendererStartTime = redirectedRequest.networkRequestTime; - - redirectedRequest.networkEndTime = (redirect.ts + redirect.dur) / 1000; - redirectedRequest.responseHeadersEndTime = redirectedRequest.networkEndTime; - - redirectedRequest.timing = { - requestTime: redirectedRequest.networkRequestTime / 1000, - receiveHeadersStart: redirectedRequest.responseHeadersEndTime, - receiveHeadersEnd: redirectedRequest.responseHeadersEndTime, - proxyStart: -1, - proxyEnd: -1, - dnsStart: -1, - dnsEnd: -1, - connectStart: -1, - connectEnd: -1, - sslStart: -1, - sslEnd: -1, - sendStart: -1, - sendEnd: -1, - workerStart: -1, - workerReady: -1, - workerFetchStart: -1, - workerRespondWithSettled: -1, - pushStart: -1, - pushEnd: -1, - }; - - redirectedRequest.url = redirect.url; - redirectedRequest.parsedURL = createParsedUrl(redirect.url); - // TODO: TraceEngine is not retaining the actual status code. - redirectedRequest.statusCode = 302; - redirectedRequest.resourceType = undefined; - // TODO: TraceEngine is not retaining transfer size of redirected request. 
- redirectedRequest.transferSize = 400; - requestChain.push(redirectedRequest); - lanternRequests.push(redirectedRequest); - } - requestChain.push(request); - - for (let i = 0; i < requestChain.length; i++) { - const request = requestChain[i]; - if (i > 0) { - request.redirectSource = requestChain[i - 1]; - request.redirects = requestChain.slice(0, i); - } - if (i !== requestChain.length - 1) { - request.redirectDestination = requestChain[i + 1]; - } - } - - // Apply the `:redirect` requestId convention: only redirects[0].requestId is the actual - // requestId, all the rest have n occurences of `:redirect` as a suffix. - for (let i = 1; i < requestChain.length; i++) { - requestChain[i].requestId = `${requestChain[i - 1].requestId}:redirect`; - } - } - - linkInitiators(lanternRequests); - - // This would already be sorted by rendererStartTime, if not for the redirect unwrapping done - // above. - return lanternRequests.sort((a, b) => a.rendererStartTime - b.rendererStartTime); -} - -/** - * @param {Lantern.Trace} trace - * @param {TraceEngine.Handlers.Types.TraceParseData} traceEngineData - * @return {Lantern.TraceEvent[]} - */ -function collectMainThreadEvents(trace, traceEngineData) { - const Meta = traceEngineData.Meta; - const mainFramePids = Meta.mainFrameNavigations.length - ? new Set(Meta.mainFrameNavigations.map(nav => nav.pid)) - : Meta.topLevelRendererIds; - - const rendererPidToTid = new Map(); - for (const pid of mainFramePids) { - const threads = Meta.threadsInProcess.get(pid) ?? []; - - let found = false; - for (const [tid, thread] of threads) { - if (thread.args.name === 'CrRendererMain') { - rendererPidToTid.set(pid, tid); - found = true; - break; - } - } - - if (found) continue; - - // `CrRendererMain` can be missing if chrome is launched with the `--single-process` flag. - // In this case, page tasks will be run in the browser thread. - for (const [tid, thread] of threads) { - if (thread.args.name === 'CrBrowserMain') { - rendererPidToTid.set(pid, tid); - found = true; - break; - } - } - } - - return trace.traceEvents.filter(e => rendererPidToTid.get(e.pid) === e.tid); -} - -/** - * @param {Lantern.NetworkRequest[]} requests - * @param {Lantern.Trace} trace - * @param {TraceEngine.Handlers.Types.TraceParseData} traceEngineData - * @param {Lantern.Simulation.URL=} URL - */ -function createGraph(requests, trace, traceEngineData, URL) { - const mainThreadEvents = collectMainThreadEvents(trace, traceEngineData); - - // URL defines the initial request that the Lantern graph starts at (the root node) and the - // main document request. These are equal if there are no redirects. 
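To make the two conventions just described concrete, consider a hypothetical navigation that redirects twice, http://a.test -> http://b.test -> http://c.test, with an original request id of '1234.1' (both values invented for illustration):

// requestChain[0]  url: 'http://a.test'  requestId: '1234.1'
// requestChain[1]  url: 'http://b.test'  requestId: '1234.1:redirect'
// requestChain[2]  url: 'http://c.test'  requestId: '1234.1:redirect:redirect'
//
// Only the first entry keeps the real id; every later entry, including the final
// non-redirected request, gains one ':redirect' suffix per hop. When no URL
// argument is passed to createGraph below, requestedUrl comes from the first
// request ('http://a.test') and mainDocumentUrl is found by walking
// redirectDestination to the end of the chain ('http://c.test').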
- if (!URL) { - URL = { - requestedUrl: requests[0].url, - mainDocumentUrl: '', - }; - - let request = requests[0]; - while (request.redirectDestination) { - request = request.redirectDestination; - } - URL.mainDocumentUrl = request.url; - } - - return Lantern.PageDependencyGraph.createGraph(mainThreadEvents, requests, URL); -} - -export { - createProcessedNavigation, - createNetworkRequests, - createGraph, -}; diff --git a/core/lib/lantern/lantern.js b/core/lib/lantern/lantern.js index c636c90904c0..0ce77f3036e0 100644 --- a/core/lib/lantern/lantern.js +++ b/core/lib/lantern/lantern.js @@ -4,48 +4,5 @@ * SPDX-License-Identifier: Apache-2.0 */ -import * as Lantern from './types/lantern.js'; - -/** @type {Lantern.Util.SelfMap} */ -const NetworkRequestTypes = { - XHR: 'XHR', - Fetch: 'Fetch', - EventSource: 'EventSource', - Script: 'Script', - Stylesheet: 'Stylesheet', - Image: 'Image', - Media: 'Media', - Font: 'Font', - Document: 'Document', - TextTrack: 'TextTrack', - WebSocket: 'WebSocket', - Other: 'Other', - Manifest: 'Manifest', - SignedExchange: 'SignedExchange', - Ping: 'Ping', - Preflight: 'Preflight', - CSPViolationReport: 'CSPViolationReport', - Prefetch: 'Prefetch', -}; - -export {BaseNode} from './BaseNode.js'; -export {CPUNode} from './CpuNode.js'; -export {LanternError as Error} from './LanternError.js'; -export {Metric} from './Metric.js'; -export {NetworkNode} from './NetworkNode.js'; -export {PageDependencyGraph} from './PageDependencyGraph.js'; -export * as Metrics from './metrics/metrics.js'; -export * as Simulation from './simulation/simulation.js'; -export * as TBTUtils from './TBTUtils.js'; -export * as TraceEngineComputationData from './TraceEngineComputationData.js'; - -/** @template [T=any] @typedef {Lantern.NetworkRequest} NetworkRequest */ -/** @typedef {Lantern.ResourcePriority} ResourcePriority */ -/** @typedef {Lantern.ResourceTiming} ResourceTiming */ -/** @typedef {Lantern.ResourceType} ResourceType */ -/** @typedef {Lantern.Trace} Trace */ -/** @typedef {Lantern.TraceEvent} TraceEvent */ - -export { - NetworkRequestTypes, -}; +export * from '@paulirish/trace_engine/models/trace/lantern/lantern.js'; +export {LanternComputationData as TraceEngineComputationData} from '@paulirish/trace_engine'; diff --git a/core/lib/lantern/metrics/FirstContentfulPaint.js b/core/lib/lantern/metrics/FirstContentfulPaint.js deleted file mode 100644 index 8c9bbc96afb0..000000000000 --- a/core/lib/lantern/metrics/FirstContentfulPaint.js +++ /dev/null @@ -1,198 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from '../lantern.js'; - -/** @typedef {import('../BaseNode.js').Node} Node */ -/** @template T @typedef {import('../NetworkNode.js').NetworkNode} NetworkNode */ -/** @typedef {import('../CpuNode.js').CPUNode} CpuNode */ - -class FirstContentfulPaint extends Lantern.Metric { - /** - * @return {Lantern.Simulation.MetricCoefficients} - */ - static get COEFFICIENTS() { - return { - intercept: 0, - optimistic: 0.5, - pessimistic: 0.5, - }; - } - - /** - * @template T - * @typedef FirstPaintBasedGraphOpts - * @property {number} cutoffTimestamp The timestamp used to filter out tasks that occured after - * our paint of interest. Typically this is First Contentful Paint or First Meaningful Paint. - * @property {function(NetworkNode):boolean} treatNodeAsRenderBlocking The function that determines - * which resources should be considered *possibly* render-blocking. 
- * @property {(function(CpuNode):boolean)=} additionalCpuNodesToTreatAsRenderBlocking The function that - * determines which CPU nodes should also be included in our blocking node IDs set, - * beyond what getRenderBlockingNodeData() already includes. - */ - - /** - * This function computes the set of URLs that *appeared* to be render-blocking based on our filter, - * *but definitely were not* render-blocking based on the timing of their EvaluateScript task. - * It also computes the set of corresponding CPU node ids that were needed for the paint at the - * given timestamp. - * - * @template [T=unknown] - * @param {Node} graph - * @param {FirstPaintBasedGraphOpts} opts - * @return {{definitelyNotRenderBlockingScriptUrls: Set, renderBlockingCpuNodeIds: Set}} - */ - static getRenderBlockingNodeData( - graph, - {cutoffTimestamp, treatNodeAsRenderBlocking, additionalCpuNodesToTreatAsRenderBlocking} - ) { - /** @type {Map} A map of blocking script URLs to the earliest EvaluateScript task node that executed them. */ - const scriptUrlToNodeMap = new Map(); - - /** @type {Array} */ - const cpuNodes = []; - graph.traverse(node => { - if (node.type === Lantern.BaseNode.TYPES.CPU) { - // A task is *possibly* render blocking if it *started* before cutoffTimestamp. - // We use startTime here because the paint event can be *inside* the task that was render blocking. - if (node.startTime <= cutoffTimestamp) cpuNodes.push(node); - - // Build our script URL map to find the earliest EvaluateScript task node. - const scriptUrls = node.getEvaluateScriptURLs(); - for (const url of scriptUrls) { - // Use the earliest CPU node we find. - const existing = scriptUrlToNodeMap.get(url) || node; - scriptUrlToNodeMap.set(url, node.startTime < existing.startTime ? node : existing); - } - } - }); - - cpuNodes.sort((a, b) => a.startTime - b.startTime); - - // A script is *possibly* render blocking if it finished loading before cutoffTimestamp. - const possiblyRenderBlockingScriptUrls = Lantern.Metric.getScriptUrls(graph, node => { - // The optimistic LCP treatNodeAsRenderBlocking fn wants to exclude some images in the graph, - // but here it only receives scripts to evaluate. It's a no-op in this case, but it will - // matter below in the getFirstPaintBasedGraph clone operation. - return node.endTime <= cutoffTimestamp && treatNodeAsRenderBlocking(node); - }); - - // A script is *definitely not* render blocking if its EvaluateScript task started after cutoffTimestamp. - /** @type {Set} */ - const definitelyNotRenderBlockingScriptUrls = new Set(); - /** @type {Set} */ - const renderBlockingCpuNodeIds = new Set(); - for (const url of possiblyRenderBlockingScriptUrls) { - // Lookup the CPU node that had the earliest EvaluateScript for this URL. - const cpuNodeForUrl = scriptUrlToNodeMap.get(url); - - // If we can't find it at all, we can't conclude anything, so just skip it. - if (!cpuNodeForUrl) continue; - - // If we found it and it was in our `cpuNodes` set that means it finished before cutoffTimestamp, so it really is render-blocking. - if (cpuNodes.includes(cpuNodeForUrl)) { - renderBlockingCpuNodeIds.add(cpuNodeForUrl.id); - continue; - } - - // We couldn't find the evaluate script in the set of CPU nodes that ran before our paint, so - // it must not have been necessary for the paint. - definitelyNotRenderBlockingScriptUrls.add(url); - } - - // The first layout, first paint, and first ParseHTML are almost always necessary for first paint, - // so we always include those CPU nodes. 
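The classification walked through above boils down to a small decision rule. The sketch below is a condensed restatement using bare timestamps (the real code works on graph nodes and skips scripts for which no EvaluateScript task can be found at all):

// loadEndTime: when the script finished loading; evaluateStart: start of its
// earliest EvaluateScript task; cutoffTimestamp: the paint of interest.
function classifyScript(loadEndTime, evaluateStart, cutoffTimestamp) {
  // It only *looks* render blocking if it finished loading before the paint.
  if (loadEndTime > cutoffTimestamp) return 'not considered';
  // Evaluated before the paint: it really was needed, so its CPU node id is
  // added to renderBlockingCpuNodeIds.
  if (evaluateStart <= cutoffTimestamp) return 'render blocking';
  // Evaluated only after the paint: it cannot have been required, so the URL is
  // added to definitelyNotRenderBlockingScriptUrls.
  return 'definitely not render blocking';
}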
- const firstLayout = cpuNodes.find(node => node.didPerformLayout()); - if (firstLayout) renderBlockingCpuNodeIds.add(firstLayout.id); - const firstPaint = cpuNodes.find(node => node.childEvents.some(e => e.name === 'Paint')); - if (firstPaint) renderBlockingCpuNodeIds.add(firstPaint.id); - const firstParse = cpuNodes.find(node => node.childEvents.some(e => e.name === 'ParseHTML')); - if (firstParse) renderBlockingCpuNodeIds.add(firstParse.id); - - // If a CPU filter was passed in, we also want to include those extra nodes. - if (additionalCpuNodesToTreatAsRenderBlocking) { - cpuNodes - .filter(additionalCpuNodesToTreatAsRenderBlocking) - .forEach(node => renderBlockingCpuNodeIds.add(node.id)); - } - - return { - definitelyNotRenderBlockingScriptUrls, - renderBlockingCpuNodeIds, - }; - } - - /** - * This function computes the graph required for the first paint of interest. - * - * @template [T=unknown] - * @param {Node} dependencyGraph - * @param {FirstPaintBasedGraphOpts} opts - * @return {Node} - */ - static getFirstPaintBasedGraph( - dependencyGraph, - {cutoffTimestamp, treatNodeAsRenderBlocking, additionalCpuNodesToTreatAsRenderBlocking} - ) { - const rbData = this.getRenderBlockingNodeData(dependencyGraph, { - cutoffTimestamp, - treatNodeAsRenderBlocking, - additionalCpuNodesToTreatAsRenderBlocking, - }); - const {definitelyNotRenderBlockingScriptUrls, renderBlockingCpuNodeIds} = rbData; - - return dependencyGraph.cloneWithRelationships(node => { - if (node.type === Lantern.BaseNode.TYPES.NETWORK) { - // Exclude all nodes that ended after cutoffTimestamp (except for the main document which we always consider necessary) - // endTime is negative if request does not finish, make sure startTime isn't after cutoffTimestamp in this case. - const endedAfterPaint = node.endTime > cutoffTimestamp || node.startTime > cutoffTimestamp; - if (endedAfterPaint && !node.isMainDocument()) return false; - - const url = node.request.url; - // If the URL definitely wasn't render-blocking then we filter it out. - if (definitelyNotRenderBlockingScriptUrls.has(url)) { - return false; - } - - // Lastly, build up the FCP graph of all nodes we consider render blocking - return treatNodeAsRenderBlocking(node); - } else { - // If it's a CPU node, just check if it was blocking. - return renderBlockingCpuNodeIds.has(node.id); - } - }); - } - - /** - * @param {Node} dependencyGraph - * @param {Lantern.Simulation.ProcessedNavigation} processedNavigation - * @return {Node} - */ - static getOptimisticGraph(dependencyGraph, processedNavigation) { - return this.getFirstPaintBasedGraph(dependencyGraph, { - cutoffTimestamp: processedNavigation.timestamps.firstContentfulPaint, - // In the optimistic graph we exclude resources that appeared to be render blocking but were - // initiated by a script. While they typically have a very high importance and tend to have a - // significant impact on the page's content, these resources don't technically block rendering. 
- treatNodeAsRenderBlocking: node => - node.hasRenderBlockingPriority() && node.initiatorType !== 'script', - }); - } - - /** - * @param {Node} dependencyGraph - * @param {Lantern.Simulation.ProcessedNavigation} processedNavigation - * @return {Node} - */ - static getPessimisticGraph(dependencyGraph, processedNavigation) { - return this.getFirstPaintBasedGraph(dependencyGraph, { - cutoffTimestamp: processedNavigation.timestamps.firstContentfulPaint, - treatNodeAsRenderBlocking: node => node.hasRenderBlockingPriority(), - }); - } -} - -export {FirstContentfulPaint}; diff --git a/core/lib/lantern/metrics/FirstContentfulPaint.test.js b/core/lib/lantern/metrics/FirstContentfulPaint.test.js deleted file mode 100644 index 0c95c38e6642..000000000000 --- a/core/lib/lantern/metrics/FirstContentfulPaint.test.js +++ /dev/null @@ -1,54 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import assert from 'assert/strict'; - -import * as Lantern from '../lantern.js'; -import {readJson} from '../../../test/test-utils.js'; -import {getComputationDataFromFixture} from './MetricTestUtils.js'; - -const {FirstContentfulPaint} = Lantern.Metrics; - -const trace = readJson('../../../test/fixtures/artifacts/progressive-app/trace.json', import.meta); - -describe('Metrics: Lantern FCP', () => { - it('should compute predicted value', async () => { - const data = await getComputationDataFromFixture({trace}); - const result = await FirstContentfulPaint.compute(data); - - expect({ - timing: Math.round(result.timing), - optimistic: Math.round(result.optimisticEstimate.timeInMs), - pessimistic: Math.round(result.pessimisticEstimate.timeInMs), - optimisticNodeTimings: result.optimisticEstimate.nodeTimings.size, - pessimisticNodeTimings: result.pessimisticEstimate.nodeTimings.size, - }).toMatchSnapshot(); - assert.ok(result.optimisticGraph, 'should have created optimistic graph'); - assert.ok(result.pessimisticGraph, 'should have created pessimistic graph'); - }); - - it('should handle negative request networkEndTime', async () => { - const data = await getComputationDataFromFixture({trace}); - data.graph.request.networkEndTime = -1; - const result = await FirstContentfulPaint.compute(data); - - const optimisticNodes = []; - result.optimisticGraph.traverse(node => { - if (node.type === 'network') { - optimisticNodes.push(node); - } - }); - expect(optimisticNodes.map(node => node.request.url)).toEqual(['https://squoosh.app/']); - - const pessimisticNodes = []; - result.pessimisticGraph.traverse(node => { - if (node.type === 'network') { - pessimisticNodes.push(node); - } - }); - expect(pessimisticNodes.map(node => node.request.url)).toEqual(['https://squoosh.app/']); - }); -}); diff --git a/core/lib/lantern/metrics/Interactive.js b/core/lib/lantern/metrics/Interactive.js deleted file mode 100644 index d2244868627e..000000000000 --- a/core/lib/lantern/metrics/Interactive.js +++ /dev/null @@ -1,109 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from '../lantern.js'; - -/** @typedef {import('../BaseNode.js').Node} Node */ - -// Any CPU task of 20 ms or more will end up being a critical long task on mobile -const CRITICAL_LONG_TASK_THRESHOLD = 20; - -class Interactive extends Lantern.Metric { - /** - * @return {Lantern.Simulation.MetricCoefficients} - */ - static get COEFFICIENTS() { - return { - intercept: 0, - optimistic: 0.45, - pessimistic: 0.55, - }; - } - - /** - * @param {Node} dependencyGraph 
- * @return {Node} - */ - static getOptimisticGraph(dependencyGraph) { - // Adjust the critical long task threshold for microseconds - const minimumCpuTaskDuration = CRITICAL_LONG_TASK_THRESHOLD * 1000; - - return dependencyGraph.cloneWithRelationships(node => { - // Include everything that might be a long task - if (node.type === Lantern.BaseNode.TYPES.CPU) { - return node.duration > minimumCpuTaskDuration; - } - - // Include all scripts and high priority requests, exclude all images - const isImage = node.request.resourceType === Lantern.NetworkRequestTypes.Image; - const isScript = node.request.resourceType === Lantern.NetworkRequestTypes.Script; - return ( - !isImage && - (isScript || - node.request.priority === 'High' || - node.request.priority === 'VeryHigh') - ); - }); - } - - /** - * @param {Node} dependencyGraph - * @return {Node} - */ - static getPessimisticGraph(dependencyGraph) { - return dependencyGraph; - } - - /** - * @param {Lantern.Simulation.Result} simulationResult - * @param {import('../Metric.js').Extras} extras - * @return {Lantern.Simulation.Result} - */ - static getEstimateFromSimulation(simulationResult, extras) { - if (!extras.lcpResult) throw new Error('missing lcpResult'); - - const lastTaskAt = Interactive.getLastLongTaskEndTime(simulationResult.nodeTimings); - const minimumTime = extras.optimistic - ? extras.lcpResult.optimisticEstimate.timeInMs - : extras.lcpResult.pessimisticEstimate.timeInMs; - return { - timeInMs: Math.max(minimumTime, lastTaskAt), - nodeTimings: simulationResult.nodeTimings, - }; - } - - /** - * @param {Lantern.Simulation.MetricComputationDataInput} data - * @param {Omit=} extras - * @return {Promise} - */ - static async compute(data, extras) { - const lcpResult = extras?.lcpResult; - if (!lcpResult) { - throw new Error('LCP is required to calculate the Interactive metric'); - } - - const metricResult = await super.compute(data, extras); - metricResult.timing = Math.max(metricResult.timing, lcpResult.timing); - return metricResult; - } - - /** - * @param {Lantern.Simulation.Result['nodeTimings']} nodeTimings - * @return {number} - */ - static getLastLongTaskEndTime(nodeTimings, duration = 50) { - return Array.from(nodeTimings.entries()) - .filter(([node, timing]) => { - if (node.type !== Lantern.BaseNode.TYPES.CPU) return false; - return timing.duration > duration; - }) - .map(([_, timing]) => timing.endTime) - .reduce((max, x) => Math.max(max || 0, x || 0), 0); - } -} - -export {Interactive}; diff --git a/core/lib/lantern/metrics/Interactive.test.js b/core/lib/lantern/metrics/Interactive.test.js deleted file mode 100644 index 978c087b5028..000000000000 --- a/core/lib/lantern/metrics/Interactive.test.js +++ /dev/null @@ -1,56 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import assert from 'assert/strict'; - -import * as Lantern from '../lantern.js'; -import {getComputationDataFromFixture} from './MetricTestUtils.js'; -import {readJson} from '../../../test/test-utils.js'; - -const {Interactive, FirstContentfulPaint, LargestContentfulPaint} = Lantern.Metrics; - -const trace = readJson('../../../test/fixtures/artifacts/progressive-app/trace.json', import.meta); -const iframeTrace = readJson('../../../test/fixtures/artifacts/iframe/trace.json', import.meta); - -describe('Metrics: Lantern TTI', () => { - it('should compute predicted value', async () => { - const data = await getComputationDataFromFixture({trace}); - const result = await Interactive.compute(data, { - lcpResult: await 
LargestContentfulPaint.compute(data, { - fcpResult: await FirstContentfulPaint.compute(data), - }), - }); - - expect({ - timing: Math.round(result.timing), - optimistic: Math.round(result.optimisticEstimate.timeInMs), - pessimistic: Math.round(result.pessimisticEstimate.timeInMs), - }).toMatchSnapshot(); - assert.equal(result.optimisticEstimate.nodeTimings.size, 14); - assert.equal(result.pessimisticEstimate.nodeTimings.size, 31); - assert.ok(result.optimisticGraph, 'should have created optimistic graph'); - assert.ok(result.pessimisticGraph, 'should have created pessimistic graph'); - }); - - it('should compute predicted value on iframes with substantial layout', async () => { - const data = await getComputationDataFromFixture({ - trace: iframeTrace, - }); - const result = await Interactive.compute(data, { - lcpResult: await LargestContentfulPaint.compute(data, { - fcpResult: await FirstContentfulPaint.compute(data), - }), - }); - - expect({ - timing: Math.round(result.timing), - optimistic: Math.round(result.optimisticEstimate.timeInMs), - pessimistic: Math.round(result.pessimisticEstimate.timeInMs), - }).toMatchSnapshot(); - assert.ok(result.optimisticGraph, 'should have created optimistic graph'); - assert.ok(result.pessimisticGraph, 'should have created pessimistic graph'); - }); -}); diff --git a/core/lib/lantern/metrics/LargestContentfulPaint.js b/core/lib/lantern/metrics/LargestContentfulPaint.js deleted file mode 100644 index 3efa9a507021..000000000000 --- a/core/lib/lantern/metrics/LargestContentfulPaint.js +++ /dev/null @@ -1,105 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from '../lantern.js'; - -/** @typedef {import('../BaseNode.js').Node} Node */ - -class LargestContentfulPaint extends Lantern.Metric { - /** - * @return {Lantern.Simulation.MetricCoefficients} - */ - static get COEFFICIENTS() { - return { - intercept: 0, - optimistic: 0.5, - pessimistic: 0.5, - }; - } - - /** - * Low priority image nodes are usually offscreen and very unlikely to be the - * resource that is required for LCP. Our LCP graphs include everything except for these images. 
- * - * @param {Node} node - * @return {boolean} - */ - static isNotLowPriorityImageNode(node) { - if (node.type !== 'network') return true; - const isImage = node.request.resourceType === 'Image'; - const isLowPriority = node.request.priority === 'Low' || node.request.priority === 'VeryLow'; - return !isImage || !isLowPriority; - } - - /** - * @param {Node} dependencyGraph - * @param {Lantern.Simulation.ProcessedNavigation} processedNavigation - * @return {Node} - */ - static getOptimisticGraph(dependencyGraph, processedNavigation) { - const lcp = processedNavigation.timestamps.largestContentfulPaint; - if (!lcp) { - throw new Lantern.Error('NO_LCP'); - } - - return Lantern.Metrics.FirstContentfulPaint.getFirstPaintBasedGraph(dependencyGraph, { - cutoffTimestamp: lcp, - treatNodeAsRenderBlocking: LargestContentfulPaint.isNotLowPriorityImageNode, - }); - } - - /** - * @param {Node} dependencyGraph - * @param {Lantern.Simulation.ProcessedNavigation} processedNavigation - * @return {Node} - */ - static getPessimisticGraph(dependencyGraph, processedNavigation) { - const lcp = processedNavigation.timestamps.largestContentfulPaint; - if (!lcp) { - throw new Lantern.Error('NO_LCP'); - } - - return Lantern.Metrics.FirstContentfulPaint.getFirstPaintBasedGraph(dependencyGraph, { - cutoffTimestamp: lcp, - treatNodeAsRenderBlocking: _ => true, - // For pessimistic LCP we'll include *all* layout nodes - additionalCpuNodesToTreatAsRenderBlocking: node => node.didPerformLayout(), - }); - } - - /** - * @param {Lantern.Simulation.Result} simulationResult - * @return {Lantern.Simulation.Result} - */ - static getEstimateFromSimulation(simulationResult) { - const nodeTimesNotOffscreenImages = Array.from(simulationResult.nodeTimings.entries()) - .filter(entry => LargestContentfulPaint.isNotLowPriorityImageNode(entry[0])) - .map(entry => entry[1].endTime); - - return { - timeInMs: Math.max(...nodeTimesNotOffscreenImages), - nodeTimings: simulationResult.nodeTimings, - }; - } - - /** - * @param {Lantern.Simulation.MetricComputationDataInput} data - * @param {Omit=} extras - * @return {Promise} - */ - static async compute(data, extras) { - const fcpResult = extras?.fcpResult; - if (!fcpResult) { - throw new Error('FCP is required to calculate the LCP metric'); - } - - const metricResult = await super.compute(data, extras); - metricResult.timing = Math.max(metricResult.timing, fcpResult.timing); - return metricResult; - } -} - -export {LargestContentfulPaint}; diff --git a/core/lib/lantern/metrics/LargestContentfulPaint.test.js b/core/lib/lantern/metrics/LargestContentfulPaint.test.js deleted file mode 100644 index 21120d9a2d82..000000000000 --- a/core/lib/lantern/metrics/LargestContentfulPaint.test.js +++ /dev/null @@ -1,42 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import assert from 'assert/strict'; - -import * as Lantern from '../lantern.js'; -import {getComputationDataFromFixture} from './MetricTestUtils.js'; -import {readJson} from '../../../test/test-utils.js'; - -const {FirstContentfulPaint, LargestContentfulPaint} = Lantern.Metrics; - -const trace = readJson('../../../test/fixtures/artifacts/paul/trace.json', import.meta); - -describe('Metrics: Lantern LCP', () => { - it('should compute predicted value', async () => { - const data = await getComputationDataFromFixture({trace}); - const result = await LargestContentfulPaint.compute(data, { - fcpResult: await FirstContentfulPaint.compute(data), - }); - - expect({ - timing: 
Math.round(result.timing), - optimistic: Math.round(result.optimisticEstimate.timeInMs), - pessimistic: Math.round(result.pessimisticEstimate.timeInMs), - optimisticNodeTimings: result.optimisticEstimate.nodeTimings.size, - pessimisticNodeTimings: result.pessimisticEstimate.nodeTimings.size}). -toMatchInlineSnapshot(` -Object { - "optimistic": 1457, - "optimisticNodeTimings": 8, - "pessimistic": 1616, - "pessimisticNodeTimings": 9, - "timing": 1536, -} -`); - assert.ok(result.optimisticGraph, 'should have created optimistic graph'); - assert.ok(result.pessimisticGraph, 'should have created pessimistic graph'); - }); -}); diff --git a/core/lib/lantern/metrics/MaxPotentialFID.js b/core/lib/lantern/metrics/MaxPotentialFID.js deleted file mode 100644 index 83bf3b06c807..000000000000 --- a/core/lib/lantern/metrics/MaxPotentialFID.js +++ /dev/null @@ -1,91 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from '../lantern.js'; - -/** @typedef {import('../BaseNode.js').Node} Node */ - -class MaxPotentialFID extends Lantern.Metric { - /** - * @return {Lantern.Simulation.MetricCoefficients} - */ - static get COEFFICIENTS() { - return { - intercept: 0, - optimistic: 0.5, - pessimistic: 0.5, - }; - } - - /** - * @param {Node} dependencyGraph - * @return {Node} - */ - static getOptimisticGraph(dependencyGraph) { - return dependencyGraph; - } - - /** - * @param {Node} dependencyGraph - * @return {Node} - */ - static getPessimisticGraph(dependencyGraph) { - return dependencyGraph; - } - - /** - * @param {Lantern.Simulation.Result} simulation - * @param {import('../Metric.js').Extras} extras - * @return {Lantern.Simulation.Result} - */ - static getEstimateFromSimulation(simulation, extras) { - if (!extras.fcpResult) throw new Error('missing fcpResult'); - - // Intentionally use the opposite FCP estimate, a more pessimistic FCP means that more tasks - // are excluded from the FID computation, so a higher FCP means lower FID for same work. - const fcpTimeInMs = extras.optimistic - ? 
extras.fcpResult.pessimisticEstimate.timeInMs - : extras.fcpResult.optimisticEstimate.timeInMs; - - const timings = MaxPotentialFID.getTimingsAfterFCP( - simulation.nodeTimings, - fcpTimeInMs - ); - - return { - timeInMs: Math.max(...timings.map(timing => timing.duration), 16), - nodeTimings: simulation.nodeTimings, - }; - } - - /** - * @param {Lantern.Simulation.MetricComputationDataInput} data - * @param {Omit=} extras - * @return {Promise} - */ - static compute(data, extras) { - const fcpResult = extras?.fcpResult; - if (!fcpResult) { - throw new Error('FCP is required to calculate the Max Potential FID metric'); - } - - return super.compute(data, extras); - } - - /** - * @param {Lantern.Simulation.Result['nodeTimings']} nodeTimings - * @param {number} fcpTimeInMs - * @return {Array<{duration: number}>} - */ - static getTimingsAfterFCP(nodeTimings, fcpTimeInMs) { - return Array.from(nodeTimings.entries()) - .filter(([node, timing]) => - node.type === Lantern.BaseNode.TYPES.CPU && timing.endTime > fcpTimeInMs) - .map(([_, timing]) => timing); - } -} - -export {MaxPotentialFID}; diff --git a/core/lib/lantern/metrics/MetricTestUtils.js b/core/lib/lantern/metrics/MetricTestUtils.js deleted file mode 100644 index 5ac981e265f7..000000000000 --- a/core/lib/lantern/metrics/MetricTestUtils.js +++ /dev/null @@ -1,48 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as TraceEngine from '@paulirish/trace_engine'; - -import * as Lantern from '../lantern.js'; -import {polyfillDOMRect} from '../../polyfill-dom-rect.js'; - -polyfillDOMRect(); - -/** - * @param {TraceEngine.Types.TraceEvents.TraceEventData[]} traceEvents - */ -async function runTraceEngine(traceEvents) { - const processor = TraceEngine.Processor.TraceProcessor.createWithAllHandlers(); - await processor.parse(traceEvents); - if (!processor.traceParsedData) throw new Error('No data'); - return processor.traceParsedData; -} - -/** - * @param {{trace: Lantern.Trace, settings?: Lantern.Simulation.Settings, URL?: Lantern.Simulation.URL}} opts - */ -async function getComputationDataFromFixture({trace, settings, URL}) { - settings = settings ?? 
/** @type {Lantern.Simulation.Settings} */({}); - if (!settings.throttlingMethod) settings.throttlingMethod = 'simulate'; - const traceEngineData = await runTraceEngine( - /** @type {TraceEngine.Types.TraceEvents.TraceEventData[]} */ (trace.traceEvents) - ); - const requests = - Lantern.TraceEngineComputationData.createNetworkRequests(trace, traceEngineData); - const networkAnalysis = Lantern.Simulation.NetworkAnalyzer.analyze(requests); - - return { - simulator: Lantern.Simulation.Simulator.createSimulator({...settings, networkAnalysis}), - graph: Lantern.TraceEngineComputationData.createGraph(requests, trace, traceEngineData, URL), - processedNavigation: - Lantern.TraceEngineComputationData.createProcessedNavigation(traceEngineData), - }; -} - -export { - runTraceEngine, - getComputationDataFromFixture, -}; diff --git a/core/lib/lantern/metrics/SpeedIndex.js b/core/lib/lantern/metrics/SpeedIndex.js deleted file mode 100644 index 9fdbd173a522..000000000000 --- a/core/lib/lantern/metrics/SpeedIndex.js +++ /dev/null @@ -1,142 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from '../lantern.js'; - -const mobileSlow4GRtt = 150; - -/** @typedef {import('../BaseNode.js').Node} Node */ - -class SpeedIndex extends Lantern.Metric { - /** - * @return {Lantern.Simulation.MetricCoefficients} - */ - static get COEFFICIENTS() { - return { - // Note that the optimistic estimate is based on the real observed speed index rather than a - // real lantern graph (and the final estimate will be Math.max(FCP, Speed Index)). - intercept: 0, - optimistic: 1.4, - pessimistic: 0.4, - }; - } - - /** - * @param {number} rttMs - * @return {Lantern.Simulation.MetricCoefficients} - */ - static getScaledCoefficients(rttMs) { // eslint-disable-line no-unused-vars - // We want to scale our default coefficients based on the speed of the connection. - // We will linearly interpolate coefficients for the passed-in rttMs based on two pre-determined points: - // 1. Baseline point of 30 ms RTT where Speed Index should be a ~50/50 blend of optimistic/pessimistic. - // 30 ms was based on a typical home WiFi connection's actual RTT. - // Coefficients here follow from the fact that the optimistic estimate should be very close - // to reality at this connection speed and the pessimistic estimate compensates for minor - // connection speed differences. - // 2. Default throttled point of 150 ms RTT where the default coefficients have been determined to be most accurate. - // Coefficients here were determined through thorough analysis and linear regression on the - // lantern test data set. See core/scripts/test-lantern.sh for more detail. - // While the coefficients haven't been analyzed at the interpolated points, it's our current best effort. 
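Worked through for a few concrete RTT values, the interpolation behaves as follows (numbers follow from the code directly below and agree with the inline snapshots in the accompanying SpeedIndex test; the coefficients themselves weight the optimistic and pessimistic simulation estimates, roughly timing = intercept + optimistic * optimisticEstimate + pessimistic * pessimisticEstimate):

// defaults: {intercept: 0, optimistic: 1.4, pessimistic: 0.4}; defaultRttExcess = 150 - 30 = 120
// rttMs <= 30             multiplier = 0     -> {intercept: 0, optimistic: 0.5, pessimistic: 0.5}
// rttMs  = 150 (slow 4G)  multiplier = 1     -> the default coefficients above
// rttMs  = 300            multiplier = 2.25  -> {intercept: 0, optimistic: 2.525, pessimistic: 0.275}
//   e.g. optimistic at 300 ms RTT: 0.5 + (1.4 - 0.5) * 2.25 = 2.525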
- const defaultCoefficients = this.COEFFICIENTS; - const defaultRttExcess = mobileSlow4GRtt - 30; - const multiplier = Math.max((rttMs - 30) / defaultRttExcess, 0); - - return { - intercept: defaultCoefficients.intercept * multiplier, - optimistic: 0.5 + (defaultCoefficients.optimistic - 0.5) * multiplier, - pessimistic: 0.5 + (defaultCoefficients.pessimistic - 0.5) * multiplier, - }; - } - - /** - * @param {Node} dependencyGraph - * @return {Node} - */ - static getOptimisticGraph(dependencyGraph) { - return dependencyGraph; - } - - /** - * @param {Node} dependencyGraph - * @return {Node} - */ - static getPessimisticGraph(dependencyGraph) { - return dependencyGraph; - } - - /** - * @param {Lantern.Simulation.Result} simulationResult - * @param {import('../Metric.js').Extras} extras - * @return {Lantern.Simulation.Result} - */ - static getEstimateFromSimulation(simulationResult, extras) { - if (!extras.fcpResult) throw new Error('missing fcpResult'); - if (extras.observedSpeedIndex === undefined) throw new Error('missing observedSpeedIndex'); - - const fcpTimeInMs = extras.fcpResult.pessimisticEstimate.timeInMs; - const estimate = extras.optimistic - ? extras.observedSpeedIndex - : SpeedIndex.computeLayoutBasedSpeedIndex(simulationResult.nodeTimings, fcpTimeInMs); - return { - timeInMs: estimate, - nodeTimings: simulationResult.nodeTimings, - }; - } - - /** - * @param {Lantern.Simulation.MetricComputationDataInput} data - * @param {Omit=} extras - * @return {Promise} - */ - static async compute(data, extras) { - const fcpResult = extras?.fcpResult; - if (!fcpResult) { - throw new Error('FCP is required to calculate the SpeedIndex metric'); - } - - const metricResult = await super.compute(data, extras); - metricResult.timing = Math.max(metricResult.timing, fcpResult.timing); - return metricResult; - } - - /** - * Approximate speed index using layout events from the simulated node timings. - * The layout-based speed index is the weighted average of the endTime of CPU nodes that contained - * a 'Layout' task. log(duration) is used as the weight to stand for "significance" to the page. - * - * If no layout events can be found or the endTime of a CPU task is too early, FCP is used instead. - * - * This approach was determined after evaluating the accuracy/complexity tradeoff of many - * different methods. Read more in the evaluation doc. 
- * - * @see https://docs.google.com/document/d/1qJWXwxoyVLVadezIp_Tgdk867G3tDNkkVRvUJSH3K1E/edit# - * @param {Lantern.Simulation.Result['nodeTimings']} nodeTimings - * @param {number} fcpTimeInMs - * @return {number} - */ - static computeLayoutBasedSpeedIndex(nodeTimings, fcpTimeInMs) { - /** @type {Array<{time: number, weight: number}>} */ - const layoutWeights = []; - for (const [node, timing] of nodeTimings.entries()) { - if (node.type !== Lantern.BaseNode.TYPES.CPU) continue; - - if (node.childEvents.some(x => x.name === 'Layout')) { - const timingWeight = Math.max(Math.log2(timing.endTime - timing.startTime), 0); - layoutWeights.push({time: timing.endTime, weight: timingWeight}); - } - } - - const totalWeightedTime = layoutWeights - .map(evt => evt.weight * Math.max(evt.time, fcpTimeInMs)) - .reduce((a, b) => a + b, 0); - const totalWeight = layoutWeights.map(evt => evt.weight).reduce((a, b) => a + b, 0); - - if (!totalWeight) return fcpTimeInMs; - return totalWeightedTime / totalWeight; - } -} - -export {SpeedIndex}; diff --git a/core/lib/lantern/metrics/SpeedIndex.test.js b/core/lib/lantern/metrics/SpeedIndex.test.js deleted file mode 100644 index e22a181d5cfd..000000000000 --- a/core/lib/lantern/metrics/SpeedIndex.test.js +++ /dev/null @@ -1,83 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from '../lantern.js'; -import {readJson} from '../../../test/test-utils.js'; -import {getComputationDataFromFixture} from './MetricTestUtils.js'; - -const {SpeedIndex, FirstContentfulPaint} = Lantern.Metrics; - -const trace = readJson('../../../test/fixtures/artifacts/progressive-app/trace.json', import.meta); - -const defaultThrottling = Lantern.Simulation.Constants.throttling.mobileSlow4G; - -describe('Metrics: Lantern Speed Index', () => { - it('should compute predicted value', async () => { - const data = await getComputationDataFromFixture({trace}); - // TODO: observedSpeedIndex is from the Speedline library, and is used for optimistic - // mode. At the moment callers must pass the result into Lantern. - const observedSpeedIndex = 379.04474997520487; - const result = await SpeedIndex.compute(data, { - fcpResult: await FirstContentfulPaint.compute(data), - observedSpeedIndex, - }); - - expect({ - timing: Math.round(result.timing), - optimistic: Math.round(result.optimisticEstimate.timeInMs), - pessimistic: Math.round(result.pessimisticEstimate.timeInMs)}). -toMatchInlineSnapshot(` -Object { - "optimistic": 379, - "pessimistic": 1122, - "timing": 1107, -} -`); - }); - - it('should compute predicted value for different settings', async () => { - const settings = {throttlingMethod: 'simulate', throttling: {...defaultThrottling, rttMs: 300}}; - const data = await getComputationDataFromFixture({trace, settings}); - const observedSpeedIndex = 379.04474997520487; - const result = await SpeedIndex.compute(data, { - fcpResult: await FirstContentfulPaint.compute(data), - observedSpeedIndex, - }); - - expect({ - timing: Math.round(result.timing), - optimistic: Math.round(result.optimisticEstimate.timeInMs), - pessimistic: Math.round(result.pessimisticEstimate.timeInMs)}). 
-toMatchInlineSnapshot(` -Object { - "optimistic": 379, - "pessimistic": 2022, - "timing": 2007, -} -`); - }); - - it('should not scale coefficients at default', async () => { - const result = SpeedIndex.getScaledCoefficients(defaultThrottling.rttMs); - expect(result).toEqual(SpeedIndex.COEFFICIENTS); - }); - - it('should scale coefficients back', async () => { - const result = SpeedIndex.getScaledCoefficients(5); - expect(result).toEqual({intercept: 0, pessimistic: 0.5, optimistic: 0.5}); - }); - - it('should scale coefficients forward', async () => { - const result = SpeedIndex.getScaledCoefficients(300); - expect(result).toMatchInlineSnapshot(` - Object { - "intercept": 0, - "optimistic": 2.525, - "pessimistic": 0.275, - } - `); - }); -}); diff --git a/core/lib/lantern/metrics/TotalBlockingTime.js b/core/lib/lantern/metrics/TotalBlockingTime.js deleted file mode 100644 index 6e94d88265a4..000000000000 --- a/core/lib/lantern/metrics/TotalBlockingTime.js +++ /dev/null @@ -1,125 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from '../lantern.js'; - -/** @typedef {import('../BaseNode.js').Node} Node */ - -class TotalBlockingTime extends Lantern.Metric { - /** - * @return {Lantern.Simulation.MetricCoefficients} - */ - static get COEFFICIENTS() { - return { - intercept: 0, - optimistic: 0.5, - pessimistic: 0.5, - }; - } - - /** - * @param {Node} dependencyGraph - * @return {Node} - */ - static getOptimisticGraph(dependencyGraph) { - return dependencyGraph; - } - - /** - * @param {Node} dependencyGraph - * @return {Node} - */ - static getPessimisticGraph(dependencyGraph) { - return dependencyGraph; - } - - /** - * @param {Lantern.Simulation.Result} simulation - * @param {import('../Metric.js').Extras} extras - * @return {Lantern.Simulation.Result} - */ - static getEstimateFromSimulation(simulation, extras) { - if (!extras.fcpResult) throw new Error('missing fcpResult'); - if (!extras.interactiveResult) throw new Error('missing interactiveResult'); - - // Intentionally use the opposite FCP estimate. A pessimistic FCP is higher than or equal to an - // optimistic FCP, which means potentially more tasks are excluded from the Total Blocking Time - // computation. So a more pessimistic FCP gives a more optimistic Total Blocking Time for the - // same work. - const fcpTimeInMs = extras.optimistic - ? extras.fcpResult.pessimisticEstimate.timeInMs - : extras.fcpResult.optimisticEstimate.timeInMs; - - // Similarly, we always have pessimistic TTI >= optimistic TTI. Therefore, picking optimistic - // TTI means our window of interest is smaller and thus potentially more tasks are excluded from - // Total Blocking Time computation, yielding a lower (more optimistic) Total Blocking Time value - // for the same work. - const interactiveTimeMs = extras.optimistic - ?
extras.interactiveResult.optimisticEstimate.timeInMs - : extras.interactiveResult.pessimisticEstimate.timeInMs; - - const minDurationMs = Lantern.TBTUtils.BLOCKING_TIME_THRESHOLD; - - const events = TotalBlockingTime.getTopLevelEvents( - simulation.nodeTimings, - minDurationMs - ); - - return { - timeInMs: Lantern.TBTUtils.calculateSumOfBlockingTime( - events, - fcpTimeInMs, - interactiveTimeMs - ), - nodeTimings: simulation.nodeTimings, - }; - } - - /** - * @param {Lantern.Simulation.MetricComputationDataInput} data - * @param {Omit=} extras - * @return {Promise} - */ - static async compute(data, extras) { - const fcpResult = extras?.fcpResult; - if (!fcpResult) { - throw new Error('FCP is required to calculate the TBT metric'); - } - - const interactiveResult = extras?.interactiveResult; - if (!interactiveResult) { - throw new Error('Interactive is required to calculate the TBT metric'); - } - - return super.compute(data, extras); - } - - /** - * @param {Lantern.Simulation.Result['nodeTimings']} nodeTimings - * @param {number} minDurationMs - */ - static getTopLevelEvents(nodeTimings, minDurationMs) { - /** @type {Array<{start: number, end: number, duration: number}>} - */ - const events = []; - - for (const [node, timing] of nodeTimings.entries()) { - if (node.type !== Lantern.BaseNode.TYPES.CPU) continue; - // Filtering out events below minimum duration. - if (timing.duration < minDurationMs) continue; - - events.push({ - start: timing.startTime, - end: timing.endTime, - duration: timing.duration, - }); - } - - return events; - } -} - -export {TotalBlockingTime}; diff --git a/core/lib/lantern/metrics/__snapshots__/FirstContentfulPaint.test.js.snap b/core/lib/lantern/metrics/__snapshots__/FirstContentfulPaint.test.js.snap deleted file mode 100644 index de5ef4c68fb9..000000000000 --- a/core/lib/lantern/metrics/__snapshots__/FirstContentfulPaint.test.js.snap +++ /dev/null @@ -1,11 +0,0 @@ -// Jest Snapshot v1, https://goo.gl/fbAQLP - -exports[`Metrics: Lantern FCP should compute predicted value 1`] = ` -Object { - "optimistic": 1107, - "optimisticNodeTimings": 4, - "pessimistic": 1107, - "pessimisticNodeTimings": 4, - "timing": 1107, -} -`; diff --git a/core/lib/lantern/metrics/__snapshots__/Interactive.test.js.snap b/core/lib/lantern/metrics/__snapshots__/Interactive.test.js.snap deleted file mode 100644 index b796d0f59af0..000000000000 --- a/core/lib/lantern/metrics/__snapshots__/Interactive.test.js.snap +++ /dev/null @@ -1,17 +0,0 @@ -// Jest Snapshot v1, https://goo.gl/fbAQLP - -exports[`Metrics: Lantern TTI should compute predicted value 1`] = ` -Object { - "optimistic": 1107, - "pessimistic": 1134, - "timing": 1122, -} -`; - -exports[`Metrics: Lantern TTI should compute predicted value on iframes with substantial layout 1`] = ` -Object { - "optimistic": 2372, - "pessimistic": 2386, - "timing": 2379, -} -`; diff --git a/core/lib/lantern/metrics/metrics.js b/core/lib/lantern/metrics/metrics.js deleted file mode 100644 index e8d20b4ad2a7..000000000000 --- a/core/lib/lantern/metrics/metrics.js +++ /dev/null @@ -1,16 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from '../types/lantern.js'; - -export {FirstContentfulPaint} from './FirstContentfulPaint.js'; -export {Interactive} from './Interactive.js'; -export {LargestContentfulPaint} from './LargestContentfulPaint.js'; -export {MaxPotentialFID} from './MaxPotentialFID.js'; -export {SpeedIndex} from './SpeedIndex.js'; -export {TotalBlockingTime} from
'./TotalBlockingTime.js'; - -/** @template [T=any] @typedef {Lantern.MetricResult} Result */ diff --git a/core/lib/lantern/simulation/ConnectionPool.js b/core/lib/lantern/simulation/ConnectionPool.js deleted file mode 100644 index f08f92147a64..000000000000 --- a/core/lib/lantern/simulation/ConnectionPool.js +++ /dev/null @@ -1,152 +0,0 @@ -/** - * @license - * Copyright 2018 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from '../lantern.js'; -import {NetworkAnalyzer} from './NetworkAnalyzer.js'; -import {TcpConnection} from './TcpConnection.js'; - -const DEFAULT_SERVER_RESPONSE_TIME = 30; -const TLS_SCHEMES = ['https', 'wss']; - -// Each origin can have 6 simultaneous connections open -// https://cs.chromium.org/chromium/src/net/socket/client_socket_pool_manager.cc?type=cs&q="int+g_max_sockets_per_group" -const CONNECTIONS_PER_ORIGIN = 6; - -export class ConnectionPool { - /** - * @param {Lantern.NetworkRequest[]} records - * @param {Required} options - */ - constructor(records, options) { - this._options = options; - - this._records = records; - /** @type {Map} */ - this._connectionsByOrigin = new Map(); - /** @type {Map} */ - this._connectionsByRequest = new Map(); - this._connectionsInUse = new Set(); - this._connectionReusedByRequestId = NetworkAnalyzer.estimateIfConnectionWasReused(records, { - forceCoarseEstimates: true, - }); - - this._initializeConnections(); - } - - /** - * @return {TcpConnection[]} - */ - connectionsInUse() { - return Array.from(this._connectionsInUse); - } - - _initializeConnections() { - const connectionReused = this._connectionReusedByRequestId; - const additionalRttByOrigin = this._options.additionalRttByOrigin; - const serverResponseTimeByOrigin = this._options.serverResponseTimeByOrigin; - - const recordsByOrigin = NetworkAnalyzer.groupByOrigin(this._records); - for (const [origin, requests] of recordsByOrigin.entries()) { - const connections = []; - const additionalRtt = additionalRttByOrigin.get(origin) || 0; - const responseTime = serverResponseTimeByOrigin.get(origin) || DEFAULT_SERVER_RESPONSE_TIME; - - for (const request of requests) { - if (connectionReused.get(request.requestId)) continue; - - const isTLS = TLS_SCHEMES.includes(request.parsedURL.scheme); - const isH2 = request.protocol === 'h2'; - const connection = new TcpConnection( - this._options.rtt + additionalRtt, - this._options.throughput, - responseTime, - isTLS, - isH2 - ); - - connections.push(connection); - } - - if (!connections.length) { - throw new Error(`Could not find a connection for origin: ${origin}`); - } - - // Make sure each origin has minimum number of connections available for max throughput. - // But only if it's not over H2 which maximizes throughput already. - const minConnections = connections[0].isH2() ? 1 : CONNECTIONS_PER_ORIGIN; - while (connections.length < minConnections) connections.push(connections[0].clone()); - - this._connectionsByOrigin.set(origin, connections); - } - } - - /** - * @param {Array} connections - */ - _findAvailableConnectionWithLargestCongestionWindow(connections) { - /** @type {TcpConnection|null} */ - let maxConnection = null; - for (let i = 0; i < connections.length; i++) { - const connection = connections[i]; - - // Connections that are in use are never available. - if (this._connectionsInUse.has(connection)) { - continue; - } - - // This connection is a match and is available!
Update our max if it has a larger congestionWindow - const currentMax = (maxConnection?.congestionWindow) || -Infinity; - if (connection.congestionWindow > currentMax) maxConnection = connection; - } - - return maxConnection; - } - - /** - * This method finds an available connection to the origin specified by the network request or null - * if no connection was available. If returned, connection will not be available for other network - * records until release is called. - * - * @param {Lantern.NetworkRequest} request - * @return {?TcpConnection} - */ - acquire(request) { - if (this._connectionsByRequest.has(request)) throw new Error('Record already has a connection'); - - const origin = request.parsedURL.securityOrigin; - const connections = this._connectionsByOrigin.get(origin) || []; - const connectionToUse = this._findAvailableConnectionWithLargestCongestionWindow(connections); - - if (!connectionToUse) return null; - - this._connectionsInUse.add(connectionToUse); - this._connectionsByRequest.set(request, connectionToUse); - return connectionToUse; - } - - /** - * Return the connection currently being used to fetch a request. If no connection - * currently being used for this request, an error will be thrown. - * - * @param {Lantern.NetworkRequest} request - * @return {TcpConnection} - */ - acquireActiveConnectionFromRequest(request) { - const activeConnection = this._connectionsByRequest.get(request); - if (!activeConnection) throw new Error('Could not find an active connection for request'); - - return activeConnection; - } - - /** - * @param {Lantern.NetworkRequest} request - */ - release(request) { - const connection = this._connectionsByRequest.get(request); - this._connectionsByRequest.delete(request); - this._connectionsInUse.delete(connection); - } -} diff --git a/core/lib/lantern/simulation/ConnectionPool.test.js b/core/lib/lantern/simulation/ConnectionPool.test.js deleted file mode 100644 index a0fd3b094dc5..000000000000 --- a/core/lib/lantern/simulation/ConnectionPool.test.js +++ /dev/null @@ -1,195 +0,0 @@ -/** - * @license - * Copyright 2018 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import assert from 'assert/strict'; -import {URL} from 'url'; - -import * as Lantern from '../lantern.js'; - -const {ConnectionPool} = Lantern.Simulation; - -describe('ConnectionPool', () => { - const rtt = 100; - const throughput = 10000 * 1024; - let requestId; - - function request(data = {}) { - const url = data.url || 'http://example.com'; - const origin = new URL(url).origin; - const scheme = url.split(':')[0]; - - return Object.assign({ - requestId: requestId++, - url, - protocol: 'http/1.1', - parsedURL: {scheme, securityOrigin: origin}, - }, data); - } - - function simulationOptions(options) { - return Object.assign( - { - rtt: 150, - throughput: 1024, - additionalRttByOrigin: new Map(), - serverResponseTimeByOrigin: new Map(), - }, - options - ); - } - - beforeEach(() => { - requestId = 1; - }); - - describe('#constructor', () => { - it('should create the pool', () => { - const pool = new ConnectionPool([request()], simulationOptions({rtt, throughput})); - // Make sure 6 connections are created for each origin - assert.equal(pool._connectionsByOrigin.get('http://example.com').length, 6); - // Make sure it populates connectionWasReused - assert.equal(pool._connectionReusedByRequestId.get(1), false); - - const connection = pool._connectionsByOrigin.get('http://example.com')[0]; - assert.equal(connection._rtt, rtt); - assert.equal(connection._throughput, throughput); - 
assert.equal(connection._serverLatency, 30); // sets to default value - }); - - it('should set TLS properly', () => { - const recordA = request({url: 'https://example.com'}); - const pool = new ConnectionPool([recordA], simulationOptions({rtt, throughput})); - const connection = pool._connectionsByOrigin.get('https://example.com')[0]; - assert.ok(connection._ssl, 'should have set connection TLS'); - }); - - it('should set H2 properly', () => { - const recordA = request({protocol: 'h2'}); - const pool = new ConnectionPool([recordA], simulationOptions({rtt, throughput})); - const connection = pool._connectionsByOrigin.get('http://example.com')[0]; - assert.ok(connection.isH2(), 'should have set HTTP/2'); - assert.equal(pool._connectionsByOrigin.get('http://example.com').length, 1); - }); - - it('should set origin-specific RTT properly', () => { - const additionalRttByOrigin = new Map([['http://example.com', 63]]); - const pool = new ConnectionPool([request()], - simulationOptions({rtt, throughput, additionalRttByOrigin})); - const connection = pool._connectionsByOrigin.get('http://example.com')[0]; - assert.ok(connection._rtt, rtt + 63); - }); - - it('should set origin-specific server latency properly', () => { - const serverResponseTimeByOrigin = new Map([['http://example.com', 63]]); - const pool = new ConnectionPool([request()], - simulationOptions({rtt, throughput, serverResponseTimeByOrigin})); - const connection = pool._connectionsByOrigin.get('http://example.com')[0]; - assert.ok(connection._serverLatency, 63); - }); - }); - - describe('.acquire', () => { - it('should remember the connection associated with each request', () => { - const requestA = request(); - const requestB = request(); - const pool = new ConnectionPool([requestA, requestB], simulationOptions({rtt, throughput})); - - const connectionForA = pool.acquire(requestA); - const connectionForB = pool.acquire(requestB); - for (let i = 0; i < 10; i++) { - assert.equal(pool.acquireActiveConnectionFromRequest(requestA), connectionForA); - assert.equal(pool.acquireActiveConnectionFromRequest(requestB), connectionForB); - } - - assert.deepStrictEqual(pool.connectionsInUse(), [connectionForA, connectionForB]); - }); - - it('should allocate at least 6 connections', () => { - const pool = new ConnectionPool([request()], simulationOptions({rtt, throughput})); - for (let i = 0; i < 6; i++) { - assert.ok(pool.acquire(request()), `did not find connection for ${i}th request`); - } - }); - - it('should allocate all connections', () => { - const records = new Array(7).fill(undefined, 0, 7).map(() => request()); - const pool = new ConnectionPool(records, simulationOptions({rtt, throughput})); - const connections = records.map(request => pool.acquire(request)); - assert.ok(connections[0], 'did not find connection for 1st request'); - assert.ok(connections[5], 'did not find connection for 6th request'); - assert.ok(connections[6], 'did not find connection for 7th request'); - }); - - it('should be oblivious to connection reuse', () => { - const coldRecord = request(); - const warmRecord = request(); - const pool = new ConnectionPool([coldRecord, warmRecord], - simulationOptions({rtt, throughput})); - pool._connectionReusedByRequestId.set(warmRecord.requestId, true); - - assert.ok(pool.acquire(coldRecord), 'should have acquired connection'); - assert.ok(pool.acquire(warmRecord), 'should have acquired connection'); - pool.release(coldRecord); - - for (const connection of pool._connectionsByOrigin.get('http://example.com')) { - 
connection.setWarmed(true); - } - - assert.ok(pool.acquire(coldRecord), 'should have acquired connection'); - assert.ok(pool.acquireActiveConnectionFromRequest(warmRecord), - 'should have acquired connection'); - }); - - it('should acquire in order of warmness', () => { - const recordA = request(); - const recordB = request(); - const recordC = request(); - const pool = new ConnectionPool([recordA, recordB, recordC], - simulationOptions({rtt, throughput})); - pool._connectionReusedByRequestId.set(recordA.requestId, true); - pool._connectionReusedByRequestId.set(recordB.requestId, true); - pool._connectionReusedByRequestId.set(recordC.requestId, true); - - const [connectionWarm, connectionWarmer, connectionWarmest] = - pool._connectionsByOrigin.get('http://example.com'); - connectionWarm.setWarmed(true); - connectionWarm.setCongestionWindow(10); - connectionWarmer.setWarmed(true); - connectionWarmer.setCongestionWindow(100); - connectionWarmest.setWarmed(true); - connectionWarmest.setCongestionWindow(1000); - - assert.equal(pool.acquire(recordA), connectionWarmest); - assert.equal(pool.acquire(recordB), connectionWarmer); - assert.equal(pool.acquire(recordC), connectionWarm); - }); - }); - - describe('.release', () => { - it('noop for request without connection', () => { - const requestA = request(); - const pool = new ConnectionPool([requestA], simulationOptions({rtt, throughput})); - assert.equal(pool.release(requestA), undefined); - }); - - it('frees the connection for reissue', () => { - const requests = new Array(6).fill(undefined, 0, 7).map(() => request()); - const pool = new ConnectionPool(requests, simulationOptions({rtt, throughput})); - requests.push(request()); - - requests.forEach(request => pool.acquire(request)); - - assert.equal(pool.connectionsInUse().length, 6); - assert.ok(!pool.acquire(requests[6]), 'had connection that is in use'); - - pool.release(requests[0]); - assert.equal(pool.connectionsInUse().length, 5); - - assert.ok(pool.acquire(requests[6]), 'could not reissue released connection'); - assert.ok(!pool.acquire(requests[0]), 'had connection that is in use'); - }); - }); -}); diff --git a/core/lib/lantern/simulation/Constants.js b/core/lib/lantern/simulation/Constants.js deleted file mode 100644 index ecbb5ab83533..000000000000 --- a/core/lib/lantern/simulation/Constants.js +++ /dev/null @@ -1,48 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -const DEVTOOLS_RTT_ADJUSTMENT_FACTOR = 3.75; -const DEVTOOLS_THROUGHPUT_ADJUSTMENT_FACTOR = 0.9; - -const throttling = { - DEVTOOLS_RTT_ADJUSTMENT_FACTOR, - DEVTOOLS_THROUGHPUT_ADJUSTMENT_FACTOR, - // These values align with WebPageTest's definition of "Fast 3G" - // But offer similar characteristics to roughly the 75th percentile of 4G connections. - mobileSlow4G: { - rttMs: 150, - throughputKbps: 1.6 * 1024, - requestLatencyMs: 150 * DEVTOOLS_RTT_ADJUSTMENT_FACTOR, - downloadThroughputKbps: 1.6 * 1024 * DEVTOOLS_THROUGHPUT_ADJUSTMENT_FACTOR, - uploadThroughputKbps: 750 * DEVTOOLS_THROUGHPUT_ADJUSTMENT_FACTOR, - cpuSlowdownMultiplier: 4, - }, - // These values partially align with WebPageTest's definition of "Regular 3G". - // These values are meant to roughly align with Chrome UX report's 3G definition which are based - // on HTTP RTT of 300-1400ms and downlink throughput of <700kbps. 
- mobileRegular3G: { - rttMs: 300, - throughputKbps: 700, - requestLatencyMs: 300 * DEVTOOLS_RTT_ADJUSTMENT_FACTOR, - downloadThroughputKbps: 700 * DEVTOOLS_THROUGHPUT_ADJUSTMENT_FACTOR, - uploadThroughputKbps: 700 * DEVTOOLS_THROUGHPUT_ADJUSTMENT_FACTOR, - cpuSlowdownMultiplier: 4, - }, - // Using a "broadband" connection type - // Corresponds to "Dense 4G 25th percentile" in https://docs.google.com/document/d/1Ft1Bnq9-t4jK5egLSOc28IL4TvR-Tt0se_1faTA4KTY/edit#heading=h.bb7nfy2x9e5v - desktopDense4G: { - rttMs: 40, - throughputKbps: 10 * 1024, - cpuSlowdownMultiplier: 1, - requestLatencyMs: 0, // 0 means unset - downloadThroughputKbps: 0, - uploadThroughputKbps: 0, - }, -}; - -const Constants = {throttling}; - -export {Constants}; diff --git a/core/lib/lantern/simulation/DNSCache.js b/core/lib/lantern/simulation/DNSCache.js deleted file mode 100644 index f1d9d2347cbc..000000000000 --- a/core/lib/lantern/simulation/DNSCache.js +++ /dev/null @@ -1,74 +0,0 @@ -/** - * @license - * Copyright 2018 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from '../lantern.js'; - -// A DNS lookup will usually take ~1-2 roundtrips of connection latency plus the extra DNS routing time. -// Example: https://www.webpagetest.org/result/180703_3A_e33ec79747c002ed4d7bcbfc81462203/1/details/#waterfall_view_step1 -// Example: https://www.webpagetest.org/result/180707_1M_89673eb633b5d98386de95dfcf9b33d5/1/details/#waterfall_view_step1 -// DNS is highly variable though, many times it's a little more than 1, but can easily be 4-5x RTT. -// We'll use 2 since it seems to give the most accurate results on average, but this can be tweaked. -const DNS_RESOLUTION_RTT_MULTIPLIER = 2; - -class DNSCache { - /** - * @param {{rtt: number}} options - */ - constructor({rtt}) { - this._rtt = rtt; - - /** @type {Map} */ - this._resolvedDomainNames = new Map(); - } - - /** - * @param {Lantern.NetworkRequest} request - * @param {{requestedAt: number, shouldUpdateCache: boolean}=} options - * @return {number} - */ - getTimeUntilResolution(request, options) { - const {requestedAt = 0, shouldUpdateCache = false} = options || {}; - - const domain = request.parsedURL.host; - const cacheEntry = this._resolvedDomainNames.get(domain); - let timeUntilResolved = this._rtt * DNSCache.RTT_MULTIPLIER; - if (cacheEntry) { - const timeUntilCachedIsResolved = Math.max(cacheEntry.resolvedAt - requestedAt, 0); - timeUntilResolved = Math.min(timeUntilCachedIsResolved, timeUntilResolved); - } - - const resolvedAt = requestedAt + timeUntilResolved; - if (shouldUpdateCache) this._updateCacheResolvedAtIfNeeded(request, resolvedAt); - - return timeUntilResolved; - } - - /** - * @param {Lantern.NetworkRequest} request - * @param {number} resolvedAt - */ - _updateCacheResolvedAtIfNeeded(request, resolvedAt) { - const domain = request.parsedURL.host; - const cacheEntry = this._resolvedDomainNames.get(domain) || {resolvedAt}; - cacheEntry.resolvedAt = Math.min(cacheEntry.resolvedAt, resolvedAt); - this._resolvedDomainNames.set(domain, cacheEntry); - } - - /** - * Forcefully sets the DNS resolution time for a request. - * Useful for testing and alternate execution simulations. 
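For reference, the cache deleted here charges an unresolved host roughly two round trips (RTT_MULTIPLIER = 2) and, when asked to update its cache, remembers the earliest resolution time per domain so later requests to the same host wait no extra time. A minimal sketch of that behaviour, mirroring the unit tests below (the request literals are illustrative and the import path refers to the tree before this removal):

import * as Lantern from '../lantern.js'; // pre-removal path
const {DNSCache} = Lantern.Simulation;

const dns = new DNSCache({rtt: 100});
const request = {parsedURL: {host: 'example.com'}};

// Cold lookup: rtt * DNSCache.RTT_MULTIPLIER = 100 * 2 = 200ms, and record it in the cache.
dns.getTimeUntilResolution(request, {requestedAt: 0, shouldUpdateCache: true}); // 200

// example.com is now treated as resolved at t=200, so a request issued at t=1000 waits 0ms.
dns.getTimeUntilResolution(request, {requestedAt: 1000}); // 0

// Other hosts are unaffected and still pay the full two round trips.
dns.getTimeUntilResolution({parsedURL: {host: 'other.example'}}, {requestedAt: 1000}); // 200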
- * - * @param {string} domain - * @param {number} resolvedAt - */ - setResolvedAt(domain, resolvedAt) { - this._resolvedDomainNames.set(domain, {resolvedAt}); - } -} - -DNSCache.RTT_MULTIPLIER = DNS_RESOLUTION_RTT_MULTIPLIER; - -export {DNSCache}; diff --git a/core/lib/lantern/simulation/DNSCache.test.js b/core/lib/lantern/simulation/DNSCache.test.js deleted file mode 100644 index b795193ed4be..000000000000 --- a/core/lib/lantern/simulation/DNSCache.test.js +++ /dev/null @@ -1,72 +0,0 @@ -/** - * @license - * Copyright 2018 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from '../lantern.js'; - -const {DNSCache} = Lantern.Simulation; - -const MULTIPLIER = DNSCache.RTT_MULTIPLIER; - -describe('DNSCache', () => { - let dns; - let request; - - beforeEach(() => { - dns = new DNSCache({rtt: 100}); - request = {parsedURL: {host: 'example.com'}}; - }); - - describe('.getTimeUntilResolution', () => { - it('should return the RTT multiplied', () => { - const resolutionTime = dns.getTimeUntilResolution(request); - expect(resolutionTime).toBe(100 * MULTIPLIER); - }); - - it('should return time with requestedAt', () => { - const resolutionTime = dns.getTimeUntilResolution(request, {requestedAt: 1500}); - expect(resolutionTime).toBe(100 * MULTIPLIER); - }); - - it('should not cache by default', () => { - dns.getTimeUntilResolution(request, {requestedAt: 0}); - const resolutionTime = dns.getTimeUntilResolution(request, {requestedAt: 1000}); - expect(resolutionTime).toBe(100 * MULTIPLIER); - }); - - it('should cache when told', () => { - dns.getTimeUntilResolution(request, {requestedAt: 0, shouldUpdateCache: true}); - const resolutionTime = dns.getTimeUntilResolution(request, {requestedAt: 1000}); - expect(resolutionTime).toBe(0); - }); - - it('should cache by domain', () => { - dns.getTimeUntilResolution(request, {requestedAt: 0, shouldUpdateCache: true}); - const otherRequest = {parsedURL: {host: 'other-example.com'}}; - const resolutionTime = dns.getTimeUntilResolution(otherRequest, {requestedAt: 1000}); - expect(resolutionTime).toBe(100 * MULTIPLIER); - }); - - it('should not update cache with later times', () => { - dns.getTimeUntilResolution(request, {requestedAt: 1000, shouldUpdateCache: true}); - dns.getTimeUntilResolution(request, {requestedAt: 1500, shouldUpdateCache: true}); - dns.getTimeUntilResolution(request, {requestedAt: 500, shouldUpdateCache: true}); - dns.getTimeUntilResolution(request, {requestedAt: 5000, shouldUpdateCache: true}); - - expect(dns.getTimeUntilResolution(request, {requestedAt: 0})).toBe(100 * MULTIPLIER); - expect(dns.getTimeUntilResolution(request, {requestedAt: 550})).toBe(100 * MULTIPLIER - 50); - expect(dns.getTimeUntilResolution(request, {requestedAt: 1000})).toBe(0); - expect(dns.getTimeUntilResolution(request, {requestedAt: 2000})).toBe(0); - }); - }); - - describe('.setResolvedAt', () => { - it('should set the DNS resolution time for a request', () => { - dns.setResolvedAt(request.parsedURL.host, 123); - const resolutionTime = dns.getTimeUntilResolution(request); - expect(resolutionTime).toEqual(123); - }); - }); -}); diff --git a/core/lib/lantern/simulation/NetworkAnalyzer.js b/core/lib/lantern/simulation/NetworkAnalyzer.js deleted file mode 100644 index 667a5d4f29a2..000000000000 --- a/core/lib/lantern/simulation/NetworkAnalyzer.js +++ /dev/null @@ -1,597 +0,0 @@ -/** - * @license - * Copyright 2018 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from '../lantern.js'; -import UrlUtils from 
'../../url-utils.js'; - -const INITIAL_CWD = 14 * 1024; - -// Assume that 40% of TTFB was server response time by default for static assets -const DEFAULT_SERVER_RESPONSE_PERCENTAGE = 0.4; - -/** - * For certain resource types, server response time takes up a greater percentage of TTFB (dynamic - * assets like HTML documents, XHR/API calls, etc) - * @type {Partial>} - */ -const SERVER_RESPONSE_PERCENTAGE_OF_TTFB = { - Document: 0.9, - XHR: 0.9, - Fetch: 0.9, -}; - -class NetworkAnalyzer { - /** - * @return {string} - */ - static get SUMMARY() { - return '__SUMMARY__'; - } - - /** - * @param {Lantern.NetworkRequest[]} records - * @return {Map} - */ - static groupByOrigin(records) { - const grouped = new Map(); - records.forEach(item => { - const key = item.parsedURL.securityOrigin; - const group = grouped.get(key) || []; - group.push(item); - grouped.set(key, group); - }); - return grouped; - } - - /** - * @param {number[]} values - * @return {Summary} - */ - static getSummary(values) { - values.sort((a, b) => a - b); - - let median; - if (values.length === 0) { - median = values[0]; - } else if (values.length % 2 === 0) { - const a = values[Math.floor((values.length - 1) / 2)]; - const b = values[Math.floor((values.length - 1) / 2) + 1]; - median = (a + b) / 2; - } else { - median = values[Math.floor((values.length - 1) / 2)]; - } - - return { - min: values[0], - max: values[values.length - 1], - avg: values.reduce((a, b) => a + b, 0) / values.length, - median, - }; - } - - /** - * @param {Map} values - * @return {Map} - */ - static summarize(values) { - const summaryByKey = new Map(); - const allEstimates = []; - for (const [key, estimates] of values) { - summaryByKey.set(key, NetworkAnalyzer.getSummary(estimates)); - allEstimates.push(...estimates); - } - - summaryByKey.set(NetworkAnalyzer.SUMMARY, NetworkAnalyzer.getSummary(allEstimates)); - return summaryByKey; - } - - /** @typedef {{request: Lantern.NetworkRequest, timing: Lantern.ResourceTiming, connectionReused?: boolean}} RequestInfo */ - - /** - * @param {Lantern.NetworkRequest[]} requests - * @param {(e: RequestInfo) => number | number[] | undefined} iteratee - * @return {Map} - */ - static _estimateValueByOrigin(requests, iteratee) { - const connectionWasReused = NetworkAnalyzer.estimateIfConnectionWasReused(requests); - const groupedByOrigin = NetworkAnalyzer.groupByOrigin(requests); - - const estimates = new Map(); - for (const [origin, originRequests] of groupedByOrigin.entries()) { - /** @type {number[]} */ - let originEstimates = []; - - for (const request of originRequests) { - const timing = request.timing; - if (!timing) continue; - - const value = iteratee({ - request, - timing, - connectionReused: connectionWasReused.get(request.requestId), - }); - if (typeof value !== 'undefined') { - originEstimates = originEstimates.concat(value); - } - } - - if (!originEstimates.length) continue; - estimates.set(origin, originEstimates); - } - - return estimates; - } - - /** - * Estimates the observed RTT to each origin based on how long the connection setup. - * For h1 and h2, this could includes two estimates - one for the TCP handshake, another for - * SSL negotiation. - * For h3, we get only one estimate since QUIC establishes a secure connection in a - * single handshake. - * This is the most accurate and preferred method of measurement when the data is available. 
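A worked example of this estimator, using the same numbers as the unit tests below (the info literal is illustrative; the import refers to the tree before this removal):

import * as Lantern from '../lantern.js'; // pre-removal path
const {NetworkAnalyzer} = Lantern.Simulation;

// Fresh h1 connection with a distinct SSL phase: two RTT samples come back, one for
// the TLS exchange and one for the TCP handshake (assuming TLS False Start).
const info = {
  connectionReused: false,
  request: {protocol: 'http/1.1'},
  timing: {connectStart: 0, sslStart: 50, sslEnd: 99, connectEnd: 99},
};
NetworkAnalyzer._estimateRTTViaConnectionTiming(info);
// -> [connectEnd - sslStart, sslStart - connectStart] = [49, 50]
// An h3 request with the same timing would yield a single sample instead,
// connectEnd - connectStart = 99, since QUIC folds transport and crypto setup together.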
- * - * @param {RequestInfo} info - * @return {number[]|number|undefined} - */ - static _estimateRTTViaConnectionTiming(info) { - const {timing, connectionReused, request} = info; - if (connectionReused) return; - - const {connectStart, sslStart, sslEnd, connectEnd} = timing; - if (connectEnd >= 0 && connectStart >= 0 && request.protocol.startsWith('h3')) { - // These values are equal to sslStart and sslEnd for h3. - return connectEnd - connectStart; - } else if (sslStart >= 0 && sslEnd >= 0 && sslStart !== connectStart) { - // SSL can also be more than 1 RT but assume False Start was used. - return [connectEnd - sslStart, sslStart - connectStart]; - } else if (connectStart >= 0 && connectEnd >= 0) { - return connectEnd - connectStart; - } - } - - /** - * Estimates the observed RTT to each origin based on how long a download took on a fresh connection. - * NOTE: this will tend to overestimate the actual RTT quite significantly as the download can be - * slow for other reasons as well such as bandwidth constraints. - * - * @param {RequestInfo} info - * @return {number|undefined} - */ - static _estimateRTTViaDownloadTiming(info) { - const {timing, connectionReused, request} = info; - if (connectionReused) return; - - // Only look at downloads that went past the initial congestion window - if (request.transferSize <= INITIAL_CWD) return; - if (!Number.isFinite(timing.receiveHeadersEnd) || timing.receiveHeadersEnd < 0) return; - - // Compute the amount of time downloading everything after the first congestion window took - const totalTime = request.networkEndTime - request.networkRequestTime; - const downloadTimeAfterFirstByte = totalTime - timing.receiveHeadersEnd; - const numberOfRoundTrips = Math.log2(request.transferSize / INITIAL_CWD); - - // Ignore requests that required a high number of round trips since bandwidth starts to play - // a larger role than latency - if (numberOfRoundTrips > 5) return; - - return downloadTimeAfterFirstByte / numberOfRoundTrips; - } - - /** - * Estimates the observed RTT to each origin based on how long it took until Chrome could - * start sending the actual request when a new connection was required. - * NOTE: this will tend to overestimate the actual RTT as the request can be delayed for other - * reasons as well such as more SSL handshakes if TLS False Start is not enabled. - * - * @param {RequestInfo} info - * @return {number|undefined} - */ - static _estimateRTTViaSendStartTiming(info) { - const {timing, connectionReused, request} = info; - if (connectionReused) return; - - if (!Number.isFinite(timing.sendStart) || timing.sendStart < 0) return; - - // Assume everything before sendStart was just DNS + (SSL)? + TCP handshake - // 1 RT for DNS, 1 RT (maybe) for SSL, 1 RT for TCP - let roundTrips = 1; - if (!request.protocol.startsWith('h3')) roundTrips += 1; // TCP - if (request.parsedURL.scheme === 'https') roundTrips += 1; - return timing.sendStart / roundTrips; - } - - /** - * Estimates the observed RTT to each origin based on how long it took until Chrome received the - * headers of the response (~TTFB). 
- * NOTE: this is the most inaccurate way to estimate the RTT, but in some environments it's all - * we have access to :( - * - * @param {RequestInfo} info - * @return {number|undefined} - */ - static _estimateRTTViaHeadersEndTiming(info) { - const {timing, connectionReused, request} = info; - if (!Number.isFinite(timing.receiveHeadersEnd) || timing.receiveHeadersEnd < 0) return; - if (!request.resourceType) return; - - const serverResponseTimePercentage = - SERVER_RESPONSE_PERCENTAGE_OF_TTFB[request.resourceType] || - DEFAULT_SERVER_RESPONSE_PERCENTAGE; - const estimatedServerResponseTime = timing.receiveHeadersEnd * serverResponseTimePercentage; - - // When connection was reused... - // TTFB = 1 RT for request + server response time - let roundTrips = 1; - - // When connection was fresh... - // TTFB = DNS + (SSL)? + TCP handshake + 1 RT for request + server response time - if (!connectionReused) { - roundTrips += 1; // DNS - if (!request.protocol.startsWith('h3')) roundTrips += 1; // TCP - if (request.parsedURL.scheme === 'https') roundTrips += 1; // SSL - } - - // subtract out our estimated server response time - return Math.max((timing.receiveHeadersEnd - estimatedServerResponseTime) / roundTrips, 3); - } - - /** - * Given the RTT to each origin, estimates the observed server response times. - * - * @param {Lantern.NetworkRequest[]} records - * @param {Map} rttByOrigin - * @return {Map} - */ - static _estimateResponseTimeByOrigin(records, rttByOrigin) { - return NetworkAnalyzer._estimateValueByOrigin(records, ({request, timing}) => { - if (request.serverResponseTime !== undefined) return request.serverResponseTime; - - if (!Number.isFinite(timing.receiveHeadersEnd) || timing.receiveHeadersEnd < 0) return; - if (!Number.isFinite(timing.sendEnd) || timing.sendEnd < 0) return; - - const ttfb = timing.receiveHeadersEnd - timing.sendEnd; - const origin = request.parsedURL.securityOrigin; - const rtt = rttByOrigin.get(origin) || rttByOrigin.get(NetworkAnalyzer.SUMMARY) || 0; - return Math.max(ttfb - rtt, 0); - }); - } - - /** - * @param {Lantern.NetworkRequest[]} requests - * @return {boolean} - */ - static canTrustConnectionInformation(requests) { - const connectionIdWasStarted = new Map(); - for (const request of requests) { - const started = connectionIdWasStarted.get(request.connectionId) || !request.connectionReused; - connectionIdWasStarted.set(request.connectionId, started); - } - - // We probably can't trust the network information if all the connection IDs were the same - if (connectionIdWasStarted.size <= 1) return false; - // Or if there were connections that were always reused (a connection had to have started at some point) - return Array.from(connectionIdWasStarted.values()).every(started => started); - } - - /** - * Returns a map of requestId -> connectionReused, estimating the information if the information - * available in the records themselves appears untrustworthy. - * - * @param {Lantern.NetworkRequest[]} records - * @param {{forceCoarseEstimates: boolean}} [options] - * @return {Map} - */ - static estimateIfConnectionWasReused(records, options) { - const {forceCoarseEstimates = false} = options || {}; - - // Check if we can trust the connection information coming from the protocol - if (!forceCoarseEstimates && NetworkAnalyzer.canTrustConnectionInformation(records)) { - return new Map(records.map(request => [request.requestId, !!request.connectionReused])); - } - - // Otherwise we're on our own, a request may not have needed a fresh connection if... 
- // - It was not the first request to the domain - // - It was H2 - // - It was after the first request to the domain ended - const connectionWasReused = new Map(); - const groupedByOrigin = NetworkAnalyzer.groupByOrigin(records); - for (const [_, originRecords] of groupedByOrigin.entries()) { - const earliestReusePossible = originRecords - .map(request => request.networkEndTime) - .reduce((a, b) => Math.min(a, b), Infinity); - - for (const request of originRecords) { - connectionWasReused.set( - request.requestId, - request.networkRequestTime >= earliestReusePossible || request.protocol === 'h2' - ); - } - - const firstRecord = originRecords.reduce((a, b) => { - return a.networkRequestTime > b.networkRequestTime ? b : a; - }); - connectionWasReused.set(firstRecord.requestId, false); - } - - return connectionWasReused; - } - - /** - * Estimates the RTT to each origin by examining observed network timing information. - * Attempts to use the most accurate information first and falls back to coarser estimates when it - * is unavailable. - * - * @param {Lantern.NetworkRequest[]} records - * @param {RTTEstimateOptions} [options] - * @return {Map} - */ - static estimateRTTByOrigin(records, options) { - const { - forceCoarseEstimates = false, - // coarse estimates include lots of extra time and noise - // multiply by some factor to deflate the estimates a bit. - coarseEstimateMultiplier = 0.3, - useDownloadEstimates = true, - useSendStartEstimates = true, - useHeadersEndEstimates = true, - } = options || {}; - - const connectionWasReused = NetworkAnalyzer.estimateIfConnectionWasReused(records); - const groupedByOrigin = NetworkAnalyzer.groupByOrigin(records); - - const estimatesByOrigin = new Map(); - for (const [origin, originRequests] of groupedByOrigin.entries()) { - /** @type {number[]} */ - const originEstimates = []; - - /** - * @param {(e: RequestInfo) => number[]|number|undefined} estimator - */ - // eslint-disable-next-line no-inner-declarations - function collectEstimates(estimator, multiplier = 1) { - for (const request of originRequests) { - const timing = request.timing; - if (!timing) continue; - - const estimates = estimator({ - request, - timing, - connectionReused: connectionWasReused.get(request.requestId), - }); - if (estimates === undefined) continue; - - if (!Array.isArray(estimates)) { - originEstimates.push(estimates * multiplier); - } else { - originEstimates.push(...estimates.map(e => e * multiplier)); - } - } - } - - if (!forceCoarseEstimates) { - collectEstimates(this._estimateRTTViaConnectionTiming); - } - - // Connection timing can be missing for a few reasons: - // - Origin was preconnected, which we don't have instrumentation for. - // - Trace began recording after a connection has already been established (for example, in timespan mode) - // - Perhaps Chrome established a connection already in the background (service worker? Just guessing here) - // - Not provided in LR netstack. 
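When a fresh connection's timing is unavailable for reasons like these, the coarser estimators above take over, each sample scaled down by coarseEstimateMultiplier (0.3 unless overridden). A worked illustration of the TTFB-based fallback, matching the numbers exercised in the test file below (literals and import path are illustrative):

import * as Lantern from '../lantern.js'; // pre-removal path
const {NetworkAnalyzer} = Lantern.Simulation;

// Fresh https connection over h1 with a 1000ms TTFB and a resource type that has no
// specific override, so 40% of TTFB is attributed to the server:
//   estimated server time = 1000 * 0.4                                = 400ms
//   round trips           = 1 (request) + 1 (DNS) + 1 (TCP) + 1 (SSL) = 4
const info = {
  connectionReused: false,
  request: {protocol: 'http/1.1', parsedURL: {scheme: 'https'}, resourceType: 'Other'},
  timing: {receiveHeadersEnd: 1000},
};
NetworkAnalyzer._estimateRTTViaHeadersEndTiming(info);
// -> Math.max((1000 - 400) / 4, 3) = 150
// Under the default coarse multiplier this sample would be recorded as 150 * 0.3 = 45;
// the corresponding test passes coarseEstimateMultiplier: 1 to observe the raw 150.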
- if (!originEstimates.length) { - if (useDownloadEstimates) { - collectEstimates(this._estimateRTTViaDownloadTiming, coarseEstimateMultiplier); - } - if (useSendStartEstimates) { - collectEstimates(this._estimateRTTViaSendStartTiming, coarseEstimateMultiplier); - } - if (useHeadersEndEstimates) { - collectEstimates(this._estimateRTTViaHeadersEndTiming, coarseEstimateMultiplier); - } - } - - if (originEstimates.length) { - estimatesByOrigin.set(origin, originEstimates); - } - } - - if (!estimatesByOrigin.size) throw new Error('No timing information available'); - return NetworkAnalyzer.summarize(estimatesByOrigin); - } - - /** - * Estimates the server response time of each origin. RTT times can be passed in or will be - * estimated automatically if not provided. - * - * @param {Lantern.NetworkRequest[]} records - * @param {RTTEstimateOptions & {rttByOrigin?: Map}} [options] - * @return {Map} - */ - static estimateServerResponseTimeByOrigin(records, options) { - let rttByOrigin = (options || {}).rttByOrigin; - if (!rttByOrigin) { - /** @type {Map} */ - rttByOrigin = new Map(); - - const rttSummaryByOrigin = NetworkAnalyzer.estimateRTTByOrigin(records, options); - for (const [origin, summary] of rttSummaryByOrigin.entries()) { - rttByOrigin.set(origin, summary.min); - } - } - - const estimatesByOrigin = NetworkAnalyzer._estimateResponseTimeByOrigin(records, rttByOrigin); - return NetworkAnalyzer.summarize(estimatesByOrigin); - } - - - /** - * Computes the average throughput for the given requests in bits/second. - * Excludes data URI, failed or otherwise incomplete, and cached requests. - * Returns Infinity if there were no analyzable network requests. - * - * @param {Lantern.NetworkRequest[]} records - * @return {number} - */ - static estimateThroughput(records) { - let totalBytes = 0; - - // We will measure throughput by summing the total bytes downloaded by the total time spent - // downloading those bytes. We slice up all the network requests into start/end boundaries, so - // it's easier to deal with the gaps in downloading. - const timeBoundaries = records.reduce((boundaries, request) => { - const scheme = request.parsedURL?.scheme; - // Requests whose bodies didn't come over the network or didn't completely finish will mess - // with the computation, just skip over them. - if (scheme === 'data' || request.failed || !request.finished || - request.statusCode > 300 || !request.transferSize) { - return boundaries; - } - - // If we've made it this far, all the times we need should be valid (i.e. not undefined/-1). 
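The throughput this method reports is total transferred bytes divided by the union of all download intervals, in bits per second, so concurrent downloads are not double-counted and idle gaps between them are excluded. A small sketch using the same record shape the tests below construct (fields trimmed to the ones the method reads; times are in milliseconds):

import * as Lantern from '../lantern.js'; // pre-removal path
const {NetworkAnalyzer} = Lantern.Simulation;

// Two 1000-byte responses downloading concurrently over 0-1s and 0.5-1s: the union of
// their download windows is 1s, so the estimate is (1000 + 1000) * 8 / 1 = 16000 bits/s.
const records = [
  {responseHeadersEndTime: 0, networkEndTime: 1000, transferSize: 1000,
    finished: true, failed: false, statusCode: 200, parsedURL: {scheme: 'https'}},
  {responseHeadersEndTime: 500, networkEndTime: 1000, transferSize: 1000,
    finished: true, failed: false, statusCode: 200, parsedURL: {scheme: 'https'}},
];
NetworkAnalyzer.estimateThroughput(records); // -> 16000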
- totalBytes += request.transferSize; - boundaries.push({time: request.responseHeadersEndTime / 1000, isStart: true}); - boundaries.push({time: request.networkEndTime / 1000, isStart: false}); - return boundaries; - }, /** @type {Array<{time: number, isStart: boolean}>} */([])).sort((a, b) => a.time - b.time); - - if (!timeBoundaries.length) { - return Infinity; - } - - let inflight = 0; - let currentStart = 0; - let totalDuration = 0; - - timeBoundaries.forEach(boundary => { - if (boundary.isStart) { - if (inflight === 0) { - // We just ended a quiet period, keep track of when the download period started - currentStart = boundary.time; - } - inflight++; - } else { - inflight--; - if (inflight === 0) { - // We just entered a quiet period, update our duration with the time we spent downloading - totalDuration += boundary.time - currentStart; - } - } - }); - - return totalBytes * 8 / totalDuration; - } - - /** - * @param {Lantern.NetworkRequest[]} records - */ - static computeRTTAndServerResponseTime(records) { - // First pass compute the estimated observed RTT to each origin's servers. - /** @type {Map} */ - const rttByOrigin = new Map(); - for (const [origin, summary] of NetworkAnalyzer.estimateRTTByOrigin(records).entries()) { - rttByOrigin.set(origin, summary.min); - } - - // We'll use the minimum RTT as the assumed connection latency since we care about how much addt'l - // latency each origin introduces as Lantern will be simulating with its own connection latency. - const minimumRtt = Math.min(...Array.from(rttByOrigin.values())); - // We'll use the observed RTT information to help estimate the server response time - const responseTimeSummaries = NetworkAnalyzer.estimateServerResponseTimeByOrigin(records, { - rttByOrigin, - }); - - /** @type {Map} */ - const additionalRttByOrigin = new Map(); - /** @type {Map} */ - const serverResponseTimeByOrigin = new Map(); - for (const [origin, summary] of responseTimeSummaries.entries()) { - // Not all origins have usable timing data, we'll default to using no additional latency. 
- const rttForOrigin = rttByOrigin.get(origin) || minimumRtt; - additionalRttByOrigin.set(origin, rttForOrigin - minimumRtt); - serverResponseTimeByOrigin.set(origin, summary.median); - } - - return { - rtt: minimumRtt, - additionalRttByOrigin, - serverResponseTimeByOrigin, - }; - } - - /** - * @param {Lantern.NetworkRequest[]} records - * @return {Lantern.Simulation.Settings['networkAnalysis']} - */ - static analyze(records) { - const throughput = NetworkAnalyzer.estimateThroughput(records); - return { - throughput, - ...NetworkAnalyzer.computeRTTAndServerResponseTime(records), - }; - } - - /** - * @template {Lantern.NetworkRequest} T - * @param {Array} records - * @param {string} resourceUrl - * @return {T|undefined} - */ - static findResourceForUrl(records, resourceUrl) { - // equalWithExcludedFragments is expensive, so check that the resourceUrl starts with the request url first - return records.find(request => - resourceUrl.startsWith(request.url) && - UrlUtils.equalWithExcludedFragments(request.url, resourceUrl) - ); - } - - /** - * @template {Lantern.NetworkRequest} T - * @param {Array} records - * @param {string} resourceUrl - * @return {T|undefined} - */ - static findLastDocumentForUrl(records, resourceUrl) { - // equalWithExcludedFragments is expensive, so check that the resourceUrl starts with the request url first - const matchingRequests = records.filter(request => - request.resourceType === 'Document' && - // Note: `request.url` should never have a fragment, else this optimization gives wrong results. - resourceUrl.startsWith(request.url) && - UrlUtils.equalWithExcludedFragments(request.url, resourceUrl) - ); - return matchingRequests[matchingRequests.length - 1]; - } - - /** - * Resolves redirect chain given a main document. - * See: {@link NetworkAnalyzer.findLastDocumentForUrl}) for how to retrieve main document. - * - * @template {Lantern.NetworkRequest} T - * @param {T} request - * @return {T} - */ - static resolveRedirects(request) { - while (request.redirectDestination) request = /** @type {T} */(request.redirectDestination); - return request; - } -} - -export {NetworkAnalyzer}; - -/** - * @typedef Summary - * @property {number} min - * @property {number} max - * @property {number} avg - * @property {number} median - */ - -/** - * @typedef RTTEstimateOptions - * @property {boolean} [forceCoarseEstimates] TCP connection handshake information will be used when available, but in some circumstances this data can be unreliable. This flag exposes an option to ignore the handshake data and use the coarse download/TTFB timing data. - * @property {number} [coarseEstimateMultiplier] Coarse estimates include lots of extra time and noise multiply by some factor to deflate the estimates a bit. - * @property {boolean} [useDownloadEstimates] Useful for testing to isolate the different methods of estimation. - * @property {boolean} [useSendStartEstimates] Useful for testing to isolate the different methods of estimation. - * @property {boolean} [useHeadersEndEstimates] Useful for testing to isolate the different methods of estimation. 
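Taken together, analyze() is the entry point whose result becomes Lantern.Simulation.Settings['networkAnalysis']: it folds the throughput estimate and the per-origin RTT and server-response-time maps into a single object. A sketch of the shape it returns, assuming requests were created from a trace as in the tests below:

import * as Lantern from '../lantern.js'; // pre-removal path
const {NetworkAnalyzer} = Lantern.Simulation;

const networkAnalysis = NetworkAnalyzer.analyze(requests);
// {
//   throughput,                  // bits/s over observed downloads, Infinity if none were measurable
//   rtt,                         // minimum observed origin RTT, in ms
//   additionalRttByOrigin,       // Map of origin -> latency above that minimum
//   serverResponseTimeByOrigin,  // Map of origin -> median estimated server response time
// }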
- */ diff --git a/core/lib/lantern/simulation/NetworkAnalyzer.test.js b/core/lib/lantern/simulation/NetworkAnalyzer.test.js deleted file mode 100644 index 7334bc9106a9..000000000000 --- a/core/lib/lantern/simulation/NetworkAnalyzer.test.js +++ /dev/null @@ -1,475 +0,0 @@ -/** - * @license - * Copyright 2018 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import assert from 'assert/strict'; - -import * as Lantern from '../lantern.js'; -import {readJson} from '../../../test/test-utils.js'; -import {runTraceEngine} from '../metrics/MetricTestUtils.js'; - -const {NetworkAnalyzer} = Lantern.Simulation; - -const trace = readJson('../../../test/fixtures/artifacts/paul/trace.json', import.meta); -const traceWithRedirect = readJson('../../../test/fixtures/artifacts/redirect/trace.json', import.meta); - -/** - * @param {Lantern.Trace} trace - */ -async function createRequests(trace) { - const traceEngineData = await runTraceEngine( - /** @type {TraceEngine.Types.TraceEvents.TraceEventData[]} */ (trace.traceEvents) - ); - return Lantern.TraceEngineComputationData.createNetworkRequests(trace, traceEngineData); -} - -describe('NetworkAnalyzer', () => { - afterEach(() => { - global.isLightrider = undefined; - }); - - let recordId; - - function createRecord(opts) { - const url = opts.url || 'https://example.com'; - if (opts.networkRequestTime) opts.networkRequestTime *= 1000; - if (opts.networkEndTime) opts.networkEndTime *= 1000; - return Object.assign( - { - url, - requestId: recordId++, - connectionId: 0, - connectionReused: false, - networkRequestTime: 10, - networkEndTime: 10, - transferSize: 0, - protocol: opts.protocol || 'http/1.1', - parsedURL: {scheme: url.match(/https?/)[0], securityOrigin: url.match(/.*\.com/)[0]}, - timing: opts.timing || null, - }, - opts - ); - } - - beforeEach(() => { - recordId = 1; - }); - - function assertCloseEnough(valueA, valueB, threshold = 1) { - const message = `${valueA} was not close enough to ${valueB}`; - assert.ok(Math.abs(valueA - valueB) < threshold, message); - } - - describe('#estimateIfConnectionWasReused', () => { - it('should use built-in value when trustworthy', () => { - const records = [ - {requestId: 1, connectionId: 1, connectionReused: false}, - {requestId: 2, connectionId: 1, connectionReused: true}, - {requestId: 3, connectionId: 2, connectionReused: false}, - {requestId: 4, connectionId: 3, connectionReused: false}, - {requestId: 5, connectionId: 2, connectionReused: true}, - ]; - - const result = NetworkAnalyzer.estimateIfConnectionWasReused(records); - const expected = new Map([[1, false], [2, true], [3, false], [4, false], [5, true]]); - assert.deepStrictEqual(result, expected); - }); - - it('should estimate values when not trustworthy (duplicate IDs)', () => { - const records = [ - createRecord({requestId: 1, networkRequestTime: 0, networkEndTime: 15}), - createRecord({requestId: 2, networkRequestTime: 10, networkEndTime: 25}), - createRecord({requestId: 3, networkRequestTime: 20, networkEndTime: 40}), - createRecord({requestId: 4, networkRequestTime: 30, networkEndTime: 40}), - ]; - - const result = NetworkAnalyzer.estimateIfConnectionWasReused(records); - const expected = new Map([[1, false], [2, false], [3, true], [4, true]]); - assert.deepStrictEqual(result, expected); - }); - - it('should estimate values when not trustworthy (connectionReused nonsense)', () => { - const records = [ - createRecord({ - requestId: 1, - connectionId: 1, - connectionReused: true, - networkRequestTime: 0, - networkEndTime: 15, - }), - 
createRecord({ - requestId: 2, - connectionId: 1, - connectionReused: true, - networkRequestTime: 10, - networkEndTime: 25, - }), - createRecord({ - requestId: 3, - connectionId: 1, - connectionReused: true, - networkRequestTime: 20, - networkEndTime: 40, - }), - createRecord({ - requestId: 4, - connectionId: 2, - connectionReused: false, - networkRequestTime: 30, - networkEndTime: 40, - }), - ]; - - const result = NetworkAnalyzer.estimateIfConnectionWasReused(records); - const expected = new Map([[1, false], [2, false], [3, true], [4, true]]); - assert.deepStrictEqual(result, expected); - }); - - it('should estimate with earliest allowed reuse', () => { - const records = [ - createRecord({requestId: 1, networkRequestTime: 0, networkEndTime: 40}), - createRecord({requestId: 2, networkRequestTime: 10, networkEndTime: 15}), - createRecord({requestId: 3, networkRequestTime: 20, networkEndTime: 30}), - createRecord({requestId: 4, networkRequestTime: 35, networkEndTime: 40}), - ]; - - const result = NetworkAnalyzer.estimateIfConnectionWasReused(records); - const expected = new Map([[1, false], [2, false], [3, true], [4, true]]); - assert.deepStrictEqual(result, expected); - }); - - it('should work on a real trace', async () => { - const requests = await createRequests(trace); - const result = NetworkAnalyzer.estimateIfConnectionWasReused(requests); - const distinctConnections = Array.from(result.values()).filter(item => !item).length; - assert.equal(result.size, 25); - assert.equal(distinctConnections, 9); - }); - }); - - describe('#estimateRTTByOrigin', () => { - it('should infer from tcp timing when available', () => { - const timing = {connectStart: 0, connectEnd: 99}; - const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing}); - const result = NetworkAnalyzer.estimateRTTByOrigin([request]); - const expected = {min: 99, max: 99, avg: 99, median: 99}; - assert.deepStrictEqual(result.get('https://example.com'), expected); - }); - - it('should infer only one estimate if tcp and ssl start times are equal', () => { - const timing = {connectStart: 0, connectEnd: 99, sslStart: 0, sslEnd: 99}; - const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing}); - const result = NetworkAnalyzer.estimateRTTByOrigin([request]); - const expected = {min: 99, max: 99, avg: 99, median: 99}; - assert.deepStrictEqual(result.get('https://example.com'), expected); - }); - - it('should infer from tcp and ssl timing when available', () => { - const timing = {connectStart: 0, connectEnd: 99, sslStart: 50, sslEnd: 99}; - const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing}); - const result = NetworkAnalyzer.estimateRTTByOrigin([request]); - const expected = {min: 49, max: 50, avg: 49.5, median: 49.5}; - assert.deepStrictEqual(result.get('https://example.com'), expected); - }); - - it('should infer from connection timing when available for h3 (one estimate)', () => { - const timing = {connectStart: 0, connectEnd: 99, sslStart: 1, sslEnd: 99}; - const request = - createRecord({networkRequestTime: 0, networkEndTime: 1, timing, protocol: 'h3'}); - const result = NetworkAnalyzer.estimateRTTByOrigin([request]); - const expected = {min: 99, max: 99, avg: 99, median: 99}; - assert.deepStrictEqual(result.get('https://example.com'), expected); - }); - - it('should infer from sendStart when available', () => { - const timing = {sendStart: 150}; - // this request took 150ms before Chrome could send the request - // i.e. 
DNS (maybe) + queuing (maybe) + TCP handshake took ~100ms - // 150ms / 3 round trips ~= 50ms RTT - const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing}); - const result = NetworkAnalyzer.estimateRTTByOrigin([request], {coarseEstimateMultiplier: 1}); - const expected = {min: 50, max: 50, avg: 50, median: 50}; - assert.deepStrictEqual(result.get('https://example.com'), expected); - }); - - it('should infer from download timing when available', () => { - const timing = {receiveHeadersEnd: 100}; - // this request took 1000ms after the first byte was received to download the payload - // i.e. it took at least one full additional roundtrip after first byte to download the rest - // 1000ms / 1 round trip ~= 1000ms RTT - const request = createRecord({networkRequestTime: 0, networkEndTime: 1.1, - transferSize: 28 * 1024, timing}); - const result = NetworkAnalyzer.estimateRTTByOrigin([request], { - coarseEstimateMultiplier: 1, - useHeadersEndEstimates: false, - }); - const expected = {min: 1000, max: 1000, avg: 1000, median: 1000}; - assert.deepStrictEqual(result.get('https://example.com'), expected); - }); - - it('should infer from TTFB when available', () => { - const timing = {receiveHeadersEnd: 1000}; - const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing, - resourceType: 'Other'}); - const result = NetworkAnalyzer.estimateRTTByOrigin([request], { - coarseEstimateMultiplier: 1, - }); - - // this request's TTFB was 1000ms, it used SSL and was a fresh connection requiring a handshake - // which needs ~4 RTs. We don't know its resource type so it'll be assumed that 40% of it was - // server response time. - // 600 ms / 4 = 150ms - const expected = {min: 150, max: 150, avg: 150, median: 150}; - assert.deepStrictEqual(result.get('https://example.com'), expected); - }); - - it('should use coarse estimates on a per-origin basis', () => { - const records = [ - createRecord({url: 'https://example.com', timing: {connectStart: 1, connectEnd: 100, sendStart: 150}}), - createRecord({url: 'https://example2.com', timing: {sendStart: 150}}), - ]; - const result = NetworkAnalyzer.estimateRTTByOrigin(records); - assert.deepStrictEqual(result.get('https://example.com'), {min: 99, max: 99, avg: 99, median: 99}); - assert.deepStrictEqual(result.get('https://example2.com'), {min: 15, max: 15, avg: 15, median: 15}); - }); - - it('should handle untrustworthy connection information', () => { - const timing = {sendStart: 150}; - const recordA = createRecord({networkRequestTime: 0, networkEndTime: 1, timing, - connectionReused: true}); - const recordB = createRecord({ - networkRequestTime: 0, - networkEndTime: 1, - timing, - connectionId: 2, - connectionReused: true, - }); - const result = NetworkAnalyzer.estimateRTTByOrigin([recordA, recordB], { - coarseEstimateMultiplier: 1, - }); - const expected = {min: 50, max: 50, avg: 50, median: 50}; - assert.deepStrictEqual(result.get('https://example.com'), expected); - }); - - it('should work on a real trace', async () => { - const requests = await createRequests(trace); - const result = NetworkAnalyzer.estimateRTTByOrigin(requests); - assertCloseEnough(result.get('https://www.paulirish.com').min, 10); - assertCloseEnough(result.get('https://www.googletagmanager.com').min, 17); - assertCloseEnough(result.get('https://www.google-analytics.com').min, 10); - }); - - it('should approximate well with either method', async () => { - const requests = await createRequests(trace); - const result = 
NetworkAnalyzer.estimateRTTByOrigin(requests).get(NetworkAnalyzer.SUMMARY); - const resultApprox = NetworkAnalyzer.estimateRTTByOrigin(requests, { - forceCoarseEstimates: true, - }).get(NetworkAnalyzer.SUMMARY); - assertCloseEnough(result.min, resultApprox.min, 20); - assertCloseEnough(result.avg, resultApprox.avg, 30); - assertCloseEnough(result.median, resultApprox.median, 30); - }); - }); - - describe('#estimateServerResponseTimeByOrigin', () => { - it('should estimate server response time using ttfb times', () => { - const timing = {sendEnd: 100, receiveHeadersEnd: 200}; - const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing}); - const rttByOrigin = new Map([[NetworkAnalyzer.SUMMARY, 0]]); - const result = NetworkAnalyzer.estimateServerResponseTimeByOrigin([request], {rttByOrigin}); - const expected = {min: 100, max: 100, avg: 100, median: 100}; - assert.deepStrictEqual(result.get('https://example.com'), expected); - }); - - it('should subtract out rtt', () => { - const timing = {sendEnd: 100, receiveHeadersEnd: 200}; - const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing}); - const rttByOrigin = new Map([[NetworkAnalyzer.SUMMARY, 50]]); - const result = NetworkAnalyzer.estimateServerResponseTimeByOrigin([request], {rttByOrigin}); - const expected = {min: 50, max: 50, avg: 50, median: 50}; - assert.deepStrictEqual(result.get('https://example.com'), expected); - }); - - it('should compute rtts when not provided', () => { - const timing = {connectStart: 5, connectEnd: 55, sendEnd: 100, receiveHeadersEnd: 200}; - const request = createRecord({networkRequestTime: 0, networkEndTime: 1, timing}); - const result = NetworkAnalyzer.estimateServerResponseTimeByOrigin([request]); - const expected = {min: 50, max: 50, avg: 50, median: 50}; - assert.deepStrictEqual(result.get('https://example.com'), expected); - }); - - it('should work on a real trace', async () => { - const requests = await createRequests(trace); - const result = NetworkAnalyzer.estimateServerResponseTimeByOrigin(requests); - assertCloseEnough(result.get('https://www.paulirish.com').avg, 35); - assertCloseEnough(result.get('https://www.googletagmanager.com').avg, 8); - assertCloseEnough(result.get('https://www.google-analytics.com').avg, 8); - }); - - it('should approximate well with either method', async () => { - const requests = await createRequests(trace); - const result = NetworkAnalyzer.estimateServerResponseTimeByOrigin(requests).get( - NetworkAnalyzer.SUMMARY - ); - const resultApprox = NetworkAnalyzer.estimateServerResponseTimeByOrigin(requests, { - forceCoarseEstimates: true, - }).get(NetworkAnalyzer.SUMMARY); - assertCloseEnough(result.min, resultApprox.min, 20); - assertCloseEnough(result.avg, resultApprox.avg, 30); - assertCloseEnough(result.median, resultApprox.median, 30); - }); - }); - - describe('#estimateThroughput', () => { - const estimateThroughput = NetworkAnalyzer.estimateThroughput; - - function createThroughputRecord(responseHeadersEndTimeInS, networkEndTimeInS, extras) { - return Object.assign( - { - responseHeadersEndTime: responseHeadersEndTimeInS * 1000, - networkEndTime: networkEndTimeInS * 1000, - transferSize: 1000, - finished: true, - failed: false, - statusCode: 200, - url: 'https://google.com/logo.png', - parsedURL: {scheme: 'https'}, - }, - extras - ); - } - - it('should return Infinity for no/missing records', () => { - assert.equal(estimateThroughput([]), Infinity); - assert.equal(estimateThroughput([createThroughputRecord(0, 0, {finished: 
false})]), Infinity); - }); - - it('should compute correctly for a basic waterfall', () => { - const result = estimateThroughput([ - createThroughputRecord(0, 1), - createThroughputRecord(1, 2), - createThroughputRecord(2, 6), - ]); - - assert.equal(result, 500 * 8); - }); - - it('should compute correctly for concurrent requests', () => { - const result = estimateThroughput([ - createThroughputRecord(0, 1), - createThroughputRecord(0.5, 1), - ]); - - assert.equal(result, 2000 * 8); - }); - - it('should compute correctly for gaps', () => { - const result = estimateThroughput([ - createThroughputRecord(0, 1), - createThroughputRecord(3, 4), - ]); - - assert.equal(result, 1000 * 8); - }); - - it('should compute correctly for partially overlapping requests', () => { - const result = estimateThroughput([ - createThroughputRecord(0, 1), - createThroughputRecord(0.5, 1.5), - createThroughputRecord(1.25, 3), - createThroughputRecord(1.4, 4), - createThroughputRecord(5, 9), - ]); - - assert.equal(result, 625 * 8); - }); - - it('should exclude failed records', () => { - const result = estimateThroughput([ - createThroughputRecord(0, 2), - createThroughputRecord(3, 4, {failed: true}), - ]); - assert.equal(result, 500 * 8); - }); - - it('should exclude cached records', () => { - const result = estimateThroughput([ - createThroughputRecord(0, 2), - createThroughputRecord(3, 4, {statusCode: 304}), - ]); - assert.equal(result, 500 * 8); - }); - - it('should exclude unfinished records', () => { - const result = estimateThroughput([ - createThroughputRecord(0, 2), - createThroughputRecord(3, 4, {finished: false}), - ]); - assert.equal(result, 500 * 8); - }); - - it('should exclude data URIs', () => { - const result = estimateThroughput([ - createThroughputRecord(0, 2), - createThroughputRecord(3, 4, {parsedURL: {scheme: 'data'}}), - ]); - assert.equal(result, 500 * 8); - }); - }); - - describe('#computeRTTAndServerResponseTime', () => { - it('should work', async () => { - const requests = await createRequests(trace); - const result = NetworkAnalyzer.computeRTTAndServerResponseTime(requests); - - expect(result.rtt).toBeCloseTo(0.082); - expect(result.additionalRttByOrigin).toMatchInlineSnapshot(` -Map { - "https://www.paulirish.com" => 9.788999999999994, - "https://www.googletagmanager.com" => 17.21999999999999, - "https://fonts.googleapis.com" => 16.816000000000003, - "https://fonts.gstatic.com" => 1.6889999999999998, - "https://www.google-analytics.com" => 9.924999999999997, - "https://paulirish.disqus.com" => 9.000999999999998, - "https://firebaseinstallations.googleapis.com" => 0, - "https://firebaseremoteconfig.googleapis.com" => 0.1823, - "__SUMMARY__" => 0, -} -`); - }); - }); - - describe('#findMainDocument', () => { - it('should find the main document', async () => { - const requests = await createRequests(trace); - const mainDocument = NetworkAnalyzer.findResourceForUrl(requests, 'https://www.paulirish.com/'); - assert.equal(mainDocument.url, 'https://www.paulirish.com/'); - }); - - it('should find the main document if the URL includes a fragment', async () => { - const requests = await createRequests(trace); - const mainDocument = NetworkAnalyzer.findResourceForUrl(requests, 'https://www.paulirish.com/#info'); - assert.equal(mainDocument.url, 'https://www.paulirish.com/'); - }); - }); - - describe('#resolveRedirects', () => { - it('should resolve to the same document when no redirect', async () => { - const requests = await createRequests(trace); - const mainDocument = 
NetworkAnalyzer.findResourceForUrl(requests, 'https://www.paulirish.com/'); - const finalDocument = NetworkAnalyzer.resolveRedirects(mainDocument); - assert.equal(mainDocument.url, finalDocument.url); - assert.equal(finalDocument.url, 'https://www.paulirish.com/'); - }); - - it('should resolve to the final document with redirects', async () => { - const requests = await createRequests(traceWithRedirect); - const mainDocument = NetworkAnalyzer.findResourceForUrl(requests, 'http://www.vkontakte.ru/'); - const finalDocument = NetworkAnalyzer.resolveRedirects(mainDocument); - assert.notEqual(mainDocument.url, finalDocument.url); - assert.equal(finalDocument.url, 'https://m.vk.com/'); - }); - }); -}); diff --git a/core/lib/lantern/simulation/SimulationTimingMap.js b/core/lib/lantern/simulation/SimulationTimingMap.js deleted file mode 100644 index 4050e2d66b83..000000000000 --- a/core/lib/lantern/simulation/SimulationTimingMap.js +++ /dev/null @@ -1,219 +0,0 @@ -/** - * @license - * Copyright 2020 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from '../lantern.js'; - -/** - * @fileoverview - * - * This class encapsulates the type-related validation logic for moving timing information for nodes - * through the different simulation phases. Methods here ensure that the invariants of simulation hold - * as nodes are queued, partially simulated, and completed. - */ - -/** @typedef {import('../BaseNode.js').Node} Node */ -/** @typedef {import('../NetworkNode.js').NetworkNode} NetworkNode */ -/** @typedef {import('../CpuNode.js').CPUNode} CpuNode */ - -/** - * @typedef NodeTimingComplete - * @property {number} startTime - * @property {number} endTime - * @property {number} queuedTime Helpful for debugging. - * @property {number} estimatedTimeElapsed - * @property {number} timeElapsed - * @property {number} timeElapsedOvershoot - * @property {number} bytesDownloaded - */ - -/** @typedef {Pick} NodeTimingQueued */ - -/** @typedef {NodeTimingQueued & Pick} CpuNodeTimingStarted */ -/** @typedef {CpuNodeTimingStarted & Pick} NetworkNodeTimingStarted */ - -/** @typedef {CpuNodeTimingStarted & Pick} CpuNodeTimingInProgress */ -/** @typedef {NetworkNodeTimingStarted & Pick} NetworkNodeTimingInProgress */ - -/** @typedef {CpuNodeTimingInProgress & Pick} CpuNodeTimingComplete */ -/** @typedef {NetworkNodeTimingInProgress & Pick & {connectionTiming: ConnectionTiming}} NetworkNodeTimingComplete */ - -/** @typedef {NodeTimingQueued | CpuNodeTimingStarted | NetworkNodeTimingStarted | CpuNodeTimingInProgress | NetworkNodeTimingInProgress | CpuNodeTimingComplete | NetworkNodeTimingComplete} NodeTimingData */ - -/** - * @typedef ConnectionTiming A breakdown of network connection timings. - * @property {number} [dnsResolutionTime] - * @property {number} [connectionTime] - * @property {number} [sslTime] - * @property {number} timeToFirstByte - */ - -class SimulatorTimingMap { - constructor() { - /** @type {Map} */ - this._nodeTimings = new Map(); - } - - /** @return {Array} */ - getNodes() { - return Array.from(this._nodeTimings.keys()); - } - - /** - * @param {Node} node - * @param {{queuedTime: number}} values - */ - setReadyToStart(node, values) { - this._nodeTimings.set(node, values); - } - - /** - * @param {Node} node - * @param {{startTime: number}} values - */ - setInProgress(node, values) { - const nodeTiming = { - ...this.getQueued(node), - startTime: values.startTime, - timeElapsed: 0, - }; - - this._nodeTimings.set( - node, - node.type === Lantern.BaseNode.TYPES.NETWORK - ? 
{...nodeTiming, timeElapsedOvershoot: 0, bytesDownloaded: 0} - : nodeTiming - ); - } - - /** - * @param {Node} node - * @param {{endTime: number, connectionTiming?: ConnectionTiming}} values - */ - setCompleted(node, values) { - const nodeTiming = { - ...this.getInProgress(node), - endTime: values.endTime, - connectionTiming: values.connectionTiming, - }; - - this._nodeTimings.set(node, nodeTiming); - } - - /** - * @param {CpuNode} node - * @param {{timeElapsed: number}} values - */ - setCpu(node, values) { - const nodeTiming = { - ...this.getCpuStarted(node), - timeElapsed: values.timeElapsed, - }; - - this._nodeTimings.set(node, nodeTiming); - } - - /** - * @param {CpuNode} node - * @param {{estimatedTimeElapsed: number}} values - */ - setCpuEstimated(node, values) { - const nodeTiming = { - ...this.getCpuStarted(node), - estimatedTimeElapsed: values.estimatedTimeElapsed, - }; - - this._nodeTimings.set(node, nodeTiming); - } - - /** - * @param {NetworkNode} node - * @param {{timeElapsed: number, timeElapsedOvershoot: number, bytesDownloaded: number}} values - */ - setNetwork(node, values) { - const nodeTiming = { - ...this.getNetworkStarted(node), - timeElapsed: values.timeElapsed, - timeElapsedOvershoot: values.timeElapsedOvershoot, - bytesDownloaded: values.bytesDownloaded, - }; - - this._nodeTimings.set(node, nodeTiming); - } - - /** - * @param {NetworkNode} node - * @param {{estimatedTimeElapsed: number}} values - */ - setNetworkEstimated(node, values) { - const nodeTiming = { - ...this.getNetworkStarted(node), - estimatedTimeElapsed: values.estimatedTimeElapsed, - }; - - this._nodeTimings.set(node, nodeTiming); - } - - /** - * @param {Node} node - * @return {NodeTimingQueued} - */ - getQueued(node) { - const timing = this._nodeTimings.get(node); - if (!timing) throw new Error(`Node ${node.id} not yet queued`); - return timing; - } - - /** - * @param {CpuNode} node - * @return {CpuNodeTimingStarted} - */ - getCpuStarted(node) { - const timing = this._nodeTimings.get(node); - if (!timing) throw new Error(`Node ${node.id} not yet queued`); - if (!('startTime' in timing)) throw new Error(`Node ${node.id} not yet started`); - if ('bytesDownloaded' in timing) throw new Error(`Node ${node.id} timing not valid`); - return timing; - } - - /** - * @param {NetworkNode} node - * @return {NetworkNodeTimingStarted} - */ - getNetworkStarted(node) { - const timing = this._nodeTimings.get(node); - if (!timing) throw new Error(`Node ${node.id} not yet queued`); - if (!('startTime' in timing)) throw new Error(`Node ${node.id} not yet started`); - if (!('bytesDownloaded' in timing)) throw new Error(`Node ${node.id} timing not valid`); - return timing; - } - - /** - * @param {Node} node - * @return {CpuNodeTimingInProgress | NetworkNodeTimingInProgress} - */ - getInProgress(node) { - const timing = this._nodeTimings.get(node); - if (!timing) throw new Error(`Node ${node.id} not yet queued`); - if (!('startTime' in timing)) throw new Error(`Node ${node.id} not yet started`); - if (!('estimatedTimeElapsed' in timing)) throw new Error(`Node ${node.id} not yet in progress`); - return timing; - } - - /** - * @param {Node} node - * @return {CpuNodeTimingComplete | NetworkNodeTimingComplete} - */ - getCompleted(node) { - const timing = this._nodeTimings.get(node); - if (!timing) throw new Error(`Node ${node.id} not yet queued`); - if (!('startTime' in timing)) throw new Error(`Node ${node.id} not yet started`); - if (!('estimatedTimeElapsed' in timing)) throw new Error(`Node ${node.id} not yet in progress`); - 
if (!('endTime' in timing)) throw new Error(`Node ${node.id} not yet completed`); - return timing; - } -} - -export {SimulatorTimingMap}; diff --git a/core/lib/lantern/simulation/Simulator.js b/core/lib/lantern/simulation/Simulator.js deleted file mode 100644 index 48258f18265a..000000000000 --- a/core/lib/lantern/simulation/Simulator.js +++ /dev/null @@ -1,593 +0,0 @@ -/** - * @license - * Copyright 2017 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from '../lantern.js'; -import {TcpConnection} from './TcpConnection.js'; -import {ConnectionPool} from './ConnectionPool.js'; -import {DNSCache} from './DNSCache.js'; -import {SimulatorTimingMap} from './SimulationTimingMap.js'; - -const Constants = Lantern.Simulation.Constants; -const defaultThrottling = Constants.throttling.mobileSlow4G; - -/** @typedef {import('../BaseNode.js').Node} Node */ -/** @typedef {import('../NetworkNode.js').NetworkNode} NetworkNode */ -/** @typedef {import('../CpuNode.js').CPUNode} CpuNode */ -/** @typedef {import('./SimulationTimingMap.js').CpuNodeTimingComplete | import('./SimulationTimingMap.js').NetworkNodeTimingComplete} CompleteNodeTiming */ -/** @typedef {import('./SimulationTimingMap.js').ConnectionTiming} ConnectionTiming */ - -// see https://cs.chromium.org/search/?q=kDefaultMaxNumDelayableRequestsPerClient&sq=package:chromium&type=cs -const DEFAULT_MAXIMUM_CONCURRENT_REQUESTS = 10; -// layout tasks tend to be less CPU-bound and do not experience the same increase in duration -const DEFAULT_LAYOUT_TASK_MULTIPLIER = 0.5; -// if a task takes more than 10 seconds it's usually a sign it isn't actually CPU bound and we're overestimating -const DEFAULT_MAXIMUM_CPU_TASK_DURATION = 10000; - -const NodeState = { - NotReadyToStart: 0, - ReadyToStart: 1, - InProgress: 2, - Complete: 3, -}; - -/** @type {Record} */ -const PriorityStartTimePenalty = { - VeryHigh: 0, - High: 0.25, - Medium: 0.5, - Low: 1, - VeryLow: 2, -}; - -/** @type {Map>} */ -const ALL_SIMULATION_NODE_TIMINGS = new Map(); - -/** - * @template [T=any] - */ -class Simulator { - /** - * @param {Lantern.Simulation.Settings} settings - */ - static createSimulator(settings) { - const {throttlingMethod, throttling, precomputedLanternData, networkAnalysis} = settings; - - /** @type {Lantern.Simulation.Options} */ - const options = { - additionalRttByOrigin: networkAnalysis.additionalRttByOrigin, - serverResponseTimeByOrigin: networkAnalysis.serverResponseTimeByOrigin, - observedThroughput: networkAnalysis.throughput, - }; - - // If we have precomputed lantern data, overwrite our observed estimates and use precomputed instead - // for increased stability. 
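// A hedged worked example of the 'devtools' branch further below, assuming Lighthouse's
// default adjustment factors (DEVTOOLS_RTT_ADJUSTMENT_FACTOR = 3.75,
// DEVTOOLS_THROUGHPUT_ADJUSTMENT_FACTOR = 0.9 — values taken from the shared constants,
// not from this file). With requestLatencyMs = 562.5 and downloadThroughputKbps = 1474.56:
//   rtt        = 562.5 / 3.75           = 150 ms
//   throughput = 1474.56 * 1024 / 0.9   = 1,677,721.6 bits/s (~1.6 Mbit/s)
// i.e. the DevTools throttling numbers are de-adjusted back to the underlying network
// conditions before simulation.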
- if (precomputedLanternData) { - options.additionalRttByOrigin = new Map(Object.entries( - precomputedLanternData.additionalRttByOrigin)); - options.serverResponseTimeByOrigin = new Map(Object.entries( - precomputedLanternData.serverResponseTimeByOrigin)); - } - - switch (throttlingMethod) { - case 'provided': - options.rtt = networkAnalysis.rtt; - options.throughput = networkAnalysis.throughput; - options.cpuSlowdownMultiplier = 1; - options.layoutTaskMultiplier = 1; - break; - case 'devtools': - if (throttling) { - options.rtt = - throttling.requestLatencyMs / Constants.throttling.DEVTOOLS_RTT_ADJUSTMENT_FACTOR; - options.throughput = - throttling.downloadThroughputKbps * 1024 / - Constants.throttling.DEVTOOLS_THROUGHPUT_ADJUSTMENT_FACTOR; - } - - options.cpuSlowdownMultiplier = 1; - options.layoutTaskMultiplier = 1; - break; - case 'simulate': - if (throttling) { - options.rtt = throttling.rttMs; - options.throughput = throttling.throughputKbps * 1024; - options.cpuSlowdownMultiplier = throttling.cpuSlowdownMultiplier; - } - break; - default: - // intentionally fallback to simulator defaults - break; - } - - return new Simulator(options); - } - - /** - * @param {Lantern.Simulation.Options} [options] - */ - constructor(options) { - /** @type {Required} */ - this._options = Object.assign( - { - rtt: defaultThrottling.rttMs, - throughput: defaultThrottling.throughputKbps * 1024, - maximumConcurrentRequests: DEFAULT_MAXIMUM_CONCURRENT_REQUESTS, - cpuSlowdownMultiplier: defaultThrottling.cpuSlowdownMultiplier, - layoutTaskMultiplier: DEFAULT_LAYOUT_TASK_MULTIPLIER, - additionalRttByOrigin: new Map(), - serverResponseTimeByOrigin: new Map(), - }, - options - ); - - this._rtt = this._options.rtt; - this._throughput = this._options.throughput; - this._maximumConcurrentRequests = Math.max(Math.min( - TcpConnection.maximumSaturatedConnections(this._rtt, this._throughput), - this._options.maximumConcurrentRequests - ), 1); - this._cpuSlowdownMultiplier = this._options.cpuSlowdownMultiplier; - this._layoutTaskMultiplier = this._cpuSlowdownMultiplier * this._options.layoutTaskMultiplier; - /** @type {Array} */ - this._cachedNodeListByStartPosition = []; - - // Properties reset on every `.simulate` call but duplicated here for type checking - this._nodeTimings = new SimulatorTimingMap(); - /** @type {Map} */ - this._numberInProgressByType = new Map(); - /** @type {Record>} */ - this._nodes = {}; - this._dns = new DNSCache({rtt: this._rtt}); - /** @type {ConnectionPool} */ - // @ts-expect-error - this._connectionPool = null; - - if (!Number.isFinite(this._rtt)) throw new Error(`Invalid rtt ${this._rtt}`); - if (!Number.isFinite(this._throughput)) throw new Error(`Invalid rtt ${this._throughput}`); - } - - /** @return {number} */ - get rtt() { - return this._rtt; - } - - /** - * @param {Node} graph - */ - _initializeConnectionPool(graph) { - /** @type {Lantern.NetworkRequest[]} */ - const records = []; - graph.getRootNode().traverse(node => { - if (node.type === Lantern.BaseNode.TYPES.NETWORK) { - records.push(node.request); - } - }); - - this._connectionPool = new ConnectionPool(records, this._options); - } - - /** - * Initializes the various state data structures such _nodeTimings and the _node Sets by state. 
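// Illustration of the four NodeState sets initialized below: every node lives in exactly one
// set at a time and only moves forward. For a two-node graph A -> B the progression is roughly:
//   t = 0        NotReadyToStart: {A, B}, then the root A is marked ReadyToStart
//   A started    InProgress: {A}          NotReadyToStart: {B}
//   A finished   Complete: {A}            ReadyToStart: {B}   (B's only dependency is done)
//   B finished   Complete: {A, B}         and the simulation loop exits
// The extra sets are not strictly required (see the NOTE below) but make the bookkeeping explicit.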
- */ - _initializeAuxiliaryData() { - this._nodeTimings = new SimulatorTimingMap(); - this._numberInProgressByType = new Map(); - - this._nodes = {}; - this._cachedNodeListByStartPosition = []; - // NOTE: We don't actually need *all* of these sets, but the clarity that each node progresses - // through the system is quite nice. - for (const state of Object.values(NodeState)) { - this._nodes[state] = new Set(); - } - } - - /** - * @param {string} type - * @return {number} - */ - _numberInProgress(type) { - return this._numberInProgressByType.get(type) || 0; - } - - /** - * @param {Node} node - * @param {number} queuedTime - */ - _markNodeAsReadyToStart(node, queuedTime) { - const nodeStartPosition = Simulator._computeNodeStartPosition(node); - const firstNodeIndexWithGreaterStartPosition = this._cachedNodeListByStartPosition - .findIndex(candidate => Simulator._computeNodeStartPosition(candidate) > nodeStartPosition); - const insertionIndex = firstNodeIndexWithGreaterStartPosition === -1 ? - this._cachedNodeListByStartPosition.length : firstNodeIndexWithGreaterStartPosition; - this._cachedNodeListByStartPosition.splice(insertionIndex, 0, node); - - this._nodes[NodeState.ReadyToStart].add(node); - this._nodes[NodeState.NotReadyToStart].delete(node); - this._nodeTimings.setReadyToStart(node, {queuedTime}); - } - - /** - * @param {Node} node - * @param {number} startTime - */ - _markNodeAsInProgress(node, startTime) { - const indexOfNodeToStart = this._cachedNodeListByStartPosition.indexOf(node); - this._cachedNodeListByStartPosition.splice(indexOfNodeToStart, 1); - - this._nodes[NodeState.InProgress].add(node); - this._nodes[NodeState.ReadyToStart].delete(node); - this._numberInProgressByType.set(node.type, this._numberInProgress(node.type) + 1); - this._nodeTimings.setInProgress(node, {startTime}); - } - - /** - * @param {Node} node - * @param {number} endTime - * @param {ConnectionTiming} [connectionTiming] Optional network connection information. 
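// Worked example of the dependent-queueing loop below: given edges B -> D and C -> D,
// completing B does not make D ready, because D.getDependencies() still contains C, which is
// not yet in the Complete set; only when C also completes is D marked ReadyToStart, with its
// queuedTime set to C's endTime.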
- */ - _markNodeAsComplete(node, endTime, connectionTiming) { - this._nodes[NodeState.Complete].add(node); - this._nodes[NodeState.InProgress].delete(node); - this._numberInProgressByType.set(node.type, this._numberInProgress(node.type) - 1); - this._nodeTimings.setCompleted(node, {endTime, connectionTiming}); - - // Try to add all its dependents to the queue - for (const dependent of node.getDependents()) { - // Skip dependent node if one of its dependencies hasn't finished yet - const dependencies = dependent.getDependencies(); - if (dependencies.some(dep => !this._nodes[NodeState.Complete].has(dep))) continue; - - // Otherwise add it to the queue - this._markNodeAsReadyToStart(dependent, endTime); - } - } - - /** - * @param {Lantern.NetworkRequest} request - * @return {?TcpConnection} - */ - _acquireConnection(request) { - return this._connectionPool.acquire(request); - } - - /** - * @return {Node[]} - */ - _getNodesSortedByStartPosition() { - // Make a copy so we don't skip nodes due to concurrent modification - return Array.from(this._cachedNodeListByStartPosition); - } - - /** - * @param {Node} node - * @param {number} totalElapsedTime - */ - _startNodeIfPossible(node, totalElapsedTime) { - if (node.type === Lantern.BaseNode.TYPES.CPU) { - // Start a CPU task if there's no other CPU task in process - if (this._numberInProgress(node.type) === 0) { - this._markNodeAsInProgress(node, totalElapsedTime); - } - - return; - } - - if (node.type !== Lantern.BaseNode.TYPES.NETWORK) throw new Error('Unsupported'); - - // If a network request is connectionless, we can always start it, so skip the connection checks - if (!node.isConnectionless) { - // Start a network request if we're not at max requests and a connection is available - const numberOfActiveRequests = this._numberInProgress(node.type); - if (numberOfActiveRequests >= this._maximumConcurrentRequests) return; - const connection = this._acquireConnection(node.request); - if (!connection) return; - } - - this._markNodeAsInProgress(node, totalElapsedTime); - } - - /** - * Updates each connection in use with the available throughput based on the number of network requests - * currently in flight. - */ - _updateNetworkCapacity() { - const inFlight = this._numberInProgress(Lantern.BaseNode.TYPES.NETWORK); - if (inFlight === 0) return; - - for (const connection of this._connectionPool.connectionsInUse()) { - connection.setThroughput(this._throughput / inFlight); - } - } - - /** - * Estimates the number of milliseconds remaining given current condidtions before the node is complete. - * @param {Node} node - * @return {number} - */ - _estimateTimeRemaining(node) { - if (node.type === Lantern.BaseNode.TYPES.CPU) { - return this._estimateCPUTimeRemaining(node); - } else if (node.type === Lantern.BaseNode.TYPES.NETWORK) { - return this._estimateNetworkTimeRemaining(node); - } else { - throw new Error('Unsupported'); - } - } - - /** - * @param {CpuNode} cpuNode - * @return {number} - */ - _estimateCPUTimeRemaining(cpuNode) { - const timingData = this._nodeTimings.getCpuStarted(cpuNode); - const multiplier = cpuNode.didPerformLayout() - ? 
this._layoutTaskMultiplier - : this._cpuSlowdownMultiplier; - const totalDuration = Math.min( - Math.round(cpuNode.duration / 1000 * multiplier), - DEFAULT_MAXIMUM_CPU_TASK_DURATION - ); - const estimatedTimeElapsed = totalDuration - timingData.timeElapsed; - this._nodeTimings.setCpuEstimated(cpuNode, {estimatedTimeElapsed}); - return estimatedTimeElapsed; - } - - /** - * @param {NetworkNode} networkNode - * @return {number} - */ - _estimateNetworkTimeRemaining(networkNode) { - const request = networkNode.request; - const timingData = this._nodeTimings.getNetworkStarted(networkNode); - - let timeElapsed = 0; - if (networkNode.fromDiskCache) { - // Rough access time for seeking to location on disk and reading sequentially. - // 8ms per seek + 20ms/MB - // @see http://norvig.com/21-days.html#answers - const sizeInMb = (request.resourceSize || 0) / 1024 / 1024; - timeElapsed = 8 + 20 * sizeInMb - timingData.timeElapsed; - } else if (networkNode.isNonNetworkProtocol) { - // Estimates for the overhead of a data URL in Chromium and the decoding time for base64-encoded data. - // 2ms per request + 10ms/MB - // @see traces on https://dopiaza.org/tools/datauri/examples/index.php - const sizeInMb = (request.resourceSize || 0) / 1024 / 1024; - timeElapsed = 2 + 10 * sizeInMb - timingData.timeElapsed; - } else { - const connection = this._connectionPool.acquireActiveConnectionFromRequest(request); - const dnsResolutionTime = this._dns.getTimeUntilResolution(request, { - requestedAt: timingData.startTime, - shouldUpdateCache: true, - }); - const timeAlreadyElapsed = timingData.timeElapsed; - const calculation = connection.simulateDownloadUntil( - request.transferSize - timingData.bytesDownloaded, - {timeAlreadyElapsed, dnsResolutionTime, maximumTimeToElapse: Infinity} - ); - - timeElapsed = calculation.timeElapsed; - } - - const estimatedTimeElapsed = timeElapsed + timingData.timeElapsedOvershoot; - this._nodeTimings.setNetworkEstimated(networkNode, {estimatedTimeElapsed}); - return estimatedTimeElapsed; - } - - /** - * Computes and returns the minimum estimated completion time of the nodes currently in progress. - * @return {number} - */ - _findNextNodeCompletionTime() { - let minimumTime = Infinity; - for (const node of this._nodes[NodeState.InProgress]) { - minimumTime = Math.min(minimumTime, this._estimateTimeRemaining(node)); - } - - return minimumTime; - } - - /** - * Given a time period, computes the progress toward completion that the node made durin that time. - * @param {Node} node - * @param {number} timePeriodLength - * @param {number} totalElapsedTime - */ - _updateProgressMadeInTimePeriod(node, timePeriodLength, totalElapsedTime) { - const timingData = this._nodeTimings.getInProgress(node); - const isFinished = timingData.estimatedTimeElapsed === timePeriodLength; - - if (node.type === Lantern.BaseNode.TYPES.CPU || node.isConnectionless) { - return isFinished - ? 
this._markNodeAsComplete(node, totalElapsedTime) - : (timingData.timeElapsed += timePeriodLength); - } - - if (node.type !== Lantern.BaseNode.TYPES.NETWORK) throw new Error('Unsupported'); - if (!('bytesDownloaded' in timingData)) throw new Error('Invalid timing data'); - - const request = node.request; - const connection = this._connectionPool.acquireActiveConnectionFromRequest(request); - const dnsResolutionTime = this._dns.getTimeUntilResolution(request, { - requestedAt: timingData.startTime, - shouldUpdateCache: true, - }); - const calculation = connection.simulateDownloadUntil( - request.transferSize - timingData.bytesDownloaded, - { - dnsResolutionTime, - timeAlreadyElapsed: timingData.timeElapsed, - maximumTimeToElapse: timePeriodLength - timingData.timeElapsedOvershoot, - } - ); - - connection.setCongestionWindow(calculation.congestionWindow); - connection.setH2OverflowBytesDownloaded(calculation.extraBytesDownloaded); - - if (isFinished) { - connection.setWarmed(true); - this._connectionPool.release(request); - this._markNodeAsComplete(node, totalElapsedTime, calculation.connectionTiming); - } else { - timingData.timeElapsed += calculation.timeElapsed; - timingData.timeElapsedOvershoot += calculation.timeElapsed - timePeriodLength; - timingData.bytesDownloaded += calculation.bytesDownloaded; - } - } - - /** - * @return {{nodeTimings: Map, completeNodeTimings: Map}} - */ - _computeFinalNodeTimings() { - /** @type {Array<[Node, CompleteNodeTiming]>} */ - const completeNodeTimingEntries = this._nodeTimings.getNodes().map(node => { - return [node, this._nodeTimings.getCompleted(node)]; - }); - - // Most consumers will want the entries sorted by startTime, so insert them in that order - completeNodeTimingEntries.sort((a, b) => a[1].startTime - b[1].startTime); - - // Trimmed version of type `Lantern.Simulation.NodeTiming`. - /** @type {Array<[Node, Lantern.Simulation.NodeTiming]>} */ - const nodeTimingEntries = completeNodeTimingEntries.map(([node, timing]) => { - return [node, { - startTime: timing.startTime, - endTime: timing.endTime, - duration: timing.endTime - timing.startTime, - }]; - }); - - return { - nodeTimings: new Map(nodeTimingEntries), - completeNodeTimings: new Map(completeNodeTimingEntries), - }; - } - - /** - * @return {Required} - */ - getOptions() { - return this._options; - } - - /** - * Estimates the time taken to process all of the graph's nodes, returns the overall time along with - * each node annotated by start/end times. - * - * Simulator/connection pool are allowed to deviate from what was - * observed in the trace/devtoolsLog and start requests as soon as they are queued (i.e. do not - * wait around for a warm connection to be available if the original request was fetched on a warm - * connection). 
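// A worked pass of the scheduling loop below, using the simplest mixed graph from the unit
// tests (one network request at the default 150 ms RTT with a 500 ms server response time,
// followed by a dependent CPU task whose simulated duration is 200 ms):
//   iteration 1: the network node is the only startable node; its estimated completion is
//                950 ms (3 RTTs + 500 ms), so totalElapsedTime advances 0 -> 950 and the
//                CPU node becomes ready.
//   iteration 2: the CPU node runs alone; totalElapsedTime advances 950 -> 1150.
// The loop exits once no nodes are ready or in progress, and 1150 ms is returned as timeInMs.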
- * - * @param {Node} graph - * @param {{label?: string}=} options - * @return {Lantern.Simulation.Result} - */ - simulate(graph, options) { - if (Lantern.BaseNode.hasCycle(graph)) { - throw new Error('Cannot simulate graph with cycle'); - } - - options = Object.assign({ - label: undefined, - }, options); - - // initialize the necessary data containers - this._dns = new DNSCache({rtt: this._rtt}); - this._initializeConnectionPool(graph); - this._initializeAuxiliaryData(); - - const nodesNotReadyToStart = this._nodes[NodeState.NotReadyToStart]; - const nodesReadyToStart = this._nodes[NodeState.ReadyToStart]; - const nodesInProgress = this._nodes[NodeState.InProgress]; - - const rootNode = graph.getRootNode(); - rootNode.traverse(node => nodesNotReadyToStart.add(node)); - let totalElapsedTime = 0; - let iteration = 0; - - // root node is always ready to start - this._markNodeAsReadyToStart(rootNode, totalElapsedTime); - - // loop as long as we have nodes in the queue or currently in progress - while (nodesReadyToStart.size || nodesInProgress.size) { - // move all possible queued nodes to in progress - for (const node of this._getNodesSortedByStartPosition()) { - this._startNodeIfPossible(node, totalElapsedTime); - } - - if (!nodesInProgress.size) { - // Interplay between fromDiskCache and connectionReused can be incorrect, - // have to give up. - throw new Error('Failed to start a node'); - } - - // set the available throughput for all connections based on # inflight - this._updateNetworkCapacity(); - - // find the time that the next node will finish - const minimumTime = this._findNextNodeCompletionTime(); - totalElapsedTime += minimumTime; - - // While this is no longer strictly necessary, it's always better than hanging - if (!Number.isFinite(minimumTime) || iteration > 100000) { - throw new Error('Simulation failed, depth exceeded'); - } - - iteration++; - // update how far each node will progress until that point - for (const node of nodesInProgress) { - this._updateProgressMadeInTimePeriod(node, minimumTime, totalElapsedTime); - } - } - - // `nodeTimings` are used for simulator consumers, `completeNodeTimings` kept for debugging. - const {nodeTimings, completeNodeTimings} = this._computeFinalNodeTimings(); - ALL_SIMULATION_NODE_TIMINGS.set(options.label || 'unlabeled', completeNodeTimings); - - return { - timeInMs: totalElapsedTime, - nodeTimings, - }; - } - - /** - * @param {number} wastedBytes - */ - computeWastedMsFromWastedBytes(wastedBytes) { - const {throughput, observedThroughput} = this._options; - - // https://github.com/GoogleChrome/lighthouse/pull/13323#issuecomment-962031709 - // 0 throughput means the no (additional) throttling is expected. - // This is common for desktop + devtools throttling where throttling is additive and we don't want any additional. - const bitsPerSecond = throughput === 0 ? observedThroughput : throughput; - if (bitsPerSecond === 0) return 0; - - const wastedBits = wastedBytes * 8; - const wastedMs = wastedBits / bitsPerSecond * 1000; - - // This is an estimate of wasted time, so we won't be more precise than 10ms. - return Math.round(wastedMs / 10) * 10; - } - - /** @return {Map>} */ - static get ALL_NODE_TIMINGS() { - return ALL_SIMULATION_NODE_TIMINGS; - } - - /** - * We attempt to start nodes by their observed start time using the request priority as a tie breaker. - * When simulating, just because a low priority image started 5ms before a high priority image doesn't mean - * it would have happened like that when the network was slower. 
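// Worked reading of the formula below, assuming network node start times are expressed in
// microseconds (so penalty * 1000 * 1000 is a penalty of 0.25–2 s): a Low-priority request
// observed 300 ms (300,000 µs) before a VeryHigh-priority one gets an effective start
// position of startTime + 1,000,000 µs versus the VeryHigh request's startTime + 0, so the
// VeryHigh request is started first despite being observed later. CPU nodes are ordered
// purely by observed start time.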
- * @param {Node} node - */ - static _computeNodeStartPosition(node) { - if (node.type === 'cpu') return node.startTime; - return node.startTime + (PriorityStartTimePenalty[node.request.priority] * 1000 * 1000 || 0); - } -} - -export {Simulator}; diff --git a/core/lib/lantern/simulation/Simulator.test.js b/core/lib/lantern/simulation/Simulator.test.js deleted file mode 100644 index cc76f686dca3..000000000000 --- a/core/lib/lantern/simulation/Simulator.test.js +++ /dev/null @@ -1,434 +0,0 @@ -/** - * @license - * Copyright 2017 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import assert from 'assert/strict'; - -import * as Lantern from '../lantern.js'; -import {readJson} from '../../../test/test-utils.js'; -import {runTraceEngine} from '../metrics/MetricTestUtils.js'; - -const {NetworkNode, CPUNode} = Lantern; -const {Simulator, DNSCache} = Lantern.Simulation; - -const pwaTrace = readJson('../../../test/fixtures/artifacts/progressive-app/trace.json', import.meta); - -let nextRequestId = 1; -let nextTid = 1; - -/** - * @param {Lantern.Trace} trace - */ -async function createGraph(trace) { - const traceEngineData = await runTraceEngine(trace.traceEvents); - const requests = - Lantern.TraceEngineComputationData.createNetworkRequests(trace, traceEngineData); - return Lantern.TraceEngineComputationData.createGraph(requests, trace, traceEngineData); -} - -function request(opts) { - const scheme = opts.scheme || 'http'; - const url = `${scheme}://example.com`; - const rendererStartTime = opts.startTime; - const networkEndTime = opts.endTime; - delete opts.startTime; - delete opts.endTime; - - return Object.assign({ - requestId: opts.requestId || nextRequestId++, - url, - transferSize: opts.transferSize || 1000, - protocol: scheme, - parsedURL: {scheme, host: 'example.com', securityOrigin: url}, - timing: opts.timing, - rendererStartTime, - networkEndTime, - }, opts); -} - -function cpuTask({tid, ts, duration}) { - tid = tid || nextTid++; - ts = ts || 0; - const dur = ((duration || 0) * 1000) / 5; - return {tid, ts, dur}; -} -describe('DependencyGraph/Simulator', () => { - // Insulate the simulator tests from DNS multiplier changes - let originalDNSMultiplier; - - before(() => { - originalDNSMultiplier = DNSCache.RTT_MULTIPLIER; - DNSCache.RTT_MULTIPLIER = 1; - }); - - after(() => { - DNSCache.RTT_MULTIPLIER = originalDNSMultiplier; - }); - - describe('.simulate', () => { - const serverResponseTimeByOrigin = new Map([['http://example.com', 500]]); - - function assertNodeTiming(result, node, assertions) { - const timing = result.nodeTimings.get(node); - assert.ok(timing, 'missing node timing information'); - Object.keys(assertions).forEach(key => { - assert.equal(timing[key], assertions[key]); - }); - } - - it('should simulate basic network graphs', () => { - const rootNode = new NetworkNode(request({})); - const simulator = new Simulator({serverResponseTimeByOrigin}); - const result = simulator.simulate(rootNode); - // should be 3 RTTs and 500ms for the server response time - assert.equal(result.timeInMs, 450 + 500); - assertNodeTiming(result, rootNode, {startTime: 0, endTime: 950}); - }); - - it('should simulate basic mixed graphs', () => { - const rootNode = new NetworkNode(request({})); - const cpuNode = new CPUNode(cpuTask({duration: 200})); - cpuNode.addDependency(rootNode); - - const simulator = new Simulator({ - serverResponseTimeByOrigin, - cpuSlowdownMultiplier: 5, - }); - const result = simulator.simulate(rootNode); - // should be 3 RTTs and 500ms for the server response time 
+ 200 CPU - assert.equal(result.timeInMs, 450 + 500 + 200); - assertNodeTiming(result, rootNode, {startTime: 0, endTime: 950}); - assertNodeTiming(result, cpuNode, {startTime: 950, endTime: 1150}); - }); - - it('should simulate basic network waterfall graphs', () => { - const nodeA = new NetworkNode(request({startTime: 0, endTime: 1})); - const nodeB = new NetworkNode(request({startTime: 0, endTime: 3})); - const nodeC = new NetworkNode(request({startTime: 0, endTime: 5})); - const nodeD = new NetworkNode(request({startTime: 0, endTime: 7})); - - nodeA.addDependent(nodeB); - nodeB.addDependent(nodeC); - nodeC.addDependent(nodeD); - - const simulator = new Simulator({serverResponseTimeByOrigin}); - const result = simulator.simulate(nodeA); - // should be 950ms for A, 650ms each for B, C, D (no DNS and one-way connection) - assert.equal(result.timeInMs, 2900); - assertNodeTiming(result, nodeA, {startTime: 0, endTime: 950}); - assertNodeTiming(result, nodeB, {startTime: 950, endTime: 1600}); - assertNodeTiming(result, nodeC, {startTime: 1600, endTime: 2250}); - assertNodeTiming(result, nodeD, {startTime: 2250, endTime: 2900}); - }); - - it('should simulate cached network graphs', () => { - const nodeA = new NetworkNode(request({startTime: 0, endTime: 1, - fromDiskCache: true})); - const nodeB = new NetworkNode(request({startTime: 0, endTime: 3, - fromDiskCache: true})); - nodeA.addDependent(nodeB); - - const simulator = new Simulator({serverResponseTimeByOrigin}); - const result = simulator.simulate(nodeA); - // should be ~8ms each for A, B - assert.equal(result.timeInMs, 16); - assertNodeTiming(result, nodeA, {startTime: 0, endTime: 8}); - assertNodeTiming(result, nodeB, {startTime: 8, endTime: 16}); - }); - - it('should simulate data URL network graphs', () => { - const url = 'data:image/jpeg;base64,foobar'; - const protocol = 'data'; - const parsedURL = {scheme: 'data', host: '', securityOrigin: 'null'}; - const nodeA = new NetworkNode(request({startTime: 0, endTime: 1, url, - parsedURL, protocol})); - const nodeB = new NetworkNode(request({startTime: 0, endTime: 3, url, - parsedURL, protocol, resourceSize: 1024 * 1024})); - nodeA.addDependent(nodeB); - - const simulator = new Simulator({serverResponseTimeByOrigin}); - const result = simulator.simulate(nodeA); - - // should be ~2ms for A (resourceSize 0), ~12ms for B (resourceSize 1MB) - assert.equal(result.timeInMs, 14); - assertNodeTiming(result, nodeA, {startTime: 0, endTime: 2}); - assertNodeTiming(result, nodeB, {startTime: 2, endTime: 14}); - }); - - it('should simulate basic CPU queue graphs', () => { - const nodeA = new NetworkNode(request({})); - const nodeB = new CPUNode(cpuTask({duration: 100})); - const nodeC = new CPUNode(cpuTask({duration: 600})); - const nodeD = new CPUNode(cpuTask({duration: 300})); - - nodeA.addDependent(nodeB); - nodeA.addDependent(nodeC); - nodeA.addDependent(nodeD); - - const simulator = new Simulator({ - serverResponseTimeByOrigin, - cpuSlowdownMultiplier: 5, - }); - const result = simulator.simulate(nodeA); - // should be 800ms A, then 1000 ms total for B, C, D in serial - assert.equal(result.timeInMs, 1950); - assertNodeTiming(result, nodeA, {startTime: 0, endTime: 950}); - assertNodeTiming(result, nodeB, {startTime: 950, endTime: 1050}); - assertNodeTiming(result, nodeC, {startTime: 1050, endTime: 1650}); - assertNodeTiming(result, nodeD, {startTime: 1650, endTime: 1950}); - }); - - it('should simulate basic network waterfall graphs with CPU', () => { - const nodeA = new NetworkNode(request({})); - 
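// (Cross-check for the cached and data-URI expectations in the two tests above, using the
// simulator's non-network estimates of roughly 8 ms + 20 ms/MB for disk cache and
// 2 ms + 10 ms/MB for data URIs: two ~0-byte cached responses -> 8 + 8 = 16 ms, and a
// ~0-byte plus a 1 MB data URI -> 2 + (2 + 10) = 14 ms, matching the assertions above.)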
const nodeB = new NetworkNode(request({})); - const nodeC = new NetworkNode(request({})); - const nodeD = new NetworkNode(request({})); - const nodeE = new CPUNode(cpuTask({duration: 1000})); - const nodeF = new CPUNode(cpuTask({duration: 1000})); - - nodeA.addDependent(nodeB); - nodeB.addDependent(nodeC); - nodeB.addDependent(nodeE); // finishes 350 ms after C - nodeC.addDependent(nodeD); - nodeC.addDependent(nodeF); // finishes 700 ms after D - - const simulator = new Simulator({ - serverResponseTimeByOrigin, - cpuSlowdownMultiplier: 5, - }); - const result = simulator.simulate(nodeA); - // should be 950ms for A, 650ms each for B, C, D, with F finishing 700 ms after D - assert.equal(result.timeInMs, 3600); - }); - - it('should simulate basic parallel requests', () => { - const nodeA = new NetworkNode(request({})); - const nodeB = new NetworkNode(request({})); - const nodeC = new NetworkNode(request({transferSize: 15000})); - const nodeD = new NetworkNode(request({})); - - nodeA.addDependent(nodeB); - nodeA.addDependent(nodeC); - nodeA.addDependent(nodeD); - - const simulator = new Simulator({serverResponseTimeByOrigin}); - const result = simulator.simulate(nodeA); - // should be 950ms for A and 950ms for C (2 round trips of downloading, but no DNS) - assert.equal(result.timeInMs, 950 + 950); - }); - - it('should make connections in parallel', () => { - const nodeA = new NetworkNode(request({startTime: 0, networkRequestTime: 0, endTime: 1})); - const nodeB = new NetworkNode(request({startTime: 2, networkRequestTime: 2, endTime: 3})); - const nodeC = new NetworkNode(request({startTime: 2, networkRequestTime: 2, endTime: 5})); - const nodeD = new NetworkNode(request({startTime: 2, networkRequestTime: 2, endTime: 7})); - - nodeA.addDependent(nodeB); - nodeA.addDependent(nodeC); - nodeA.addDependent(nodeD); - - const simulator = new Simulator({serverResponseTimeByOrigin}); - const result = simulator.simulate(nodeA); - // should be 950ms for A, 650ms for B reusing connection, 800ms for C and D in parallel. 
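// One way to account for those numbers (150 ms RTT, 500 ms server response, plain HTTP,
// DNS multiplier pinned to 1 in this suite): A pays DNS (150) + handshake and request (225)
// + response (75) + 500 = 950 ms; B reuses A's connection, so 75 + 75 + 500 = 650 ms;
// C and D open fresh connections but hit the DNS cache, so 225 + 75 + 500 = 800 ms each,
// and they run in parallel.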
- assert.equal(result.timeInMs, 950 + 800); - assertNodeTiming(result, nodeA, {startTime: 0, endTime: 950}); - assertNodeTiming(result, nodeB, {startTime: 950, endTime: 1600}); - assertNodeTiming(result, nodeC, {startTime: 950, endTime: 1750}); - assertNodeTiming(result, nodeD, {startTime: 950, endTime: 1750}); - }); - - it('should adjust throughput based on number of requests', () => { - const nodeA = new NetworkNode(request({})); - const nodeB = new NetworkNode(request({})); - const nodeC = new NetworkNode(request({transferSize: 14000})); - const nodeD = new NetworkNode(request({})); - - nodeA.addDependent(nodeB); - nodeA.addDependent(nodeC); - nodeA.addDependent(nodeD); - - // 80 kbps while all 3 download at 150ms/RT = ~1460 bytes/RT - // 240 kbps while the last one finishes at 150ms/RT = ~4380 bytes/RT - // ~14000 bytes = 5 RTs - // 1 RT 80 kbps b/c its shared - // 1 RT 80 kbps b/c it needs to grow congestion window from being shared - // 1 RT 160 kbps b/c TCP - // 2 RT 240 kbps b/c throughput cap - const simulator = new Simulator({serverResponseTimeByOrigin, throughput: 240000}); - const result = simulator.simulate(nodeA); - // should be 950ms for A and 1400ms for C (5 round trips of downloading) - assert.equal(result.timeInMs, 950 + (150 + 750 + 500)); - }); - - it('should start network requests in startTime order', () => { - const rootNode = new NetworkNode(request({startTime: 0, endTime: 0.05, - connectionId: 1})); - const imageNodes = [ - new NetworkNode(request({startTime: 5})), - new NetworkNode(request({startTime: 4})), - new NetworkNode(request({startTime: 3})), - new NetworkNode(request({startTime: 2})), - new NetworkNode(request({startTime: 1})), - ]; - - for (const imageNode of imageNodes) { - imageNode.request.connectionReused = true; - imageNode.request.connectionId = 1; - rootNode.addDependent(imageNode); - } - - const simulator = new Simulator({serverResponseTimeByOrigin, maximumConcurrentRequests: 1}); - const result = simulator.simulate(rootNode); - - // should be 3 RTs + SRT for rootNode (950ms) - // should be 1 RT + SRT for image nodes in observed order (650ms) - assertNodeTiming(result, rootNode, {startTime: 0, endTime: 950}); - assertNodeTiming(result, imageNodes[4], {startTime: 950, endTime: 1600}); - assertNodeTiming(result, imageNodes[3], {startTime: 1600, endTime: 2250}); - assertNodeTiming(result, imageNodes[2], {startTime: 2250, endTime: 2900}); - assertNodeTiming(result, imageNodes[1], {startTime: 2900, endTime: 3550}); - assertNodeTiming(result, imageNodes[0], {startTime: 3550, endTime: 4200}); - }); - - it('should start network requests in priority order to break startTime ties', () => { - const rootNode = new NetworkNode(request({startTime: 0, endTime: 0.05, - connectionId: 1})); - const imageNodes = [ - new NetworkNode(request({startTime: 0.1, priority: 'VeryLow'})), - new NetworkNode(request({startTime: 0.2, priority: 'Low'})), - new NetworkNode(request({startTime: 0.3, priority: 'Medium'})), - new NetworkNode(request({startTime: 0.4, priority: 'High'})), - new NetworkNode(request({startTime: 0.5, priority: 'VeryHigh'})), - ]; - - for (const imageNode of imageNodes) { - imageNode.request.connectionReused = true; - imageNode.request.connectionId = 1; - rootNode.addDependent(imageNode); - } - - const simulator = new Simulator({serverResponseTimeByOrigin, maximumConcurrentRequests: 1}); - const result = simulator.simulate(rootNode); - - // should be 3 RTs + SRT for rootNode (950ms) - // should be 1 RT + SRT for image nodes in priority order (650ms) - 
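// The ordering follows from the simulator's start-position formula: with penalty terms of
// 0, 250,000, 500,000, 1,000,000 and 2,000,000 added for VeryHigh through VeryLow, the small
// differences in observed start times are dwarfed by the penalty, so the images are simulated
// strictly from highest to lowest priority, as asserted below.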
assertNodeTiming(result, rootNode, {startTime: 0, endTime: 950}); - assertNodeTiming(result, imageNodes[4], {startTime: 950, endTime: 1600}); - assertNodeTiming(result, imageNodes[3], {startTime: 1600, endTime: 2250}); - assertNodeTiming(result, imageNodes[2], {startTime: 2250, endTime: 2900}); - assertNodeTiming(result, imageNodes[1], {startTime: 2900, endTime: 3550}); - assertNodeTiming(result, imageNodes[0], {startTime: 3550, endTime: 4200}); - }); - - it('should simulate two graphs in a row', () => { - const simulator = new Simulator({serverResponseTimeByOrigin}); - - const nodeA = new NetworkNode(request({})); - const nodeB = new NetworkNode(request({})); - const nodeC = new NetworkNode(request({transferSize: 15000})); - const nodeD = new NetworkNode(request({})); - - nodeA.addDependent(nodeB); - nodeA.addDependent(nodeC); - nodeA.addDependent(nodeD); - - const resultA = simulator.simulate(nodeA); - // should be 950ms for A and 950ms for C (2 round trips of downloading, no DNS) - assert.equal(resultA.timeInMs, 950 + 950); - - const nodeE = new NetworkNode(request({})); - const nodeF = new NetworkNode(request({})); - const nodeG = new NetworkNode(request({})); - - nodeE.addDependent(nodeF); - nodeE.addDependent(nodeG); - - const resultB = simulator.simulate(nodeE); - // should be 950ms for E and 800ms for F/G - assert.equal(resultB.timeInMs, 950 + 800); - }); - - it('should maximize throughput with H2', () => { - const simulator = new Simulator({serverResponseTimeByOrigin}); - const connectionDefaults = {protocol: 'h2', connectionId: 1}; - const nodeA = new NetworkNode(request({startTime: 0, endTime: 1, - ...connectionDefaults})); - const nodeB = new NetworkNode(request({startTime: 1, endTime: 2, - ...connectionDefaults})); - const nodeC = new NetworkNode(request({startTime: 2, endTime: 3, - ...connectionDefaults})); - const nodeD = new NetworkNode(request({startTime: 3, endTime: 4, - ...connectionDefaults})); - - nodeA.addDependent(nodeB); - nodeB.addDependent(nodeC); - nodeB.addDependent(nodeD); - - // Run two simulations: - // - The first with C & D in parallel. - // - The second with C & D in series. - // Under HTTP/2 simulation these should be equivalent, but definitely parallel - // shouldn't be slower. 
- const resultA = simulator.simulate(nodeA); - nodeC.addDependent(nodeD); - const resultB = simulator.simulate(nodeA); - expect(resultA.timeInMs).toBeLessThanOrEqual(resultB.timeInMs); - }); - - it('should throw (not hang) on graphs with cycles', () => { - const rootNode = new NetworkNode(request({})); - const depNode = new NetworkNode(request({})); - rootNode.addDependency(depNode); - depNode.addDependency(rootNode); - - const simulator = new Simulator({serverResponseTimeByOrigin}); - assert.throws(() => simulator.simulate(rootNode), /cycle/); - }); - - describe('on a real trace', () => { - const trace = pwaTrace; - - it('should compute a timeInMs', async () => { - const graph = await createGraph(trace); - const simulator = new Simulator({serverResponseTimeByOrigin}); - const result = simulator.simulate(graph); - expect(result.timeInMs).toBeGreaterThan(100); - }); - - it('should sort the task event times', async () => { - const graph = await createGraph(trace); - const simulator = new Simulator({serverResponseTimeByOrigin}); - const result = simulator.simulate(graph); - const nodeTimings = Array.from(result.nodeTimings.entries()); - - for (let i = 1; i < nodeTimings.length; i++) { - const startTime = nodeTimings[i][1].startTime; - const previousStartTime = nodeTimings[i - 1][1].startTime; - expect(startTime).toBeGreaterThanOrEqual(previousStartTime); - } - }); - }); - }); - - describe('.simulateTimespan', () => { - it('calculates savings using throughput', () => { - const simulator = new Simulator({throughput: 1000, observedThroughput: 2000}); - const wastedMs = simulator.computeWastedMsFromWastedBytes(500); - expect(wastedMs).toBeCloseTo(4000); - }); - - it('falls back to observed throughput if throughput is 0', () => { - const simulator = new Simulator({throughput: 0, observedThroughput: 2000}); - const wastedMs = simulator.computeWastedMsFromWastedBytes(500); - expect(wastedMs).toBeCloseTo(2000); - }); - - it('returns 0 if throughput and observed throughput are 0', () => { - const simulator = new Simulator({throughput: 0, observedThroughput: 0}); - const wastedMs = simulator.computeWastedMsFromWastedBytes(500); - expect(wastedMs).toEqual(0); - }); - }); -}); diff --git a/core/lib/lantern/simulation/TCPConnection.test.js b/core/lib/lantern/simulation/TCPConnection.test.js deleted file mode 100644 index dcf2902822de..000000000000 --- a/core/lib/lantern/simulation/TCPConnection.test.js +++ /dev/null @@ -1,374 +0,0 @@ -/** - * @license - * Copyright 2017 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import assert from 'assert/strict'; - -import * as Lantern from '../lantern.js'; - -const {TcpConnection} = Lantern.Simulation; - -describe('DependencyGraph/Simulator/TcpConnection', () => { - describe('#constructor', () => { - it('should create the connection', () => { - const rtt = 150; - const throughput = 1600 * 1024; - const connection = new TcpConnection(rtt, throughput); - assert.ok(connection); - assert.equal(connection._rtt, rtt); - }); - }); - - describe('#maximumSaturatedConnections', () => { - it('should compute number of supported simulated requests', () => { - const availableThroughput = 1460 * 8 * 10; // 10 TCP segments/second - assert.equal(TcpConnection.maximumSaturatedConnections(100, availableThroughput), 1); - assert.equal(TcpConnection.maximumSaturatedConnections(300, availableThroughput), 3); - assert.equal(TcpConnection.maximumSaturatedConnections(1000, availableThroughput), 10); - }); - }); - - describe('.setWarmed', () => { - it('adjusts the time to 
download appropriately', () => { - const connection = new TcpConnection(100, Infinity); - assert.equal(connection.simulateDownloadUntil(0).timeElapsed, 300); - connection.setWarmed(true); - assert.equal(connection.simulateDownloadUntil(0).timeElapsed, 100); - }); - }); - - describe('.setCongestionWindow', () => { - it('adjusts the time to download appropriately', () => { - const connection = new TcpConnection(100, Infinity); - assert.deepEqual(connection.simulateDownloadUntil(50000), { - bytesDownloaded: 50000, - extraBytesDownloaded: 0, - congestionWindow: 40, - roundTrips: 5, - timeElapsed: 500, - connectionTiming: { - connectionTime: 250, - dnsResolutionTime: 0, - sslTime: 100, - timeToFirstByte: 300, - }, - }); - connection.setCongestionWindow(40); // will download all in one round trip - assert.deepEqual(connection.simulateDownloadUntil(50000), { - bytesDownloaded: 50000, - extraBytesDownloaded: 0, - congestionWindow: 40, - roundTrips: 3, - timeElapsed: 300, - connectionTiming: { - connectionTime: 250, - dnsResolutionTime: 0, - sslTime: 100, - timeToFirstByte: 300, - }, - }); - }); - }); - - describe('.setH2OverflowBytesDownloaded', () => { - it('adjusts the time to download appropriately for H2 connections', () => { - const connection = new TcpConnection(100, Infinity, 0, true, true); - connection.setWarmed(true); - assert.equal(connection.simulateDownloadUntil(30000).timeElapsed, 200); - connection.setH2OverflowBytesDownloaded(20000); - assert.equal(connection.simulateDownloadUntil(30000).timeElapsed, 100); - connection.setH2OverflowBytesDownloaded(50000); - assert.equal(connection.simulateDownloadUntil(30000).timeElapsed, 0); - }); - - it('does not adjust the time to download for non-H2 connections', () => { - const connection = new TcpConnection(100, Infinity, 0, true, false); - connection.setWarmed(true); - assert.equal(connection.simulateDownloadUntil(30000).timeElapsed, 200); - connection.setH2OverflowBytesDownloaded(20000); - assert.equal(connection.simulateDownloadUntil(30000).timeElapsed, 200); - connection.setH2OverflowBytesDownloaded(50000); - assert.equal(connection.simulateDownloadUntil(30000).timeElapsed, 200); - }); - }); - - describe('.simulateDownloadUntil', () => { - describe('when maximumTime is not set', () => { - it('should provide the correct values small payload non-SSL', () => { - const connection = new TcpConnection(100, Infinity, 0, false); - assert.deepEqual(connection.simulateDownloadUntil(7300), { - bytesDownloaded: 7300, - extraBytesDownloaded: 0, - congestionWindow: 10, - roundTrips: 2, - timeElapsed: 200, - connectionTiming: { - connectionTime: 150, - dnsResolutionTime: 0, - sslTime: undefined, // non-SSL - timeToFirstByte: 200, - }, - }); - }); - - it('should provide the correct values small payload SSL', () => { - const connection = new TcpConnection(100, Infinity, 0, true); - assert.deepEqual(connection.simulateDownloadUntil(7300), { - bytesDownloaded: 7300, - extraBytesDownloaded: 0, - congestionWindow: 10, - roundTrips: 3, - timeElapsed: 300, - connectionTiming: { - connectionTime: 250, - dnsResolutionTime: 0, - sslTime: 100, - timeToFirstByte: 300, - }, - }); - }); - - it('should provide the correct values small payload H2', () => { - const connection = new TcpConnection(100, Infinity, 0, true, true); - assert.deepEqual(connection.simulateDownloadUntil(7300), { - bytesDownloaded: 7300, - extraBytesDownloaded: 7300, - congestionWindow: 10, - roundTrips: 3, - timeElapsed: 300, - connectionTiming: { - connectionTime: 250, - dnsResolutionTime: 0, - 
sslTime: 100, - timeToFirstByte: 300, - }, - }); - }); - - it('should provide the correct values response time', () => { - const responseTime = 78; - const connection = new TcpConnection(100, Infinity, responseTime, true); - assert.deepEqual(connection.simulateDownloadUntil(7300), { - bytesDownloaded: 7300, - extraBytesDownloaded: 0, - congestionWindow: 10, - roundTrips: 3, - timeElapsed: 300 + responseTime, - connectionTiming: { - connectionTime: 250, - dnsResolutionTime: 0, - sslTime: 100, - timeToFirstByte: 378, - }, - }); - }); - - it('should provide the correct values large payload', () => { - const connection = new TcpConnection(100, 8 * 1000 * 1000); - const bytesToDownload = 10 * 1000 * 1000; // 10 mb - assert.deepEqual(connection.simulateDownloadUntil(bytesToDownload), { - bytesDownloaded: bytesToDownload, - extraBytesDownloaded: 0, - congestionWindow: 68, - roundTrips: 105, - timeElapsed: 10500, - connectionTiming: { - connectionTime: 250, - dnsResolutionTime: 0, - sslTime: 100, - timeToFirstByte: 300, - }, - }); - }); - - it('should provide the correct values resumed small payload', () => { - const connection = new TcpConnection(100, Infinity, 0, true); - assert.deepEqual(connection.simulateDownloadUntil(7300, {timeAlreadyElapsed: 250}), { - bytesDownloaded: 7300, - extraBytesDownloaded: 0, - congestionWindow: 10, - roundTrips: 3, - timeElapsed: 50, - connectionTiming: { - connectionTime: 250, - dnsResolutionTime: 0, - sslTime: 100, - timeToFirstByte: 300, - }, - }); - }); - - it('should provide the correct values resumed small payload H2', () => { - const connection = new TcpConnection(100, Infinity, 0, true, true); - connection.setWarmed(true); - connection.setH2OverflowBytesDownloaded(10000); - assert.deepEqual(connection.simulateDownloadUntil(7300), { - bytesDownloaded: 0, - extraBytesDownloaded: 2700, // 10000 - 7300 - congestionWindow: 10, - roundTrips: 0, - timeElapsed: 0, - connectionTiming: { - timeToFirstByte: 0, - }, - }); - }); - - it('should provide the correct values resumed large payload', () => { - const connection = new TcpConnection(100, 8 * 1000 * 1000); - const bytesToDownload = 5 * 1000 * 1000; // 5 mb - connection.setCongestionWindow(68); - assert.deepEqual( - connection.simulateDownloadUntil(bytesToDownload, {timeAlreadyElapsed: 5234}), - { - bytesDownloaded: bytesToDownload, - extraBytesDownloaded: 0, - congestionWindow: 68, - roundTrips: 51, // 5 mb / (1460 * 68) - timeElapsed: 5100, - connectionTiming: { - connectionTime: 250, - dnsResolutionTime: 0, - sslTime: 100, - timeToFirstByte: 300, - }, - } - ); - }); - }); - - describe('when maximumTime is set', () => { - it('should provide the correct values less than TTFB', () => { - const connection = new TcpConnection(100, Infinity, 0, false); - assert.deepEqual( - connection.simulateDownloadUntil(7300, {timeAlreadyElapsed: 0, maximumTimeToElapse: 68}), - { - bytesDownloaded: 7300, - extraBytesDownloaded: 0, - congestionWindow: 10, - roundTrips: 2, - timeElapsed: 200, - connectionTiming: { - connectionTime: 150, - dnsResolutionTime: 0, - sslTime: undefined, // non-SSL - timeToFirstByte: 200, - }, - } - ); - }); - - it('should provide the correct values just over TTFB', () => { - const connection = new TcpConnection(100, Infinity, 0, false); - assert.deepEqual( - connection.simulateDownloadUntil(7300, {timeAlreadyElapsed: 0, maximumTimeToElapse: 250}), - { - bytesDownloaded: 7300, - extraBytesDownloaded: 0, - congestionWindow: 10, - roundTrips: 2, - timeElapsed: 200, - connectionTiming: { - connectionTime: 
150, - dnsResolutionTime: 0, - sslTime: undefined, // non-SSL - timeToFirstByte: 200, - }, - } - ); - }); - - it('should provide the correct values with already elapsed', () => { - const connection = new TcpConnection(100, Infinity, 0, false); - assert.deepEqual( - connection.simulateDownloadUntil(7300, { - timeAlreadyElapsed: 75, - maximumTimeToElapse: 250, - }), - { - bytesDownloaded: 7300, - extraBytesDownloaded: 0, - congestionWindow: 10, - roundTrips: 2, - timeElapsed: 125, - connectionTiming: { - connectionTime: 150, - dnsResolutionTime: 0, - sslTime: undefined, // non-SSL - timeToFirstByte: 200, - }, - } - ); - }); - - it('should provide the correct values large payloads', () => { - const connection = new TcpConnection(100, 8 * 1000 * 1000); - const bytesToDownload = 10 * 1000 * 1000; // 10 mb - assert.deepEqual( - connection.simulateDownloadUntil(bytesToDownload, { - timeAlreadyElapsed: 500, - maximumTimeToElapse: 740, - }), - { - bytesDownloaded: 683280, // should be less than 68 * 1460 * 8 - extraBytesDownloaded: 0, - congestionWindow: 68, - roundTrips: 8, - timeElapsed: 800, // skips the handshake because time already elapsed - connectionTiming: { - connectionTime: 250, - dnsResolutionTime: 0, - sslTime: 100, - timeToFirstByte: 300, - }, - } - ); - }); - - it('should all add up', () => { - const connection = new TcpConnection(100, 8 * 1000 * 1000); - const bytesToDownload = 10 * 1000 * 1000; // 10 mb - const firstStoppingPoint = 5234; - const secondStoppingPoint = 315; - const thirdStoppingPoint = 10500 - firstStoppingPoint - secondStoppingPoint; - - const firstSegment = connection.simulateDownloadUntil(bytesToDownload, { - timeAlreadyElapsed: 0, - maximumTimeToElapse: firstStoppingPoint, - }); - const firstOvershoot = firstSegment.timeElapsed - firstStoppingPoint; - - connection.setCongestionWindow(firstSegment.congestionWindow); - const secondSegment = connection.simulateDownloadUntil( - bytesToDownload - firstSegment.bytesDownloaded, - { - timeAlreadyElapsed: firstSegment.timeElapsed, - maximumTimeToElapse: secondStoppingPoint - firstOvershoot, - } - ); - const secondOvershoot = firstOvershoot + secondSegment.timeElapsed - secondStoppingPoint; - - connection.setCongestionWindow(secondSegment.congestionWindow); - const thirdSegment = connection.simulateDownloadUntil( - bytesToDownload - firstSegment.bytesDownloaded - secondSegment.bytesDownloaded, - {timeAlreadyElapsed: firstSegment.timeElapsed + secondSegment.timeElapsed} - ); - const thirdOvershoot = secondOvershoot + thirdSegment.timeElapsed - thirdStoppingPoint; - - assert.equal(thirdOvershoot, 0); - assert.equal( - firstSegment.bytesDownloaded + - secondSegment.bytesDownloaded + - thirdSegment.bytesDownloaded, - bytesToDownload - ); - assert.equal( - firstSegment.timeElapsed + secondSegment.timeElapsed + thirdSegment.timeElapsed, - 10500 - ); - }); - }); - }); -}); diff --git a/core/lib/lantern/simulation/TcpConnection.js b/core/lib/lantern/simulation/TcpConnection.js deleted file mode 100644 index 3d1ad29ccd00..000000000000 --- a/core/lib/lantern/simulation/TcpConnection.js +++ /dev/null @@ -1,230 +0,0 @@ -/** - * @license - * Copyright 2017 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -/** @typedef {import('./SimulationTimingMap.js').ConnectionTiming} ConnectionTiming */ - -const INITIAL_CONGESTION_WINDOW = 10; -const TCP_SEGMENT_SIZE = 1460; - -class TcpConnection { - /** - * @param {number} rtt - * @param {number} throughput - * @param {number=} serverLatency - * @param {boolean=} ssl - * @param 
{boolean=} h2 - */ - constructor(rtt, throughput, serverLatency = 0, ssl = true, h2 = false) { - this._warmed = false; - this._ssl = ssl; - this._h2 = h2; - this._rtt = rtt; - this._throughput = throughput; - this._serverLatency = serverLatency; - this._congestionWindow = INITIAL_CONGESTION_WINDOW; - this._h2OverflowBytesDownloaded = 0; - } - - /** - * @param {number} rtt - * @param {number} availableThroughput - * @return {number} - */ - static maximumSaturatedConnections(rtt, availableThroughput) { - const roundTripsPerSecond = 1000 / rtt; - const bytesPerRoundTrip = TCP_SEGMENT_SIZE; - const bytesPerSecond = roundTripsPerSecond * bytesPerRoundTrip; - const minimumThroughputRequiredPerRequest = bytesPerSecond * 8; - return Math.floor(availableThroughput / minimumThroughputRequiredPerRequest); - } - - /** - * @return {number} - */ - _computeMaximumCongestionWindowInSegments() { - const bytesPerSecond = this._throughput / 8; - const secondsPerRoundTrip = this._rtt / 1000; - const bytesPerRoundTrip = bytesPerSecond * secondsPerRoundTrip; - return Math.floor(bytesPerRoundTrip / TCP_SEGMENT_SIZE); - } - - /** - * @param {number} throughput - */ - setThroughput(throughput) { - this._throughput = throughput; - } - - /** - * @param {number} congestion - */ - setCongestionWindow(congestion) { - this._congestionWindow = congestion; - } - - /** - * @param {boolean} warmed - */ - setWarmed(warmed) { - this._warmed = warmed; - } - - /** - * @return {boolean} - */ - isWarm() { - return this._warmed; - } - - /** - * @return {boolean} - */ - isH2() { - return this._h2; - } - - /** - * @return {number} - */ - get congestionWindow() { - return this._congestionWindow; - } - - /** - * Sets the number of excess bytes that are available to this connection on future downloads, only - * applies to H2 connections. - * @param {number} bytes - */ - setH2OverflowBytesDownloaded(bytes) { - if (!this._h2) return; - this._h2OverflowBytesDownloaded = bytes; - } - - /** - * @return {TcpConnection} - */ - clone() { - return Object.assign(new TcpConnection(this._rtt, this._throughput), this); - } - - /** - * Simulates a network download of a particular number of bytes over an optional maximum amount of time - * and returns information about the ending state. - * - * See https://hpbn.co/building-blocks-of-tcp/#three-way-handshake and - * https://hpbn.co/transport-layer-security-tls/#tls-handshake for details. - * - * @param {number} bytesToDownload - * @param {DownloadOptions} [options] - * @return {DownloadResults} - */ - simulateDownloadUntil(bytesToDownload, options) { - const {timeAlreadyElapsed = 0, maximumTimeToElapse = Infinity, dnsResolutionTime = 0} = - options || {}; - - if (this._warmed && this._h2) { - bytesToDownload -= this._h2OverflowBytesDownloaded; - } - const twoWayLatency = this._rtt; - const oneWayLatency = twoWayLatency / 2; - const maximumCongestionWindow = this._computeMaximumCongestionWindowInSegments(); - - let handshakeAndRequest = oneWayLatency; - if (!this._warmed) { - handshakeAndRequest = - // DNS lookup - dnsResolutionTime + - // SYN - oneWayLatency + - // SYN ACK - oneWayLatency + - // ACK + initial request - oneWayLatency + - // ClientHello/ServerHello assuming TLS False Start is enabled (https://istlsfastyet.com/#server-performance). - (this._ssl ? 
twoWayLatency : 0); - } - - let roundTrips = Math.ceil(handshakeAndRequest / twoWayLatency); - let timeToFirstByte = handshakeAndRequest + this._serverLatency + oneWayLatency; - if (this._warmed && this._h2) timeToFirstByte = 0; - - const timeElapsedForTTFB = Math.max(timeToFirstByte - timeAlreadyElapsed, 0); - const maximumDownloadTimeToElapse = maximumTimeToElapse - timeElapsedForTTFB; - - let congestionWindow = Math.min(this._congestionWindow, maximumCongestionWindow); - let totalBytesDownloaded = 0; - if (timeElapsedForTTFB > 0) { - totalBytesDownloaded = congestionWindow * TCP_SEGMENT_SIZE; - } else { - roundTrips = 0; - } - - let downloadTimeElapsed = 0; - let bytesRemaining = bytesToDownload - totalBytesDownloaded; - while (bytesRemaining > 0 && downloadTimeElapsed <= maximumDownloadTimeToElapse) { - roundTrips++; - downloadTimeElapsed += twoWayLatency; - congestionWindow = Math.max(Math.min(maximumCongestionWindow, congestionWindow * 2), 1); - - const bytesDownloadedInWindow = congestionWindow * TCP_SEGMENT_SIZE; - totalBytesDownloaded += bytesDownloadedInWindow; - bytesRemaining -= bytesDownloadedInWindow; - } - - const timeElapsed = timeElapsedForTTFB + downloadTimeElapsed; - const extraBytesDownloaded = this._h2 ? Math.max(totalBytesDownloaded - bytesToDownload, 0) : 0; - const bytesDownloaded = Math.max(Math.min(totalBytesDownloaded, bytesToDownload), 0); - - /** @type {ConnectionTiming} */ - let connectionTiming; - if (!this._warmed) { - connectionTiming = { - dnsResolutionTime, - connectionTime: handshakeAndRequest - dnsResolutionTime, - sslTime: this._ssl ? twoWayLatency : undefined, - timeToFirstByte, - }; - } else if (this._h2) { - // TODO: timing information currently difficult to model for warm h2 connections. - connectionTiming = { - timeToFirstByte, - }; - } else { - connectionTiming = { - connectionTime: handshakeAndRequest, - timeToFirstByte, - }; - } - - return { - roundTrips, - timeElapsed, - bytesDownloaded, - extraBytesDownloaded, - congestionWindow, - connectionTiming, - }; - } -} - -export {TcpConnection}; - -/** - * @typedef DownloadOptions - * @property {number} [dnsResolutionTime] - * @property {number} [timeAlreadyElapsed] - * @property {number} [maximumTimeToElapse] - */ - -/** - * @typedef DownloadResults - * @property {number} roundTrips - * @property {number} timeElapsed - * @property {number} bytesDownloaded - * @property {number} extraBytesDownloaded - * @property {number} congestionWindow - * @property {ConnectionTiming} connectionTiming - */ diff --git a/core/lib/lantern/simulation/simulation.js b/core/lib/lantern/simulation/simulation.js deleted file mode 100644 index 6fa461ad199e..000000000000 --- a/core/lib/lantern/simulation/simulation.js +++ /dev/null @@ -1,28 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Lantern from '../types/lantern.js'; - -export {ConnectionPool} from './ConnectionPool.js'; -export {Constants} from './Constants.js'; -export {DNSCache} from './DNSCache.js'; -export {NetworkAnalyzer} from './NetworkAnalyzer.js'; -export {Simulator} from './Simulator.js'; -export {SimulatorTimingMap} from './SimulationTimingMap.js'; -export {TcpConnection} from './TcpConnection.js'; - -/** @template [T=any] @typedef {Lantern.Simulation.GraphNetworkNode} GraphNetworkNode */ -/** @template [T=any] @typedef {Lantern.Simulation.GraphNode} GraphNode */ -/** @template [T=any] @typedef {Lantern.Simulation.Result} Result */ -/** @typedef {Lantern.Simulation.GraphCPUNode} 
GraphCPUNode */ -/** @typedef {Lantern.Simulation.MetricCoefficients} MetricCoefficients */ -/** @typedef {Lantern.Simulation.MetricComputationDataInput} MetricComputationDataInput */ -/** @typedef {Lantern.Simulation.NodeTiming} NodeTiming */ -/** @typedef {Lantern.Simulation.Options} Options */ -/** @typedef {Lantern.Simulation.PrecomputedLanternData} PrecomputedLanternData */ -/** @typedef {Lantern.Simulation.ProcessedNavigation} ProcessedNavigation */ -/** @typedef {Lantern.Simulation.Settings} Settings */ -/** @typedef {Lantern.Simulation.URL} URL */ diff --git a/core/lib/lantern/types/lantern.d.ts b/core/lib/lantern/types/lantern.d.ts deleted file mode 100644 index a29b53cb608c..000000000000 --- a/core/lib/lantern/types/lantern.d.ts +++ /dev/null @@ -1,256 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import * as Protocol from '@paulirish/trace_engine/generated/protocol.js'; - -declare module Util { - /** An object with the keys in the union K mapped to themselves as values. */ - type SelfMap<K extends string> = { - [P in K]: P; - }; -} - -type TraceEvent = { - name: string; - cat: string; - args: { - name?: string; - fileName?: string; - snapshot?: string; - sync_id?: string; - beginData?: { - frame?: string; - startLine?: number; - url?: string; - }; - data?: { - frame?: string; - readyState?: number; - stackTrace?: { - url: string - }[]; - url?: string; - }; - }; - pid: number; - tid: number; - /** Timestamp of the event in microseconds. */ - ts: number; - dur: number; -} -type Trace = {traceEvents: TraceEvent[]}; -type ResourcePriority = ('VeryLow' | 'Low' | 'Medium' | 'High' | 'VeryHigh'); -type ResourceType = ('Document' | 'Stylesheet' | 'Image' | 'Media' | 'Font' | 'Script' | 'TextTrack' | 'XHR' | 'Fetch' | 'Prefetch' | 'EventSource' | 'WebSocket' | 'Manifest' | 'SignedExchange' | 'Ping' | 'CSPViolationReport' | 'Preflight' | 'Other'); -type InitiatorType = ('parser' | 'script' | 'preload' | 'SignedExchange' | 'preflight' | 'other'); -type ResourceTiming = Protocol.Network.ResourceTiming; -type CallStack = { - callFrames: Array<{ - scriptId: string; - url: string; - lineNumber: number; - columnNumber: number; - functionName: string; - }>; - parent?: CallStack; -} - -type ParsedURL = { - /** - * Equivalent to a `new URL(url).protocol` BUT w/o the trailing colon (:) - */ - scheme: string; - /** - * Equivalent to a `new URL(url).hostname` - */ - host: string; - securityOrigin: string; -}; -type LightriderStatistics = { - /** - * The difference in networkEndTime between the observed Lighthouse networkEndTime and Lightrider's derived networkEndTime. - */ - endTimeDeltaMs: number; - /** - * The time spent making a TCP connection (connect + SSL). Note: this is poorly named. - */ - TCPMs: number; - /** - * The time spent requesting a resource from a remote server, we use this to approx RTT. Note: this is poorly named, it really should be "server response time". - */ - requestMs: number; - /** - * Time to receive the entire response payload starting the clock on receiving the first fragment (first non-header byte). - */ - responseMs: number; -}; - -export class NetworkRequest<T = any> { - /** - * Implementation-specific canonical data structure that this Lantern NetworkRequest - * was derived from. - * Users of Lantern create a NetworkRequest matching this interface, - * but can store the source-of-truth for their network model in this property. - * This is then accessible as a read-only property on NetworkNode.
- */ - rawRequest?: T; - - requestId: string; - connectionId: number; - connectionReused: boolean; - url: string; - protocol: string; - parsedURL: ParsedURL; - documentURL: string; - /** When the renderer process initially discovers a network request, in milliseconds. */ - rendererStartTime: number; - /** - * When the network service is about to handle a request, ie. just before going to the - * HTTP cache or going to the network for DNS/connection setup, in milliseconds. - */ - networkRequestTime: number; - /** - * When the last byte of the response headers is received, in milliseconds. - * Equal to networkRequestTime if no data is received over the - * network (ex: cached requests or data urls). - */ - responseHeadersEndTime: number; - /** When the last byte of the response body is received, in milliseconds. */ - networkEndTime: number; - transferSize: number; - resourceSize: number; - fromDiskCache: boolean; - fromMemoryCache: boolean; - isLinkPreload: boolean; - finished: boolean; - failed: boolean; - statusCode: number; - /** The network request that redirected to this one */ - redirectSource: NetworkRequest | undefined; - /** The network request that this one redirected to */ - redirectDestination: NetworkRequest | undefined; - // TODO: can't use Protocol.Network.Initiator because of type mismatch in Lighthouse initiator. - initiator: { - type: InitiatorType; - url?: string; - stack?: CallStack; - }; - initiatorRequest: NetworkRequest | undefined; - /** The chain of network requests that redirected to this one */ - redirects: NetworkRequest[] | undefined; - timing: Protocol.Network.ResourceTiming | undefined; - /** - * Optional value for how long the server took to respond to this request. - * When not provided, the server response time is derived from the timing object. - */ - serverResponseTime?: number; - resourceType: ResourceType | undefined; - mimeType: string; - priority: ResourcePriority; - frameId: string | undefined; - fromWorker: boolean; -} - -interface MetricResult { - timing: number; - timestamp?: never; - optimisticEstimate: Simulation.Result; - pessimisticEstimate: Simulation.Result; - optimisticGraph: Simulation.GraphNode; - pessimisticGraph: Simulation.GraphNode; -} - -export namespace Simulation { - type URL = { - /** URL of the initially requested URL */ - requestedUrl?: string; - /** URL of the last document request */ - mainDocumentUrl?: string; - }; - - type GraphNode = import('../BaseNode.js').Node; - type GraphNetworkNode = import('../NetworkNode.js').NetworkNode; - type GraphCPUNode = import('../CpuNode.js').CPUNode; - type Simulator = import('../simulation/Simulator.js').Simulator; - - interface MetricCoefficients { - intercept: number; - optimistic: number; - pessimistic: number; - } - - /** Simulation settings that control the amount of network & cpu throttling in the run. */ - interface ThrottlingSettings { - /** The round trip time in milliseconds. */ - rttMs?: number; - /** The network throughput in kilobits per second. */ - throughputKbps?: number; - // devtools settings - /** The network request latency in milliseconds. */ - requestLatencyMs?: number; - /** The network download throughput in kilobits per second. */ - downloadThroughputKbps?: number; - /** The network upload throughput in kilobits per second. */ - uploadThroughputKbps?: number; - // used by both - /** The amount of slowdown applied to the cpu (1/<cpuSlowdownMultiplier>).
*/ - cpuSlowdownMultiplier?: number - } - - interface PrecomputedLanternData { - additionalRttByOrigin: {[origin: string]: number}; - serverResponseTimeByOrigin: {[origin: string]: number}; - } - - interface Settings { - networkAnalysis: { - rtt: number; - additionalRttByOrigin: Map<string, number>; - serverResponseTimeByOrigin: Map<string, number>; - throughput: number; - }; - /** The method used to throttle the network. */ - throttlingMethod: 'devtools'|'simulate'|'provided'; - /** The throttling config settings. */ - throttling: Required<ThrottlingSettings>; - /** Precomputed lantern estimates to use instead of observed analysis. */ - precomputedLanternData?: PrecomputedLanternData | null; - } - - interface Options { - rtt?: number; - throughput?: number; - observedThroughput: number; - maximumConcurrentRequests?: number; - cpuSlowdownMultiplier?: number; - layoutTaskMultiplier?: number; - additionalRttByOrigin?: Map<string, number>; - serverResponseTimeByOrigin?: Map<string, number>; - } - - interface NodeTiming { - startTime: number; - endTime: number; - duration: number; - } - - interface Result<T = any> { - timeInMs: number; - nodeTimings: Map<GraphNode<T>, NodeTiming>; - } - - interface ProcessedNavigation { - timestamps: { - firstContentfulPaint: number; - largestContentfulPaint?: number; - }; - } - - interface MetricComputationDataInput { - simulator: Simulator; - graph: GraphNode; - processedNavigation: ProcessedNavigation; - } -} diff --git a/core/lib/lantern/types/lantern.js b/core/lib/lantern/types/lantern.js deleted file mode 100644 index 2740c5b56c64..000000000000 --- a/core/lib/lantern/types/lantern.js +++ /dev/null @@ -1,7 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -export {}; diff --git a/core/lib/navigation-error.js b/core/lib/navigation-error.js index 6d55d41e8c3f..658160429d9f 100644 --- a/core/lib/navigation-error.js +++ b/core/lib/navigation-error.js @@ -131,7 +131,7 @@ function getNonHtmlError(finalRecord) { function getPageLoadError(navigationError, context) { const {url, networkRecords} = context; /** @type {LH.Artifacts.NetworkRequest|undefined} */ - let mainRecord = Lantern.Simulation.NetworkAnalyzer.findResourceForUrl(networkRecords, url); + let mainRecord = Lantern.Core.NetworkAnalyzer.findResourceForUrl(networkRecords, url); // If the url doesn't give us a network request, it's possible we landed on a chrome-error:// page // In this case, just get the first document request. @@ -149,7 +149,7 @@ function getPageLoadError(navigationError, context) { // MIME Type is only set on the final redirected document request. Use this for the HTML check instead of root. let finalRecord; if (mainRecord) { - finalRecord = Lantern.Simulation.NetworkAnalyzer.resolveRedirects(mainRecord); + finalRecord = Lantern.Core.NetworkAnalyzer.resolveRedirects(mainRecord); } else { // We have no network requests to process, use the navError return navigationError; diff --git a/core/lib/network-recorder.js b/core/lib/network-recorder.js index bbd2f50d9858..0a9787bca387 100644 --- a/core/lib/network-recorder.js +++ b/core/lib/network-recorder.js @@ -253,7 +253,7 @@ class NetworkRecorder extends RequestEventEmitter { return record.redirectSource; } - const initiatorURL = Lantern.PageDependencyGraph.getNetworkInitiators(record)[0]; + const initiatorURL = Lantern.Graph.PageDependencyGraph.getNetworkInitiators(record)[0]; let candidates = recordsByURL.get(initiatorURL) || []; // The (valid) initiator must come before the initiated request.
candidates = candidates.filter(c => { diff --git a/core/lib/network-request.js b/core/lib/network-request.js index 1b971691e82e..e6373feeff52 100644 --- a/core/lib/network-request.js +++ b/core/lib/network-request.js @@ -577,7 +577,7 @@ class NetworkRequest { /** * @param {NetworkRequest} record - * @return {Lantern.NetworkRequest} + * @return {Lantern.Types.NetworkRequest} */ static asLanternNetworkRequest(record) { // In LR, network records are missing connection timing, but we've smuggled it in via headers. @@ -616,7 +616,7 @@ class NetworkRequest { } /** - * @param {NetworkRequest} record + * @param {Pick} record * @return {boolean} */ static isNonNetworkRequest(record) { diff --git a/core/scripts/lantern/run-once.js b/core/scripts/lantern/run-once.js index 3f0bc767b001..de3848ecfee1 100755 --- a/core/scripts/lantern/run-once.js +++ b/core/scripts/lantern/run-once.js @@ -43,7 +43,7 @@ async function run() { // Dump the TTI graph with simulated timings to a trace if LANTERN_DEBUG is enabled const pessimisticTTINodeTimings = - Lantern.Simulation.Simulator.ALL_NODE_TIMINGS.get('pessimisticInteractive'); + Lantern.Simulation.Simulator.allNodeTimings.get('pessimisticInteractive'); if (process.env.LANTERN_DEBUG && pessimisticTTINodeTimings) { const outputTraceFile = path.basename(tracePath).replace(/.trace.json$/, '.lantern.trace.json'); const outputTracePath = path.join(LH_ROOT, '.tmp', outputTraceFile); diff --git a/core/test/audits/byte-efficiency/render-blocking-resources-test.js b/core/test/audits/byte-efficiency/render-blocking-resources-test.js index f83e73c76bb7..e952c6f89e76 100644 --- a/core/test/audits/byte-efficiency/render-blocking-resources-test.js +++ b/core/test/audits/byte-efficiency/render-blocking-resources-test.js @@ -12,7 +12,7 @@ import * as Lantern from '../../../lib/lantern/lantern.js'; import {NetworkRequest} from '../../../lib/network-request.js'; import {getURLArtifactFromDevtoolsLog, readJson} from '../../test-utils.js'; -const {NetworkNode, CPUNode} = Lantern; +const {NetworkNode, CPUNode} = Lantern.Graph; const {Simulator} = Lantern.Simulation; const trace = readJson('../../fixtures/artifacts/render-blocking/trace.json', import.meta); diff --git a/core/test/computed/load-simulator-test.js b/core/test/computed/load-simulator-test.js index d2e1a2ecd574..a6a46f2224a1 100644 --- a/core/test/computed/load-simulator-test.js +++ b/core/test/computed/load-simulator-test.js @@ -19,7 +19,7 @@ function createNetworkNode() { protocol: 'http', parsedURL: {scheme: 'http', securityOrigin: 'https://pwa.rocks'}, }; - return new Lantern.NetworkNode(NetworkRequest.asLanternNetworkRequest(record)); + return new Lantern.Graph.NetworkNode(NetworkRequest.asLanternNetworkRequest(record)); } describe('Simulator artifact', () => { diff --git a/core/test/computed/page-dependency-graph-test.js b/core/test/computed/page-dependency-graph-test.js index 1bc719c3c568..0b3c9911f0b4 100644 --- a/core/test/computed/page-dependency-graph-test.js +++ b/core/test/computed/page-dependency-graph-test.js @@ -22,7 +22,7 @@ describe('PageDependencyGraph computed artifact', () => { devtoolsLog: sampleDevtoolsLog, URL: getURLArtifactFromDevtoolsLog(sampleDevtoolsLog), }, context); - assert.ok(output instanceof Lantern.BaseNode, 'did not return a graph'); + assert.ok(output instanceof Lantern.Graph.BaseNode, 'did not return a graph'); const dependents = output.getDependents(); const nodeWithNestedDependents = dependents.find(node => node.getDependents().length); assert.ok(nodeWithNestedDependents, 'did not 
link initiators'); diff --git a/core/test/scripts/run-mocha-tests.js b/core/test/scripts/run-mocha-tests.js index d270a6f74ba0..7f0a88e2f20a 100644 --- a/core/test/scripts/run-mocha-tests.js +++ b/core/test/scripts/run-mocha-tests.js @@ -171,7 +171,6 @@ const defaultTestMatches = [ 'cli/**/*-test.js', 'core/**/*-test.js', 'core/test/**/*-test-pptr.js', - 'core/lib/lantern/**/*.test.js', 'report/**/*-test.js', 'shared/**/*-test.js', 'third-party/**/*-test.js', diff --git a/package.json b/package.json index ef270abb2067..51e57e75424b 100644 --- a/package.json +++ b/package.json @@ -181,7 +181,7 @@ "webtreemap-cdt": "^3.2.1" }, "dependencies": { - "@paulirish/trace_engine": "^0.0.23", + "@paulirish/trace_engine": "0.0.28", "@sentry/node": "^6.17.4", "axe-core": "^4.9.1", "chrome-launcher": "^1.1.2", diff --git a/tsconfig.json b/tsconfig.json index 2ec64deb58c7..2566e5a6b85d 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -17,7 +17,6 @@ "clients/**/*.js", "build/**/*.js", "./types/**/*.d.ts", - "./core/lib/lantern/types/**/*.d.ts", "eslint-local-rules.cjs", "third-party/axe/valid-langs.js", "third-party/esbuild-plugins-polyfills/esbuild-polyfills.js", @@ -67,15 +66,6 @@ "core/test/lib/emulation-test.js", "core/test/lib/i18n/i18n-test.js", "core/test/lib/icons-test.js", - // TODO(15841): remove when importing Lantern from npm - "core/lib/lantern/BaseNode.test.js", - "core/lib/lantern/metrics/*.test.js", - "core/lib/lantern/PageDependencyGraph.test.js", - "core/lib/lantern/simulation/ConnectionPool.test.js", - "core/lib/lantern/simulation/DNSCache.test.js", - "core/lib/lantern/simulation/NetworkAnalyzer.test.js", - "core/lib/lantern/simulation/Simulator.test.js", - // ------ done TODO "core/test/lib/lh-element-test.js", "core/test/lib/lighthouse-compatibility-test.js", "core/test/lib/manifest-parser-test.js", @@ -112,6 +102,5 @@ "core/test/computed/metrics/interactive-test.js", "core/test/computed/tbt-impact-tasks-test.js", "core/test/fixtures/config-plugins/lighthouse-plugin-simple/plugin-simple.js", - "core/lib/lantern/metrics/MetricTestUtils.js", ], } diff --git a/types/artifacts.d.ts b/types/artifacts.d.ts index 2cbb827ef134..1dd3faddb62d 100644 --- a/types/artifacts.d.ts +++ b/types/artifacts.d.ts @@ -565,7 +565,7 @@ declare module Artifacts { trace: Trace; settings: Audit.Context['settings']; gatherContext: Artifacts['GatherContext']; - simulator?: InstanceType; + simulator?: Gatherer.Simulation.Simulator; URL: Artifacts['URL']; } @@ -591,7 +591,7 @@ declare module Artifacts { throughput: number; } - type LanternMetric = Lantern.Metrics.Result; + type LanternMetric = Lantern.Metrics.MetricResult; type Speedline = speedline.Output<'speedIndex'>; diff --git a/types/gatherer.d.ts b/types/gatherer.d.ts index 5d818a1062ce..8f1a382a25d9 100644 --- a/types/gatherer.d.ts +++ b/types/gatherer.d.ts @@ -133,12 +133,11 @@ declare module Gatherer { type AnyGathererInstance = GathererInstanceExpander namespace Simulation { - type GraphNode = Lantern.Simulation.GraphNode; - type GraphNetworkNode = Lantern.Simulation.GraphNetworkNode; - type GraphCPUNode = Lantern.Simulation.GraphCPUNode; + type GraphNode = Lantern.Graph.Node; + type GraphNetworkNode = Lantern.Graph.NetworkNode; + type GraphCPUNode = Lantern.Graph.CPUNode; type Simulator = Lantern.Simulation.Simulator; - type NodeTiming = Lantern.Simulation.NodeTiming; - type MetricCoefficients = Lantern.Simulation.MetricCoefficients; + type NodeTiming = Lantern.Types.Simulation.NodeTiming; type Result = Lantern.Simulation.Result; } } diff --git 
a/yarn.lock b/yarn.lock index 2c7df8f5a7fd..615fd1ff34f6 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1080,10 +1080,10 @@ "@nodelib/fs.scandir" "2.1.3" fastq "^1.6.0" -"@paulirish/trace_engine@^0.0.23": - version "0.0.23" - resolved "https://registry.yarnpkg.com/@paulirish/trace_engine/-/trace_engine-0.0.23.tgz#b3eec22421ee562837b371ddcd4659483837ec92" - integrity sha512-2ym/q7HhC5K+akXkNV6Gip3oaHpbI6TsGjmcAsl7bcJ528MVbacPQeoauLFEeLXH4ulJvsxQwNDIg/kAEhFZxw== +"@paulirish/trace_engine@0.0.28": + version "0.0.28" + resolved "https://registry.yarnpkg.com/@paulirish/trace_engine/-/trace_engine-0.0.28.tgz#9c7163c68fc697a3c39638248aa8f143eb188927" + integrity sha512-hxeqhny/jggy3y3yEUE/91UQmNREjoebaYlggWNTtX1ZFdIXJ4ea8MSMlO+dc1oWcnKHGQzdU3+Mhl01ZEuU0w== "@protobufjs/aspromise@^1.1.1", "@protobufjs/aspromise@^1.1.2": version "1.1.2"