diff --git a/packages/react-dom/src/__tests__/ReactDOMFizzServer-test.js b/packages/react-dom/src/__tests__/ReactDOMFizzServer-test.js
index 3a7d7205dcb27..9021c23c6ce74 100644
--- a/packages/react-dom/src/__tests__/ReactDOMFizzServer-test.js
+++ b/packages/react-dom/src/__tests__/ReactDOMFizzServer-test.js
@@ -336,7 +336,6 @@ describe('ReactDOMFizzServer', () => {
       writable.write(chunk, encoding, next);
     };
 
-    writable.write('<div>');
     await act(async () => {
       const {startWriting} = ReactDOMFizzServer.pipeToNodeWritable(
         }>
@@ -346,13 +345,17 @@ describe('ReactDOMFizzServer', () => {
         ,
         writableA,
-        {identifierPrefix: 'A_'},
+        {
+          identifierPrefix: 'A_',
+          onReadyToStream() {
+            writableA.write('<div>');
+            startWriting();
+            writableA.write('</div>');
+          },
+        },
       );
-      startWriting();
     });
-    writable.write('</div>');
-    writable.write('<div>');
     await act(async () => {
       const {startWriting} = ReactDOMFizzServer.pipeToNodeWritable(
         }>
@@ -362,11 +365,16 @@ describe('ReactDOMFizzServer', () => {
         ,
         writableB,
-        {identifierPrefix: 'B_'},
+        {
+          identifierPrefix: 'B_',
+          onReadyToStream() {
+            writableB.write('<div>');
+            startWriting();
+            writableB.write('</div>');
+          },
+        },
       );
-      startWriting();
     });
-    writable.write('</div>');
     expect(getVisibleChildren(container)).toEqual([
       <div>Loading A...</div>,
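The change above moves the wrapper `<div>` writes into the new `onReadyToStream` callback, so nothing is written to the destination until React has at least the root fallback ready. As a rough sketch of the same pattern outside the test harness — the `react-dom/unstable-fizz` require path, the `App` component, and the surrounding HTTP server are assumptions, not part of this diff:

```js
const http = require('http');
const React = require('react');
// Assumptions for this sketch: the Fizz entry point name and the App component.
const {pipeToNodeWritable} = require('react-dom/unstable-fizz');
const App = require('./App');

http
  .createServer((request, response) => {
    response.setHeader('Content-Type', 'text/html');
    const {startWriting} = pipeToNodeWritable(
      React.createElement(App),
      response,
      {
        identifierPrefix: 'A_',
        onReadyToStream() {
          // Nothing has been emitted yet; write the shell first, then let
          // React take over the same writable.
          response.write('<!DOCTYPE html><div id="root">');
          startWriting();
        },
        onError(error) {
          console.error(error);
        },
      },
    );
  })
  .listen(3000);
```

The callback receives no arguments; per the type added in this PR it is purely a signal that `startWriting()` now has something meaningful to flush.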
diff --git a/packages/react-dom/src/__tests__/ReactDOMFizzServerBrowser-test.js b/packages/react-dom/src/__tests__/ReactDOMFizzServerBrowser-test.js
index 0167103db57cf..b55bb446f6fce 100644
--- a/packages/react-dom/src/__tests__/ReactDOMFizzServerBrowser-test.js
+++ b/packages/react-dom/src/__tests__/ReactDOMFizzServerBrowser-test.js
@@ -58,12 +58,56 @@ describe('ReactDOMFizzServer', () => {
     expect(result).toBe('<div>hello world</div>');
   });
 
+  // @gate experimental
+  it('emits all HTML as one unit if we wait until the end to start', async () => {
+    let hasLoaded = false;
+    let resolve;
+    const promise = new Promise(r => (resolve = r));
+    function Wait() {
+      if (!hasLoaded) {
+        throw promise;
+      }
+      return 'Done';
+    }
+    let isComplete = false;
+    const stream = ReactDOMFizzServer.renderToReadableStream(
+
+
+
+
+      ,
+      {
+        onCompleteAll() {
+          isComplete = true;
+        },
+      },
+    );
+    await jest.runAllTimers();
+    expect(isComplete).toBe(false);
+    // Resolve the loading.
+    hasLoaded = true;
+    await resolve();
+
+    await jest.runAllTimers();
+
+    expect(isComplete).toBe(true);
+
+    const result = await readResult(stream);
+    expect(result).toBe('
Done
');
+  });
+
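The new test exercises `onCompleteAll` together with the Browser entry point's lazier `pull()` behavior: the stream does not start flowing until something actually reads it. A hedged sketch of how a caller might wait for the fully resolved document before consuming the stream — the entry point path and the `element` argument are assumptions:

```js
// Assumed entry point, bundled for a browser/worker target.
const ReactDOMFizzServer = require('react-dom/unstable-fizz.browser');

// Sketch only: render to a ReadableStream but don't read it until
// onCompleteAll tells us every Suspense boundary has resolved.
async function renderFullyResolvedHTML(element) {
  let resolveAll;
  const allReady = new Promise(resolve => (resolveAll = resolve));

  const stream = ReactDOMFizzServer.renderToReadableStream(element, {
    onCompleteAll() {
      resolveAll();
    },
    onError(error) {
      console.error(error);
    },
  });

  await allReady;

  // Because pull() now checks stream.locked, nothing was buffered while the
  // stream sat here unread; flowing starts once we lock it with a reader.
  const reader = stream.getReader();
  const decoder = new TextDecoder();
  let html = '';
  while (true) {
    const {done, value} = await reader.read();
    if (done) {
      return html;
    }
    html += decoder.decode(value, {stream: true});
  }
}
```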
   // @gate experimental
   it('should error the stream when an error is thrown at the root', async () => {
+    const reportedErrors = [];
     const stream = ReactDOMFizzServer.renderToReadableStream(
       ,
+      {
+        onError(x) {
+          reportedErrors.push(x);
+        },
+      },
     );
     let caughtError = null;
@@ -75,16 +119,23 @@ describe('ReactDOMFizzServer', () => {
     }
     expect(caughtError).toBe(theError);
     expect(result).toBe('');
+    expect(reportedErrors).toEqual([theError]);
   });
 
   // @gate experimental
   it('should error the stream when an error is thrown inside a fallback', async () => {
+    const reportedErrors = [];
     const stream = ReactDOMFizzServer.renderToReadableStream(
         }>
       ,
+      {
+        onError(x) {
+          reportedErrors.push(x);
+        },
+      },
     );
     let caughtError = null;
@@ -96,20 +147,28 @@ describe('ReactDOMFizzServer', () => {
     }
     expect(caughtError).toBe(theError);
     expect(result).toBe('');
+    expect(reportedErrors).toEqual([theError]);
   });
 
   // @gate experimental
   it('should not error the stream when an error is thrown inside suspense boundary', async () => {
+    const reportedErrors = [];
     const stream = ReactDOMFizzServer.renderToReadableStream(
Loading
         }>
       ,
+      {
+        onError(x) {
+          reportedErrors.push(x);
+        },
+      },
     );
     const result = await readResult(stream);
     expect(result).toContain('Loading');
+    expect(reportedErrors).toEqual([theError]);
   });
 
   // @gate experimental
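The three error tests establish that `onError` receives every reported error: a fatal error at the root, an error thrown inside a fallback, and an error inside a Suspense boundary that merely falls back to client rendering. A minimal sketch of the intended logging use; the entry point path, `App`, and `reportToMonitoring` are hypothetical:

```js
const React = require('react');
// Assumptions for this sketch: entry point name and the two helper modules.
const ReactDOMFizzServer = require('react-dom/unstable-fizz.browser');
const App = require('./App');
const reportToMonitoring = require('./reportToMonitoring');

// onError fires for every reported error, including ones that only cause a
// boundary to client-render, so it is the place to hook server-side logging.
const stream = ReactDOMFizzServer.renderToReadableStream(
  React.createElement(App),
  {
    onError(error) {
      reportToMonitoring(error);
    },
  },
);
```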
diff --git a/packages/react-dom/src/__tests__/ReactDOMFizzServerNode-test.js b/packages/react-dom/src/__tests__/ReactDOMFizzServerNode-test.js
index a7ea970bece51..5377607a5795b 100644
--- a/packages/react-dom/src/__tests__/ReactDOMFizzServerNode-test.js
+++ b/packages/react-dom/src/__tests__/ReactDOMFizzServerNode-test.js
@@ -86,14 +86,68 @@ describe('ReactDOMFizzServer', () => {
     );
   });
 
+  // @gate experimental
+  it('emits all HTML as one unit if we wait until the end to start', async () => {
+    let hasLoaded = false;
+    let resolve;
+    const promise = new Promise(r => (resolve = r));
+    function Wait() {
+      if (!hasLoaded) {
+        throw promise;
+      }
+      return 'Done';
+    }
+    let isComplete = false;
+    const {writable, output} = getTestWritable();
+    const {startWriting} = ReactDOMFizzServer.pipeToNodeWritable(
+
+
+
+
+      ,
+      writable,
+      {
+        onCompleteAll() {
+          isComplete = true;
+        },
+      },
+    );
+    await jest.runAllTimers();
+    expect(output.result).toBe('');
+    expect(isComplete).toBe(false);
+    // Resolve the loading.
+    hasLoaded = true;
+    await resolve();
+
+    await jest.runAllTimers();
+
+    expect(output.result).toBe('');
+    expect(isComplete).toBe(true);
+
+    // First we write our header.
+    output.result +=
+      'test';
+    // Then React starts writing.
+    startWriting();
+    expect(output.result).toBe(
+      'test
Done
',
+    );
+  });
+
   // @gate experimental
   it('should error the stream when an error is thrown at the root', async () => {
+    const reportedErrors = [];
     const {writable, output, completed} = getTestWritable();
     ReactDOMFizzServer.pipeToNodeWritable(
       ,
       writable,
+      {
+        onError(x) {
+          reportedErrors.push(x);
+        },
+      },
     );
     // The stream is errored even if we haven't started writing.
@@ -102,10 +156,13 @@ describe('ReactDOMFizzServer', () => {
     expect(output.error).toBe(theError);
     expect(output.result).toBe('');
+    // This type of error is reported to the error callback too.
+    expect(reportedErrors).toEqual([theError]);
   });
 
   // @gate experimental
   it('should error the stream when an error is thrown inside a fallback', async () => {
+    const reportedErrors = [];
     const {writable, output, completed} = getTestWritable();
     const {startWriting} = ReactDOMFizzServer.pipeToNodeWritable(
@@ -114,6 +171,11 @@ describe('ReactDOMFizzServer', () => {
       ,
       writable,
+      {
+        onError(x) {
+          reportedErrors.push(x);
+        },
+      },
     );
     startWriting();
@@ -121,10 +183,12 @@ describe('ReactDOMFizzServer', () => {
     expect(output.error).toBe(theError);
     expect(output.result).toBe('');
+    expect(reportedErrors).toEqual([theError]);
   });
 
   // @gate experimental
   it('should not error the stream when an error is thrown inside suspense boundary', async () => {
+    const reportedErrors = [];
     const {writable, output, completed} = getTestWritable();
     const {startWriting} = ReactDOMFizzServer.pipeToNodeWritable(
@@ -133,6 +197,11 @@ describe('ReactDOMFizzServer', () => {
       ,
       writable,
+      {
+        onError(x) {
+          reportedErrors.push(x);
+        },
+      },
     );
     startWriting();
@@ -140,6 +209,8 @@ describe('ReactDOMFizzServer', () => {
     expect(output.error).toBe(undefined);
     expect(output.result).toContain('Loading');
+    // While no error is reported to the stream, the error is reported to the callback.
+    expect(reportedErrors).toEqual([theError]);
   });
 
   // @gate experimental
diff --git a/packages/react-dom/src/server/ReactDOMFizzServerBrowser.js b/packages/react-dom/src/server/ReactDOMFizzServerBrowser.js
index 902989fefca0f..95a58f0688dd8 100644
--- a/packages/react-dom/src/server/ReactDOMFizzServerBrowser.js
+++ b/packages/react-dom/src/server/ReactDOMFizzServerBrowser.js
@@ -22,6 +22,9 @@ type Options = {
   identifierPrefix?: string,
   progressiveChunkSize?: number,
   signal?: AbortSignal,
+  onReadyToStream?: () => void,
+  onCompleteAll?: () => void,
+  onError?: (error: mixed) => void,
 };
 
 function renderToReadableStream(
@@ -37,21 +40,31 @@ function renderToReadableStream(
     };
     signal.addEventListener('abort', listener);
   }
-  return new ReadableStream({
+  const stream = new ReadableStream({
     start(controller) {
       request = createRequest(
         children,
         controller,
         createResponseState(options ? options.identifierPrefix : undefined),
         options ? options.progressiveChunkSize : undefined,
+        options ? options.onError : undefined,
+        options ? options.onCompleteAll : undefined,
+        options ? options.onReadyToStream : undefined,
       );
       startWork(request);
     },
     pull(controller) {
-      startFlowing(request);
+      // Pull is called immediately even if the stream is not passed to anything.
+      // That's buffering too early. We want to start buffering once the stream
+      // is actually used by something so we can give it the best result possible
+      // at that point.
+      if (stream.locked) {
+        startFlowing(request);
+      }
     },
     cancel(reason) {},
   });
+  return stream;
 }
 
 export {renderToReadableStream};
diff --git a/packages/react-dom/src/server/ReactDOMFizzServerNode.js b/packages/react-dom/src/server/ReactDOMFizzServerNode.js
index 8de76f7095f9d..51a091948d4b1 100644
--- a/packages/react-dom/src/server/ReactDOMFizzServerNode.js
+++ b/packages/react-dom/src/server/ReactDOMFizzServerNode.js
@@ -26,6 +26,9 @@ function createDrainHandler(destination, request) {
 type Options = {
   identifierPrefix?: string,
   progressiveChunkSize?: number,
+  onReadyToStream?: () => void,
+  onCompleteAll?: () => void,
+  onError?: (error: mixed) => void,
 };
 
 type Controls = {
@@ -44,6 +47,9 @@ function pipeToNodeWritable(
     destination,
     createResponseState(options ? options.identifierPrefix : undefined),
     options ? options.progressiveChunkSize : undefined,
+    options ? options.onError : undefined,
+    options ? options.onCompleteAll : undefined,
+    options ? options.onReadyToStream : undefined,
   );
   let hasStartedFlowing = false;
   startWork(request);
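Both entry points simply forward the three callbacks into `createRequest`; the Browser entry additionally guards `startFlowing` behind `stream.locked`, so rendering output is not buffered before anyone consumes the stream. A sketch of what that enables in a worker-style environment — the `fetch` event plumbing, entry point path, and `App` component are assumptions:

```js
const React = require('react');
// Assumed entry point, resolved by the worker bundler.
const ReactDOMFizzServer = require('react-dom/unstable-fizz.browser');
const App = require('./App');

// Assumed worker-style environment with fetch-event semantics.
addEventListener('fetch', event => {
  const stream = ReactDOMFizzServer.renderToReadableStream(
    React.createElement(App),
    {
      onError(error) {
        console.error(error);
      },
    },
  );
  // Nothing is buffered while the stream sits here unlocked; pull() only
  // calls startFlowing(request) once a consumer locks it for reading.
  event.respondWith(
    new Response(stream, {headers: {'Content-Type': 'text/html'}}),
  );
});
```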
diff --git a/packages/react-noop-renderer/src/ReactNoopServer.js b/packages/react-noop-renderer/src/ReactNoopServer.js
index d2a4bf032933c..feec884743efc 100644
--- a/packages/react-noop-renderer/src/ReactNoopServer.js
+++ b/packages/react-noop-renderer/src/ReactNoopServer.js
@@ -217,6 +217,9 @@ const ReactNoopServer = ReactFizzServer({
 type Options = {
   progressiveChunkSize?: number,
+  onReadyToStream?: () => void,
+  onCompleteAll?: () => void,
+  onError?: (error: mixed) => void,
 };
 
 function render(children: React$Element<any>, options?: Options): Destination {
@@ -234,6 +237,9 @@ function render(children: React$Element<any>, options?: Options): Destination {
     destination,
     null,
     options ? options.progressiveChunkSize : undefined,
+    options ? options.onError : undefined,
+    options ? options.onCompleteAll : undefined,
+    options ? options.onReadyToStream : undefined,
   );
   ReactNoopServer.startWork(request);
   ReactNoopServer.startFlowing(request);
diff --git a/packages/react-server/src/ReactFizzServer.js b/packages/react-server/src/ReactFizzServer.js
index a52079c1587ce..b936376e85eae 100644
--- a/packages/react-server/src/ReactFizzServer.js
+++ b/packages/react-server/src/ReactFizzServer.js
@@ -110,6 +110,15 @@ type Request = {
   clientRenderedBoundaries: Array<SuspenseBoundary>, // Errored or client rendered but not yet flushed.
   completedBoundaries: Array<SuspenseBoundary>, // Completed but not yet fully flushed boundaries to show.
   partialBoundaries: Array<SuspenseBoundary>, // Partially completed boundaries that can flush its segments early.
+  // onError is called when an error happens anywhere in the tree. It might recover.
+  onError: (error: mixed) => void,
+  // onCompleteAll is called when all pending work is done but it may not have flushed yet.
+  // This is a good time to start writing if you want only HTML and no intermediate steps.
+  onCompleteAll: () => void,
+  // onReadyToStream is called when there is at least a root fallback ready to show.
+  // Typically you don't need this callback because it's best practice to always have a
+  // root fallback ready so there's no need to wait.
+  onReadyToStream: () => void,
 };
 
 // This is a default heuristic for how to split up the HTML content into progressive
@@ -134,6 +143,9 @@ export function createRequest(
   destination: Destination,
   responseState: ResponseState,
   progressiveChunkSize: number = DEFAULT_PROGRESSIVE_CHUNK_SIZE,
+  onError: (error: mixed) => void = noop,
+  onCompleteAll: () => void = noop,
+  onReadyToStream: () => void = noop,
 ): Request {
   const pingedWork = [];
   const abortSet: Set<SuspendedWork> = new Set();
@@ -151,6 +163,9 @@ export function createRequest(
     clientRenderedBoundaries: [],
     completedBoundaries: [],
     partialBoundaries: [],
+    onError,
+    onCompleteAll,
+    onReadyToStream,
   };
   // This segment represents the root fallback.
   const rootSegment = createPendingSegment(request, 0, null);
@@ -235,7 +250,9 @@ function createPendingSegment(
 }
 
 function reportError(request: Request, error: mixed): void {
-  // TODO: Report errors on the server.
+  // If this callback errors, we intentionally let that error bubble up to become a fatal error
+  // so that someone fixes the error reporting instead of hiding it.
+  request.onError(error);
 }
 
 function fatalError(request: Request, error: mixed): void {
@@ -389,28 +406,31 @@ function erroredWork(
   segment: Segment,
   error: mixed,
 ) {
-  request.allPendingWork--;
-  if (boundary !== null) {
-    boundary.pendingWork--;
-  }
-
   // Report the error to a global handler.
   reportError(request, error);
   if (boundary === null) {
     fatalError(request, error);
-  } else if (!boundary.forceClientRender) {
-    boundary.forceClientRender = true;
-
-    // Regardless of what happens next, this boundary won't be displayed,
-    // so we can flush it, if the parent already flushed.
-    if (boundary.parentFlushed) {
-      // We don't have a preference where in the queue this goes since it's likely
-      // to error on the client anyway. However, intentionally client-rendered
-      // boundaries should be flushed earlier so that they can start on the client.
-      // We reuse the same queue for errors.
-      request.clientRenderedBoundaries.push(boundary);
+  } else {
+    boundary.pendingWork--;
+    if (!boundary.forceClientRender) {
+      boundary.forceClientRender = true;
+
+      // Regardless of what happens next, this boundary won't be displayed,
+      // so we can flush it, if the parent already flushed.
+      if (boundary.parentFlushed) {
+        // We don't have a preference where in the queue this goes since it's likely
+        // to error on the client anyway. However, intentionally client-rendered
+        // boundaries should be flushed earlier so that they can start on the client.
+        // We reuse the same queue for errors.
+        request.clientRenderedBoundaries.push(boundary);
+      }
    }
  }
+
+  request.allPendingWork--;
+  if (request.allPendingWork === 0) {
+    request.onCompleteAll();
+  }
 }
 
 function abortWorkSoft(suspendedWork: SuspendedWork): void {
@@ -454,6 +474,10 @@ function abortWork(suspendedWork: SuspendedWork): void {
        request.clientRenderedBoundaries.push(boundary);
      }
    }
+
+    if (request.allPendingWork === 0) {
+      request.onCompleteAll();
+    }
  }
 }
 
@@ -462,10 +486,7 @@ function finishedWork(
   boundary: Root | SuspenseBoundary,
   segment: Segment,
 ) {
-  request.allPendingWork--;
-
   if (boundary === null) {
-    request.pendingRootWork--;
     if (segment.parentFlushed) {
       invariant(
         request.completedRootSegment === null,
       );
       request.completedRootSegment = segment;
     }
-    return;
-  }
-
-  boundary.pendingWork--;
-  if (boundary.forceClientRender) {
-    // This already errored.
-    return;
-  }
-  if (boundary.pendingWork === 0) {
-    // This must have been the last segment we were waiting on. This boundary is now complete.
-    // We can now cancel any pending work on the fallback since we won't need to show it anymore.
-    boundary.fallbackAbortableWork.forEach(abortWorkSoft, request);
-    boundary.fallbackAbortableWork.clear();
-    if (segment.parentFlushed) {
-      // Our parent segment already flushed, so we need to schedule this segment to be emitted.
-      boundary.completedSegments.push(segment);
-    }
-    if (boundary.parentFlushed) {
-      // The segment might be part of a segment that didn't flush yet, but if the boundary's
-      // parent flushed, we need to schedule the boundary to be emitted.
-      request.completedBoundaries.push(boundary);
+    request.pendingRootWork--;
+    if (request.pendingRootWork === 0) {
+      request.onReadyToStream();
+    }
   } else {
-    if (segment.parentFlushed) {
-      // Our parent already flushed, so we need to schedule this segment to be emitted.
-      const completedSegments = boundary.completedSegments;
-      completedSegments.push(segment);
-      if (completedSegments.length === 1) {
-        // This is the first time since we last flushed that we completed anything.
-        // We can schedule this boundary to emit its partially completed segments early
-        // in case the parent has already been flushed.
-        if (boundary.parentFlushed) {
-          request.partialBoundaries.push(boundary);
+    boundary.pendingWork--;
+    if (boundary.forceClientRender) {
+      // This already errored.
+    } else if (boundary.pendingWork === 0) {
+      // This must have been the last segment we were waiting on. This boundary is now complete.
+      // We can now cancel any pending work on the fallback since we won't need to show it anymore.
+      boundary.fallbackAbortableWork.forEach(abortWorkSoft, request);
+      boundary.fallbackAbortableWork.clear();
+      if (segment.parentFlushed) {
+        // Our parent segment already flushed, so we need to schedule this segment to be emitted.
+        boundary.completedSegments.push(segment);
+      }
+      if (boundary.parentFlushed) {
+        // The segment might be part of a segment that didn't flush yet, but if the boundary's
+        // parent flushed, we need to schedule the boundary to be emitted.
+        request.completedBoundaries.push(boundary);
+      }
+    } else {
+      if (segment.parentFlushed) {
+        // Our parent already flushed, so we need to schedule this segment to be emitted.
+        const completedSegments = boundary.completedSegments;
+        completedSegments.push(segment);
+        if (completedSegments.length === 1) {
+          // This is the first time since we last flushed that we completed anything.
+          // We can schedule this boundary to emit its partially completed segments early
+          // in case the parent has already been flushed.
+          if (boundary.parentFlushed) {
+            request.partialBoundaries.push(boundary);
+          }
         }
       }
     }
   }
+
+  request.allPendingWork--;
+  if (request.allPendingWork === 0) {
+    // This needs to be called at the very end so that we can synchronously write the result
+    // in the callback if needed.
+    request.onCompleteAll();
+  }
 }
 
 function retryWork(request: Request, work: SuspendedWork): void {
@@ -573,6 +602,7 @@ function performWork(request: Request): void {
       flushCompletedQueues(request);
     }
   } catch (error) {
+    reportError(request, error);
     fatalError(request, error);
   } finally {
     ReactCurrentDispatcher.current = prevDispatcher;
@@ -920,6 +950,7 @@ export function startFlowing(request: Request): void {
   try {
     flushCompletedQueues(request);
   } catch (error) {
+    reportError(request, error);
     fatalError(request, error);
   }
 }
@@ -934,6 +965,7 @@ export function abort(request: Request): void {
       flushCompletedQueues(request);
     }
   } catch (error) {
+    reportError(request, error);
     fatalError(request, error);
   }
 }
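In the core, `reportError` now forwards to `onError` (including the fatal paths in `performWork`, `startFlowing`, and `abort`), `onReadyToStream` fires once `pendingRootWork` reaches zero, and `onCompleteAll` fires once `allPendingWork` reaches zero — deliberately after all other bookkeeping so the callback can synchronously start writing. A small sketch that just observes that ordering through the Node API; the require path and the event-collecting wrapper are assumptions:

```js
// Assumed entry point for this sketch.
const {pipeToNodeWritable} = require('react-dom/unstable-fizz');

// Trace when each of the three callbacks fires for a given render.
// The returned array fills in asynchronously as work completes.
function traceFizzCallbacks(element, writable) {
  const events = [];
  const {startWriting} = pipeToNodeWritable(element, writable, {
    onError(error) {
      // Fires for every reported error, recoverable or fatal.
      events.push(['error', error]);
    },
    onReadyToStream() {
      // Root fallback is ready: the earliest useful moment to startWriting().
      events.push(['ready']);
    },
    onCompleteAll() {
      // All pending work is done (it may not be flushed yet).
      events.push(['complete']);
      startWriting();
    },
  });
  return events;
}
```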