[Flight] Fix detached ArrayBuffer error when streaming typed arrays (#34849)

Using `renderToReadableStream` in Node.js with binary data from
`fs.readFileSync` (or `Buffer.allocUnsafe`) could cause downstream
consumers (like compression middleware) to fail with "Cannot perform
Construct on a detached ArrayBuffer".
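
A rough repro sketch (the file path, the `webpackMap` placeholder, and the
downstream `allocUnsafe` call are illustrative, not from an actual app):

    import fs from 'fs';
    // Assumption: a Flight entry point that renders to a Web byte stream,
    // e.g. the server.edge build of react-server-dom-webpack run on Node.js.
    import {renderToReadableStream} from 'react-server-dom-webpack/server.edge';

    const payload = fs.readFileSync('./small-3kb-file.bin'); // pooled Buffer
    const stream = renderToReadableStream(payload, webpackMap);

    for await (const chunk of stream) {
      // Once the byte stream has detached the pool's ArrayBuffer, any small
      // allocation that lands in the pool throws:
      // TypeError: Cannot perform Construct on a detached ArrayBuffer
      Buffer.allocUnsafe(16);
    }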

The issue occurs because Node.js uses an 8192-byte Buffer pool for small
allocations (< 4KB). When React's `VIEW_SIZE` was 2KB, files between
~2KB and 4KB would be passed through as views of pooled buffers rather
than copied into `currentView`. ByteStreams (`type: 'bytes'`) detach
ArrayBuffers during transfer, which corrupts the shared Buffer pool and
causes subsequent Buffer operations to fail.
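
The pool behavior is observable in plain Node.js, with no React involved;
in this sketch, `structuredClone` stands in for the detachment that a byte
stream performs on enqueue:

    // Two small allocUnsafe() calls return views of one shared 8KB pool.
    const a = Buffer.allocUnsafe(100);
    const b = Buffer.allocUnsafe(100);
    console.log(a.buffer === b.buffer); // true (in a fresh process)
    console.log(a.buffer.byteLength);   // 8192 (Buffer.poolSize)

    // Detach the pool's ArrayBuffer, as a byte stream does on enqueue.
    structuredClone(a.buffer, {transfer: [a.buffer]});

    // Node constructs small Buffers as views of the (now detached) pool,
    // so the next pooled allocation throws:
    Buffer.allocUnsafe(1);
    // TypeError: Cannot perform Construct on a detached ArrayBuffer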

Increasing `VIEW_SIZE` from 2KB to 4KB ensures all chunks smaller than
4KB are copied into `currentView` (which uses a dedicated 4KB buffer
outside the pool), while chunks 4KB or larger don't use the pool anyway.
Thus no pooled buffers are ever exposed to ByteStream detachment.
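
A simplified sketch of the resulting write path (not React's exact source;
`controller` is assumed to be a ReadableByteStreamController):

    const VIEW_SIZE = 4096; // raised from 2048

    let currentView = null; // dedicated buffer, allocated outside the pool
    let writtenBytes = 0;

    function writeChunk(controller, chunk) {
      if (chunk.byteLength > VIEW_SIZE) {
        // Flush the buffered view, then enqueue the large chunk directly.
        // Chunks of 4096+ bytes never come from Node's Buffer pool
        // (allocUnsafe only pools sizes below poolSize >>> 1 = 4096),
        // so the byte stream detaching them is harmless.
        if (writtenBytes > 0) {
          controller.enqueue(new Uint8Array(currentView.buffer, 0, writtenBytes));
          currentView = null;
          writtenBytes = 0;
        }
        controller.enqueue(chunk);
        return;
      }
      // Small chunks may be views of the shared pool: copy their bytes into
      // currentView instead of exposing the pooled buffer to detachment.
      if (currentView === null) {
        currentView = new Uint8Array(VIEW_SIZE);
      }
      if (writtenBytes + chunk.byteLength > VIEW_SIZE) {
        controller.enqueue(new Uint8Array(currentView.buffer, 0, writtenBytes));
        currentView = new Uint8Array(VIEW_SIZE);
        writtenBytes = 0;
      }
      currentView.set(chunk, writtenBytes);
      writtenBytes += chunk.byteLength;
    }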

The trade-offs: this adds 2KB of memory per active stream, copies chunks in
the 2-4KB range instead of passing them through as views (a small CPU cost),
and buffers up to 2KB more data before flushing. In exchange, it avoids
duplicating large binary data, which copying everything would require (as
the Edge entry point currently does in `typedArrayToBinaryChunk`).
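
For contrast, the copy-everything alternative would defensively clone every
typed array before handing it to the stream (a sketch, not the actual Edge
source):

    function typedArrayToBinaryChunk(content) {
      const view = new Uint8Array(
        content.buffer,
        content.byteOffset,
        content.byteLength,
      );
      // slice() copies into a fresh ArrayBuffer, so the byte stream only
      // ever detaches the copy, at the cost of duplicating large payloads.
      return view.slice();
    }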

Related issues:

- https://github.com/vercel/next.js/issues/84753
- https://github.com/vercel/next.js/issues/84858
Author: Hendrik Liebau
Date: 2025-10-17 22:13:52 +02:00
Committed by: GitHub
Parent: c35f6a3041
Commit: dc485c7303

3 changed files with 47 additions and 13 deletions

File 1 of 3:

@@ -10,11 +10,11 @@
 'use strict';
+import fs from 'fs';
+import os from 'os';
+import path from 'path';
 import {patchSetImmediate} from '../../../../scripts/jest/patchSetImmediate';
 global.ReadableStream =
   require('web-streams-polyfill/ponyfill/es6').ReadableStream;
 let clientExports;
 let webpackMap;
 let webpackModules;
@@ -1136,4 +1136,37 @@ describe('ReactFlightDOMNode', () => {
       'Switched to client rendering because the server rendering errored:\n\nssr-throw',
     );
   });
+
+  // This is a regression test for a specific issue where byte Web Streams are
+  // detaching ArrayBuffers, which caused downstream issues (e.g. "Cannot
+  // perform Construct on a detached ArrayBuffer") for chunks that are using
+  // Node's internal Buffer pool.
+  it('should not corrupt the Node.js Buffer pool by detaching ArrayBuffers when using Web Streams', async () => {
+    // Create a temp file smaller than 4KB to ensure it uses the Buffer pool.
+    const file = path.join(os.tmpdir(), 'test.bin');
+    fs.writeFileSync(file, Buffer.alloc(4095));
+    const fileChunk = fs.readFileSync(file);
+    fs.unlinkSync(file);
+
+    // Verify this chunk uses the Buffer pool (8192 bytes for files < 4KB).
+    expect(fileChunk.buffer.byteLength).toBe(8192);
+
+    const readable = await serverAct(() =>
+      ReactServerDOMServer.renderToReadableStream(fileChunk, webpackMap),
+    );
+
+    // Create a Web Streams WritableStream that tries to use Buffer operations.
+    const writable = new WritableStream({
+      write(chunk) {
+        // Only write one byte to ensure Node.js is not creating a new Buffer
+        // pool. Typically, library code (e.g. a compression middleware) would
+        // call Buffer.from(chunk) or similar, instead of allocating a new
+        // Buffer directly. With that, the test file could only be ~2600 bytes.
+        Buffer.allocUnsafe(1);
+      },
+    });
+
+    // Must not throw an error.
+    await readable.pipeTo(writable);
+  });
 });

File 2 of 3:

@@ -37,7 +37,11 @@ export function flushBuffered(destination: Destination) {
   // WHATWG Streams do not yet have a way to flush the underlying
   // transform streams. https://github.com/whatwg/streams/issues/960
 }

-const VIEW_SIZE = 2048;
+// Chunks larger than VIEW_SIZE are written directly, without copying into the
+// internal view buffer. This must be at least half of Node's internal Buffer
+// pool size (8192) to avoid corrupting the pool when using
+// renderToReadableStream, which uses a byte stream that detaches ArrayBuffers.
+const VIEW_SIZE = 4096;
 let currentView = null;
 let writtenBytes = 0;
@@ -147,14 +151,7 @@ export function typedArrayToBinaryChunk(
   // If we passed through this straight to enqueue we wouldn't have to convert it but since
   // we need to copy the buffer in that case, we need to convert it to copy it.
   // When we copy it into another array using set() it needs to be a Uint8Array.
-  const buffer = new Uint8Array(
-    content.buffer,
-    content.byteOffset,
-    content.byteLength,
-  );
-  // We clone large chunks so that we can transfer them when we write them.
-  // Others get copied into the target buffer.
-  return content.byteLength > VIEW_SIZE ? buffer.slice() : buffer;
+  return new Uint8Array(content.buffer, content.byteOffset, content.byteLength);
 }

 export function byteLengthOfChunk(chunk: Chunk | PrecomputedChunk): number {

File 3 of 3:

@@ -38,7 +38,11 @@ export function flushBuffered(destination: Destination) {
   }
 }

-const VIEW_SIZE = 2048;
+// Chunks larger than VIEW_SIZE are written directly, without copying into the
+// internal view buffer. This must be at least half of Node's internal Buffer
+// pool size (8192) to avoid corrupting the pool when using
+// renderToReadableStream, which uses a byte stream that detaches ArrayBuffers.
+const VIEW_SIZE = 4096;
 let currentView = null;
 let writtenBytes = 0;
 let destinationHasCapacity = true;