http: add maximum chunk extension size
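
In chunked transfer encoding, every chunk-size line may carry optional
extensions of the form `;name=value` after the size. Previously the parser
accepted extension data of unbounded length, so a client could, in principle,
keep a request alive while streaming extension bytes that are never surfaced
to the application. This commit caps the cumulative extension size of each
chunk at 16 KiB; once the cap is exceeded the parser fails with
`HPE_CHUNK_EXTENSIONS_OVERFLOW`, and the server replies 413 and closes the
connection.

An illustrative over-limit request, mirroring the payload used by the new
test (the host is hypothetical; this is only to make the wire format
concrete):

// A 2-byte chunk whose size line carries ~20 KiB of extension data,
// well past the new 16 KiB cap.
const overLimitRequest =
  'GET / HTTP/1.1\r\n' +
  'Host: localhost:8080\r\n' +
  'Transfer-Encoding: chunked\r\n\r\n' +
  '2;' + 'A'.repeat(20000) + '=bar\r\n' + // chunk size 2, oversized extension
  'AA\r\n' +                              // the two-byte chunk payload
  '0\r\n\r\n';                            // terminating zero-length chunk
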
PR-URL: nodejs-private/node-private#518
Fixes: https://hackerone.com/reports/2233486
Reviewed-By: Matteo Collina <[email protected]>
Reviewed-By: Marco Ippolito <[email protected]>
Reviewed-By: Rafael Gonzaga <[email protected]>
CVE-ID: CVE-2024-22019
ShogunPanda authored and marco-ippolito committed Feb 13, 2024
1 parent ef3eea2 commit 6155a1f
Showing 4 changed files with 171 additions and 4 deletions.
12 changes: 12 additions & 0 deletions doc/api/errors.md
@@ -3124,6 +3124,18 @@
malconfigured clients, if more than 8 KiB of HTTP header data is received then
HTTP parsing will abort without a request or response object being created, and
an `Error` with this code will be emitted.

<a id="HPE_CHUNK_EXTENSIONS_OVERFLOW"></a>

### `HPE_CHUNK_EXTENSIONS_OVERFLOW`

<!-- YAML
added: REPLACEME
-->

Too much data was received for a chunk's extensions. In order to protect
against malicious or malconfigured clients, if more than 16 KiB of chunk
extension data is received then an `Error` with this code will be emitted.

<a id="HPE_UNEXPECTED_CONTENT_LENGTH"></a>

### `HPE_UNEXPECTED_CONTENT_LENGTH`
8 changes: 8 additions & 0 deletions lib/_http_server.js
@@ -857,6 +857,11 @@ const requestHeaderFieldsTooLargeResponse = Buffer.from(
  'Connection: close\r\n\r\n', 'ascii',
);

const requestChunkExtensionsTooLargeResponse = Buffer.from(
  `HTTP/1.1 413 ${STATUS_CODES[413]}\r\n` +
  'Connection: close\r\n\r\n', 'ascii',
);

function socketOnError(e) {
  // Ignore further errors
  this.removeListener('error', socketOnError);
@@ -877,6 +882,9 @@ function socketOnError(e) {
      case 'HPE_HEADER_OVERFLOW':
        response = requestHeaderFieldsTooLargeResponse;
        break;
      case 'HPE_CHUNK_EXTENSIONS_OVERFLOW':
        response = requestChunkExtensionsTooLargeResponse;
        break;
      case 'ERR_HTTP_REQUEST_TIMEOUT':
        response = requestTimeoutResponse;
        break;
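
For reference, `STATUS_CODES[413]` is `'Payload Too Large'`, so the buffer
built above serializes to exactly the byte string the new tests assert. A
quick standalone check (illustrative):

const { STATUS_CODES } = require('http');

// Same construction as requestChunkExtensionsTooLargeResponse above.
const response = Buffer.from(
  `HTTP/1.1 413 ${STATUS_CODES[413]}\r\n` +
  'Connection: close\r\n\r\n', 'ascii',
);

console.log(JSON.stringify(response.toString('ascii')));
// "HTTP/1.1 413 Payload Too Large\r\nConnection: close\r\n\r\n"
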
24 changes: 20 additions & 4 deletions src/node_http_parser.cc
@@ -79,6 +79,8 @@
const uint32_t kOnExecute = 5;
const uint32_t kOnTimeout = 6;
// Any more fields than this will be flushed into JS
const size_t kMaxHeaderFieldsCount = 32;
// Maximum size of chunk extensions
const size_t kMaxChunkExtensionsSize = 16384;

const uint32_t kLenientNone = 0;
const uint32_t kLenientHeaders = 1 << 0;
@@ -271,6 +273,7 @@ class Parser : public AsyncWrap, public StreamListener {

    num_fields_ = num_values_ = 0;
    headers_completed_ = false;
    chunk_extensions_nread_ = 0;
    last_message_start_ = uv_hrtime();
    url_.Reset();
    status_message_.Reset();
@@ -526,9 +529,22 @@ class Parser : public AsyncWrap, public StreamListener {
    return 0;
  }

  // Track the cumulative size of this chunk's extensions and fail once the
  // limit is exceeded
  int on_chunk_extension(const char* at, size_t length) {
    chunk_extensions_nread_ += length;

    if (chunk_extensions_nread_ > kMaxChunkExtensionsSize) {
      llhttp_set_error_reason(
          &parser_, "HPE_CHUNK_EXTENSIONS_OVERFLOW:Chunk extensions overflow");
      return HPE_USER;
    }

    return 0;
  }

  // Reset nread for the next chunk and also reset the extensions counter
  int on_chunk_header() {
    header_nread_ = 0;
    chunk_extensions_nread_ = 0;
    return 0;
  }
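
The bookkeeping above is small enough to restate in JavaScript; a sketch of
the same accumulate-and-reset logic (illustrative only, the authoritative
implementation is the C++ above):

const kMaxChunkExtensionsSize = 16384;

class ChunkExtensionCounter {
  constructor() { this.nread = 0; }

  // Mirrors Parser::on_chunk_extension(): llhttp may deliver the extension
  // bytes in many fragments, so the count accumulates across calls.
  onChunkExtension(data) {
    this.nread += data.length;
    if (this.nread > kMaxChunkExtensionsSize) {
      throw new Error('HPE_CHUNK_EXTENSIONS_OVERFLOW: Chunk extensions overflow');
    }
  }

  // Mirrors Parser::on_chunk_header(): the counter is per chunk, so a body
  // made of many chunks with small extensions each is still accepted
  // (exactly what the third test below verifies).
  onChunkHeader() { this.nread = 0; }
}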

@@ -1017,6 +1033,7 @@ class Parser : public AsyncWrap, public StreamListener {
  bool headers_completed_ = false;
  bool pending_pause_ = false;
  uint64_t header_nread_ = 0;
  uint64_t chunk_extensions_nread_ = 0;
  uint64_t max_http_header_size_;
  uint64_t last_message_start_;
  ConnectionsList* connectionsList_;
@@ -1195,10 +1212,9 @@ const llhttp_settings_t Parser::settings = {
    Proxy<DataCall, &Parser::on_header_value>::Raw,

    // on_chunk_extension_name
    Proxy<DataCall, &Parser::on_chunk_extension>::Raw,
    // on_chunk_extension_value
    Proxy<DataCall, &Parser::on_chunk_extension>::Raw,

    Proxy<Call, &Parser::on_headers_complete>::Raw,
    Proxy<DataCall, &Parser::on_body>::Raw,
    Proxy<Call, &Parser::on_message_complete>::Raw,
131 changes: 131 additions & 0 deletions test/parallel/test-http-chunk-extensions-limit.js
@@ -0,0 +1,131 @@
'use strict';

const common = require('../common');
const http = require('http');
const net = require('net');
const assert = require('assert');

// Verify that chunk extensions are limited in size when sent all together.
{
  const server = http.createServer((req, res) => {
    req.on('end', () => {
      res.writeHead(200, { 'Content-Type': 'text/plain' });
      res.end('bye');
    });

    req.resume();
  });

  server.listen(0, () => {
    const sock = net.connect(server.address().port);
    let data = '';

    sock.on('data', (chunk) => data += chunk.toString('utf-8'));

    sock.on('end', common.mustCall(function() {
      assert.strictEqual(data, 'HTTP/1.1 413 Payload Too Large\r\nConnection: close\r\n\r\n');
      server.close();
    }));

    sock.end('' +
      'GET / HTTP/1.1\r\n' +
      'Host: localhost:8080\r\n' +
      'Transfer-Encoding: chunked\r\n\r\n' +
      '2;' + 'A'.repeat(20000) + '=bar\r\nAA\r\n' +
      '0\r\n\r\n'
    );
  });
}

// Verify that chunk extensions are limited in size when sent in intervals.
{
  const server = http.createServer((req, res) => {
    req.on('end', () => {
      res.writeHead(200, { 'Content-Type': 'text/plain' });
      res.end('bye');
    });

    req.resume();
  });

  server.listen(0, () => {
    const sock = net.connect(server.address().port);
    let remaining = 20000;
    let data = '';

    const interval = setInterval(
      () => {
        if (remaining > 0) {
          sock.write('A'.repeat(1000));
        } else {
          sock.write('=bar\r\nAA\r\n0\r\n\r\n');
          clearInterval(interval);
        }

        remaining -= 1000;
      },
      common.platformTimeout(20),
    ).unref();

    sock.on('data', (chunk) => data += chunk.toString('utf-8'));

    sock.on('end', common.mustCall(function() {
      assert.strictEqual(data, 'HTTP/1.1 413 Payload Too Large\r\nConnection: close\r\n\r\n');
      server.close();
    }));

    sock.write('' +
      'GET / HTTP/1.1\r\n' +
      'Host: localhost:8080\r\n' +
      'Transfer-Encoding: chunked\r\n\r\n' +
      '2;'
    );
  });
}

// Verify that the chunk extensions counter is correctly reset after each chunk
{
  const server = http.createServer((req, res) => {
    req.on('end', () => {
      res.writeHead(200, { 'content-type': 'text/plain', 'connection': 'close', 'date': 'now' });
      res.end('bye');
    });

    req.resume();
  });

  server.listen(0, () => {
    const sock = net.connect(server.address().port);
    let data = '';

    sock.on('data', (chunk) => data += chunk.toString('utf-8'));

    sock.on('end', common.mustCall(function() {
      assert.strictEqual(
        data,
        'HTTP/1.1 200 OK\r\n' +
        'content-type: text/plain\r\n' +
        'connection: close\r\n' +
        'date: now\r\n' +
        'Transfer-Encoding: chunked\r\n' +
        '\r\n' +
        '3\r\n' +
        'bye\r\n' +
        '0\r\n' +
        '\r\n',
      );

      server.close();
    }));

    sock.end('' +
      'GET / HTTP/1.1\r\n' +
      'Host: localhost:8080\r\n' +
      'Transfer-Encoding: chunked\r\n\r\n' +
      '2;' + 'A'.repeat(10000) + '=bar\r\nAA\r\n' +
      '2;' + 'A'.repeat(10000) + '=bar\r\nAA\r\n' +
      '2;' + 'A'.repeat(10000) + '=bar\r\nAA\r\n' +
      '0\r\n\r\n'
    );
  });
}
