From 8d0ee420a4d91ac7fd97316338f1e28b4b060cbf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= <luhliari@redhat.com>
Date: Thu, 10 Oct 2024 19:26:27 +0200
Subject: [PATCH 1/6] Ignore whitespace chars after chunk-size

Previously (before the #1498 change), squid was accepting TE-chunked replies
with whitespace after chunk-size and missing chunk-ext data. After #1498,
such replies are rejected.

It turned out that replies with such whitespace chars are pretty
common and other webservers which can act as forward proxies (e.g.
nginx, httpd...) accept them.

This change allows Squid to proxy chunked responses from origin servers
that put whitespace between the chunk-size and the CRLF.
---
 src/http/one/TeChunkedParser.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index 9cce10fdc91..04753395e16 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,6 +125,7 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
     // Code becomes much simpler when incremental parsing functions throw on
     // bad or insufficient input, like in the code below. TODO: Expand up.
     try {
+        tok.skipAll(CharacterSet::WSP); // Some servers send SP/TAB after chunk-size
         parseChunkExtensions(tok); // a possibly empty chunk-ext list
         tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
         buf_ = tok.remaining();
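
For illustration only, and not part of the patch: a minimal standalone C++ sketch of the
lenient rule restored here, with strtol() and C strings standing in for Squid's Tokenizer.
It accepts "chunk-size *( SP / HTAB ) CRLF", including the two-space case from Bug 4492
mentioned later in this series.

// Standalone sketch (not Squid code): tolerate SP/TAB between the hex chunk-size and CRLF.
// (strtol() is a simplification: unlike a real parser, it also tolerates signs and "0x".)
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Returns the chunk size, or -1 unless the line is chunk-size + optional SP/HTAB + CRLF.
static long parseChunkSizeLine(const char *line)
{
    char *end = nullptr;
    const long size = std::strtol(line, &end, 16);       // hex chunk-size
    if (end == line || size < 0)
        return -1;                                        // no hex digits at all
    while (*end == ' ' || *end == '\t')
        ++end;                                            // the whitespace this patch starts to ignore
    return std::strcmp(end, "\r\n") == 0 ? size : -1;     // CRLF is still required
}

int main()
{
    std::printf("%ld\n", parseChunkSizeLine("1a\r\n"));    // 26
    std::printf("%ld\n", parseChunkSizeLine("1a  \r\n"));  // 26: SP/TAB after chunk-size ignored
    std::printf("%ld\n", parseChunkSizeLine("1a ;x\r\n")); // -1: chunk-ext is not handled by this sketch
    return 0;
}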

From 9c8d35f899035fa06021ab3fe6919f892c2f0c6b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= <luhliari@redhat.com>
Date: Fri, 11 Oct 2024 02:06:31 +0200
Subject: [PATCH 2/6] Added new argument to Http::One::ParseBws()

The new wsp_only argument to ParseBws() selects which set of whitespace
characters is skipped. If wsp_only is set to true, only SP and HTAB chars
are skipped.

Also reduced the number of ParseBws() calls.
---
 src/http/one/Parser.cc | 4 ++--
 src/http/one/Parser.h | 3 ++-
 src/http/one/TeChunkedParser.cc | 13 +++++++++----
 src/http/one/TeChunkedParser.h | 2 +-
 4 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
index b1908316a0b..01d7e3bc0e8 100644
--- a/src/http/one/Parser.cc
+++ b/src/http/one/Parser.cc
@@ -273,9 +273,9 @@ Http::One::ErrorLevel()
 
 // BWS = *( SP / HTAB ) ; WhitespaceCharacters() may relax this RFC 7230 rule
 void
-Http::One::ParseBws(Parser::Tokenizer &tok)
+Http::One::ParseBws(Parser::Tokenizer &tok, const bool wsp_only)
 {
-    const auto count = tok.skipAll(Parser::WhitespaceCharacters());
+    const auto count = tok.skipAll(wsp_only ? CharacterSet::WSP : Parser::WhitespaceCharacters());
 
     if (tok.atEnd())
         throw InsufficientInput(); // even if count is positive
diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
index d9a0ac8c273..08200371cd6 100644
--- a/src/http/one/Parser.h
+++ b/src/http/one/Parser.h
@@ -163,8 +163,9 @@ class Parser : public RefCountable
 };
 
 /// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
+/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimeter chars
 /// \throws InsufficientInput when the end of BWS cannot be confirmed
-void ParseBws(Parser::Tokenizer &);
+void ParseBws(Parser::Tokenizer &, const bool wsp_only = false);
 
 /// the right debugs() level for logging HTTP violation messages
 int ErrorLevel();
diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index 04753395e16..41e1e5ddaea 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,8 +125,11 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
     // Code becomes much simpler when incremental parsing functions throw on
     // bad or insufficient input, like in the code below. TODO: Expand up.
     try {
-        tok.skipAll(CharacterSet::WSP); // Some servers send SP/TAB after chunk-size
-        parseChunkExtensions(tok); // a possibly empty chunk-ext list
+        // A possibly empty chunk-ext list. If no chunk-ext has been found,
+        // try to skip trailing BWS, because some servers send "chunk-size BWS CRLF".
+        if (!parseChunkExtensions(tok))
+            ParseBws(tok, true);
+
         tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
         buf_ = tok.remaining();
         parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
@@ -140,20 +143,22 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
 
 /// Parses the chunk-ext list (RFC 9112 section 7.1.1:
 /// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
-void
+bool
 Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
 {
+    bool foundChunkExt = false;
     do {
         auto tok = callerTok;
 
         ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
 
         if (!tok.skip(';'))
-            return; // reached the end of extensions (if any)
+            return foundChunkExt; // reached the end of extensions (if any)
 
         parseOneChunkExtension(tok);
         buf_ = tok.remaining(); // got one extension
         callerTok = tok;
+        foundChunkExt = true;
     } while (true);
 }
 
diff --git a/src/http/one/TeChunkedParser.h b/src/http/one/TeChunkedParser.h
index 02eacd1bb89..8c5d4bb4cba 100644
--- a/src/http/one/TeChunkedParser.h
+++ b/src/http/one/TeChunkedParser.h
@@ -71,7 +71,7 @@ class TeChunkedParser : public Http1::Parser
 private:
     bool parseChunkSize(Tokenizer &tok);
     bool parseChunkMetadataSuffix(Tokenizer &);
-    void parseChunkExtensions(Tokenizer &);
+    bool parseChunkExtensions(Tokenizer &);
    void parseOneChunkExtension(Tokenizer &);
     bool parseChunkBody(Tokenizer &tok);
     bool parseChunkEnd(Tokenizer &tok);
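
What the wsp_only switch changes can be shown without Squid's Tokenizer or CharacterSet.
A standalone sketch, using plain strings and a guessed relaxed set (the ParseStrictBws()
comment later in this series indicates the relaxed WhitespaceCharacters() may cover CR):

#include <cstddef>
#include <cstdio>
#include <string>

// Skips leading "bad whitespace"; wspOnly selects the character set, like the new
// wsp_only argument above. The relaxed set is a rough stand-in for WhitespaceCharacters().
static std::size_t skipBws(std::string &buf, const bool wspOnly)
{
    const char *wsp = " \t";               // SP / HTAB, like CharacterSet::WSP
    const char *relaxed = " \t\x0b\x0c\r"; // adds VT, FF, CR
    auto n = buf.find_first_not_of(wspOnly ? wsp : relaxed);
    if (n == std::string::npos)
        n = buf.size();
    buf.erase(0, n);
    return n; // number of whitespace octets skipped
}

int main()
{
    const std::string afterChunkSize = "  \r\nhello"; // two SPs, then the required CRLF

    std::string strict = afterChunkSize;
    skipBws(strict, true);
    std::printf("wsp_only=true  keeps the CR: %s\n", strict[0] == '\r' ? "yes" : "no");  // yes

    std::string relaxed = afterChunkSize;
    skipBws(relaxed, false);
    std::printf("wsp_only=false keeps the CR: %s\n", relaxed[0] == '\r' ? "yes" : "no"); // no: a later CRLF check would fail
    return 0;
}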

From 81e67f97f9c386bdd0bb4a5e182395c46adb70ad Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= <luhliari@redhat.com>
Date: Fri, 11 Oct 2024 02:44:33 +0200
Subject: [PATCH 3/6] Fix typo in Parser.h

---
 src/http/one/Parser.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
index 08200371cd6..3ef4c5f7752 100644
--- a/src/http/one/Parser.h
+++ b/src/http/one/Parser.h
@@ -163,7 +163,7 @@ class Parser : public RefCountable
 };
 
 /// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
-/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimeter chars
+/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimiter chars
 /// \throws InsufficientInput when the end of BWS cannot be confirmed
 void ParseBws(Parser::Tokenizer &, const bool wsp_only = false);
 

From a0d4fe1794e605f8299a5c118c758a807453f016 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 10 Oct 2024 22:39:42 -0400
Subject: [PATCH 4/6] Bug 5449 is a regression of Bug 4492!

Both bugs deal with "chunk-size SP+ CRLF" use cases. Bug 4492 had _two_
spaces after chunk-size, which answers one of the PR review questions:
Should we skip just one space? No, we should not.

The lines moved around in many commits, but I believe this regression
was introduced in commit 951013d0 because that commit stopped consuming
partially parsed chunk-ext sequences. That consumption was wrong, but it
had a positive side effect -- fixing Bug 4492...
---
 src/http/one/TeChunkedParser.cc | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index 41e1e5ddaea..aa4a840fdcf 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,10 +125,10 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
     // Code becomes much simpler when incremental parsing functions throw on
     // bad or insufficient input, like in the code below. TODO: Expand up.
     try {
-        // A possibly empty chunk-ext list. If no chunk-ext has been found,
-        // try to skip trailing BWS, because some servers send "chunk-size BWS CRLF".
-        if (!parseChunkExtensions(tok))
-            ParseBws(tok, true);
+        // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
+        ParseBws(tok, true);
+
+        parseChunkExtensions(tok);
 
         tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
         buf_ = tok.remaining();
@@ -150,7 +150,7 @@ Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
     do {
         auto tok = callerTok;
 
-        ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
+        ParseBws(tok);
 
         if (!tok.skip(';'))
             return foundChunkExt; // reached the end of extensions (if any)

From f837f5ff61301a17008f16ce1fb793c2abf19786 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 10 Oct 2024 23:06:42 -0400
Subject: [PATCH 5/6] fixup: Fewer conditionals/ifs and more explicit spelling

... to draw code reader attention when something unusual is going on.
---
 src/http/one/Parser.cc | 22 ++++++++++++++++++----
 src/http/one/Parser.h | 10 ++++++++--
 src/http/one/TeChunkedParser.cc | 14 ++++++--------
 src/http/one/TeChunkedParser.h | 2 +-
 4 files changed, 33 insertions(+), 15 deletions(-)

diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
index 01d7e3bc0e8..d3937e5e96b 100644
--- a/src/http/one/Parser.cc
+++ b/src/http/one/Parser.cc
@@ -271,11 +271,12 @@ Http::One::ErrorLevel()
     return Config.onoff.relaxed_header_parser < 0 ? DBG_IMPORTANT : 5;
 }
 
-// BWS = *( SP / HTAB ) ; WhitespaceCharacters() may relax this RFC 7230 rule
-void
-Http::One::ParseBws(Parser::Tokenizer &tok, const bool wsp_only)
+/// common part of ParseBws() and ParseStrctBws()
+namespace Http::One {
+static void
+ParseBws_(Parser::Tokenizer &tok, const CharacterSet &bwsChars)
 {
-    const auto count = tok.skipAll(wsp_only ? CharacterSet::WSP : Parser::WhitespaceCharacters());
+    const auto count = tok.skipAll(bwsChars);
 
     if (tok.atEnd())
         throw InsufficientInput(); // even if count is positive
@@ -290,4 +291,17 @@ Http::One::ParseBws(Parser::Tokenizer &tok, const bool wsp_only)
 
     // success: no more BWS characters expected
 }
+} // namespace Http::One
+
+void
+Http::One::ParseBws(Parser::Tokenizer &tok)
+{
+    ParseBws_(tok, CharacterSet::WSP);
+}
+
+void
+Http::One::ParseStrictBws(Parser::Tokenizer &tok)
+{
+    ParseBws_(tok, Parser::WhitespaceCharacters());
+}
 
diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
index 3ef4c5f7752..49e399de546 100644
--- a/src/http/one/Parser.h
+++ b/src/http/one/Parser.h
@@ -163,9 +163,15 @@ class Parser : public RefCountable
 };
 
 /// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
-/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimiter chars
 /// \throws InsufficientInput when the end of BWS cannot be confirmed
-void ParseBws(Parser::Tokenizer &, const bool wsp_only = false);
+/// \sa WhitespaceCharacters() for the definition of BWS characters
+/// \sa ParseStrictBws() that avoids WhitespaceCharacters() uncertainties
+void ParseBws(Parser::Tokenizer &);
+
+/// Like ParseBws() but only skips CharacterSet::WSP characters. This variation
+/// must be used if the next element may start with CR or any other character
+/// from RelaxedDelimiterCharacters().
+void ParseStrictBws(Parser::Tokenizer &);
 
 /// the right debugs() level for logging HTTP violation messages
 int ErrorLevel();
diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index aa4a840fdcf..859471b8c77 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,11 +125,11 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
     // Code becomes much simpler when incremental parsing functions throw on
     // bad or insufficient input, like in the code below. TODO: Expand up.
     try {
-        // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
-        ParseBws(tok, true);
-
-        parseChunkExtensions(tok);
+        // Bug 4492: IBM_HTTP_Server sends SP after chunk-size.
+        // No ParseBws() here because it may consume CR required further below.
+        ParseStrictBws(tok);
 
+        parseChunkExtensions(tok); // a possibly empty chunk-ext list
         tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
         buf_ = tok.remaining();
         parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
@@ -143,22 +143,20 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
 
 /// Parses the chunk-ext list (RFC 9112 section 7.1.1:
 /// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
-bool
+void
 Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
 {
-    bool foundChunkExt = false;
     do {
         auto tok = callerTok;
 
         ParseBws(tok);
 
         if (!tok.skip(';'))
-            return foundChunkExt; // reached the end of extensions (if any)
+            return; // reached the end of extensions (if any)
 
         parseOneChunkExtension(tok);
         buf_ = tok.remaining(); // got one extension
         callerTok = tok;
-        foundChunkExt = true;
     } while (true);
 }
 
diff --git a/src/http/one/TeChunkedParser.h b/src/http/one/TeChunkedParser.h
index 8c5d4bb4cba..02eacd1bb89 100644
--- a/src/http/one/TeChunkedParser.h
+++ b/src/http/one/TeChunkedParser.h
@@ -71,7 +71,7 @@ class TeChunkedParser : public Http1::Parser
 private:
     bool parseChunkSize(Tokenizer &tok);
     bool parseChunkMetadataSuffix(Tokenizer &);
-    bool parseChunkExtensions(Tokenizer &);
+    void parseChunkExtensions(Tokenizer &);
     void parseOneChunkExtension(Tokenizer &);
     bool parseChunkBody(Tokenizer &tok);
     bool parseChunkEnd(Tokenizer &tok);
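
The parseChunkExtensions() loop above uses a copy-and-commit pattern: parse a copy of the
caller's tokenizer and assign it back only after a whole extension has been consumed, so a
partially parsed extension is never consumed. A standalone sketch of that shape, with
std::string in place of the Tokenizer and no validation of extension names or values:

#include <cstdio>
#include <string>

// skip leading SP/HTAB
static void skipWsp(std::string &s)
{
    std::string::size_type i = 0;
    while (i < s.size() && (s[i] == ' ' || s[i] == '\t'))
        ++i;
    s.erase(0, i);
}

// Consumes any leading ";name[=value]" extensions from callerBuf; returns how many.
static int parseChunkExtensions(std::string &callerBuf)
{
    int found = 0;
    while (true) {
        std::string buf = callerBuf; // parse a copy; commit only after a full extension
        skipWsp(buf);
        if (buf.empty() || buf[0] != ';')
            return found;            // reached the end of extensions (if any)
        buf.erase(0, 1);             // consume ";"
        skipWsp(buf);
        std::string::size_type end = 0;
        while (end < buf.size() && buf[end] != ';' && buf[end] != '\r')
            ++end;                   // swallow "name" or "name=value"
        buf.erase(0, end);
        callerBuf = buf;             // got one extension
        ++found;
    }
}

int main()
{
    std::string metadata = " ;name=value;foo\r\nrest";
    const int n = parseChunkExtensions(metadata);
    std::printf("extensions: %d; CRLF still ahead: %s\n", n, metadata.compare(0, 2, "\r\n") == 0 ? "yes" : "no");
    return 0;
}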

From f79936a234e722adb2dd08f31cf6019d81ee712c Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 10 Oct 2024 23:31:08 -0400
Subject: [PATCH 6/6] fixup: Deadly typo

---
 src/http/one/Parser.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
index d3937e5e96b..7403a9163a2 100644
--- a/src/http/one/Parser.cc
+++ b/src/http/one/Parser.cc
@@ -296,12 +296,12 @@ ParseBws_(Parser::Tokenizer &tok, const CharacterSet &bwsChars)
 void
 Http::One::ParseBws(Parser::Tokenizer &tok)
 {
-    ParseBws_(tok, CharacterSet::WSP);
+    ParseBws_(tok, Parser::WhitespaceCharacters());
 }
 
 void
 Http::One::ParseStrictBws(Parser::Tokenizer &tok)
 {
-    ParseBws_(tok, Parser::WhitespaceCharacters());
+    ParseBws_(tok, CharacterSet::WSP);
 }
 
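
With the swap above, the series settles on one shared helper and two thin entry points:
ParseBws() skips the relaxed WhitespaceCharacters() set, ParseStrictBws() skips only
SP/HTAB, and both throw InsufficientInput when whitespace runs to the end of the buffered
input. A standalone sketch of that final arrangement, using std::string and
std::runtime_error as stand-ins for the Tokenizer and InsufficientInput:

#include <cstdio>
#include <stdexcept>
#include <string>

// common part of the two entry points below (mirrors ParseBws_() in the patch)
static void parseBws_(std::string &buf, const std::string &bwsChars)
{
    const auto count = buf.find_first_not_of(bwsChars);
    if (count == std::string::npos)
        throw std::runtime_error("InsufficientInput"); // cannot confirm the end of BWS yet
    buf.erase(0, count); // zero or more BWS octets skipped; a non-BWS octet follows
}

static void parseBws(std::string &buf)       { parseBws_(buf, " \t\x0b\x0c\r"); } // relaxed stand-in
static void parseStrictBws(std::string &buf) { parseBws_(buf, " \t"); }           // SP / HTAB only

int main()
{
    std::string metadata = " ;ext=1\r\nrest";
    parseBws(metadata);                  // stops at ';': a chunk-ext may follow
    std::printf("after BWS: %s\n", metadata.c_str());

    std::string partial = "   ";         // whitespace runs to the end of buffered input
    try {
        parseStrictBws(partial);
    } catch (const std::runtime_error &e) {
        std::printf("%s: wait for more data\n", e.what());
    }
    return 0;
}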

From a0a9e6dc69d0c7b9ba237702b4c5020abc7ad1f8 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Sat, 4 Nov 2023 00:30:42 +0000
Subject: [PATCH] Bug 5154: Do not open IPv6 sockets when IPv6 is disabled
 (#1567)

... but allow basic IPv6 manipulations like getSockAddr().

    Address.cc:663 getAddrInfo() assertion failed: false

Squids receive IPv6 addresses from traffic, configuration, or
hard-coded constants even when ./configured with --disable-ipv6 or when
IPv6 support was automatically disabled at startup after failing IPv6
tests. To handle IPv6 correctly, such Squids must support basic IPv6
operations like recognizing an IPv6 address in a request-target or
reporting an unsolicited IPv6 DNS record. At least for now, such Squids
must also correctly parse configuration-related IPv6 addresses.

All those activities rely on various low-level operations like filling
an addrinfo structure with IP address information. Since 2012 commit
c5fbbc7, Ip::Address::getAddrInfo() was failing for IPv6 addresses when
Ip::EnableIpv6 was falsy. That change correctly recognized[^1] the need
for such Squids to handle IPv6, but to support basic operations, we need
to reject IPv6 addresses at a higher level and without asserting.

That high-level rejection work is ongoing, but initial attempts have
exposed difficult problems that will take time to address. For now, we
just avoid the assertion while protecting IPv6-disabled Squid from
listening on or opening connections to IPv6 addresses. Since Squid
already expects (and usually correctly handles) socket opening failures,
disabling those operations is better than failing in low-level IP
manipulation code.

The overall IPv6 posture of IPv6-disabled Squids that lack http_access
or other rules to deny IPv6 requests will change: This fix exposes more
of IPv6-disabled Squid code to IPv6 addresses. It is possible that such
exposure will make some IPv6 resources inside Squid (e.g., a previously
cached HTTP response) accessible to external requests. Squids will not
open or accept IPv6 connections but may forward requests with raw IPv6
targets to IPv4 cache_peers. Whether these and similar behavior changes
are going to be permanent is open for debate, but even if they are
temporary, they are arguably better than the corresponding assertions.

These changes do not affect IPv6-enabled Squids.

The assertion in IPv6-disabled Squid was reported by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/ipv6-assert.html where
it was filed as "Assertion on IPv6 Host Requests with --disable-ipv6".

[^1]: https://bugs.squid-cache.org/show_bug.cgi?id=3593#c1
---
 src/comm.cc | 6 ++++++
 src/ip/Address.cc | 2 +-
 src/ip/Intercept.cc | 8 ++++++++
 3 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/src/comm.cc b/src/comm.cc
index 4659955b011..271ba04d4da 100644
--- a/src/comm.cc
+++ b/src/comm.cc
@@ -344,6 +344,12 @@ comm_openex(int sock_type,
     /* Create socket for accepting new connections. */
     ++ statCounter.syscalls.sock.sockets;
 
+    if (!Ip::EnableIpv6 && addr.isIPv6()) {
+        debugs(50, 2, "refusing to open an IPv6 socket when IPv6 support is disabled: " << addr);
+        errno = ENOTSUP;
+        return -1;
+    }
+
     /* Setup the socket addrinfo details for use */
     addr.getAddrInfo(AI);
     AI->ai_socktype = sock_type;
diff --git a/src/ip/Address.cc b/src/ip/Address.cc
index b6f810bfc25..ae6db37da5e 100644
--- a/src/ip/Address.cc
+++ b/src/ip/Address.cc
@@ -623,7 +623,7 @@ Ip::Address::getAddrInfo(struct addrinfo *&dst, int force) const
         && dst->ai_protocol == 0)
         dst->ai_protocol = IPPROTO_UDP;
 
-    if (force == AF_INET6 || (force == AF_UNSPEC && Ip::EnableIpv6 && isIPv6()) ) {
+    if (force == AF_INET6 || (force == AF_UNSPEC && isIPv6()) ) {
         dst->ai_addr = (struct sockaddr*)new sockaddr_in6;
 
         memset(dst->ai_addr,0,sizeof(struct sockaddr_in6));
diff --git a/src/ip/Intercept.cc b/src/ip/Intercept.cc
index 1a5e2d15af1..a8522efaac0 100644
--- a/src/ip/Intercept.cc
+++ b/src/ip/Intercept.cc
@@ -15,6 +15,7 @@
 #include "comm/Connection.h"
 #include "fde.h"
 #include "ip/Intercept.h"
+#include "ip/tools.h"
 #include "src/tools.h"
 
 #include <cerrno>
@@ -430,6 +431,13 @@ Ip::Intercept::ProbeForTproxy(Ip::Address &test)
 
     debugs(3, 3, "Detect TPROXY support on port " << test);
 
+    if (!Ip::EnableIpv6 && test.isIPv6() && !test.setIPv4()) {
+        debugs(3, DBG_CRITICAL, "Cannot use TPROXY for " << test << " because IPv6 support is disabled");
+        if (doneSuid)
+            leave_suid();
+        return false;
+    }
+
     int tos = 1;
     int tmp_sock = -1;
 
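
The comm.cc hunk reads well in isolation: refuse to open a socket for an IPv6 address
while IPv6 support is disabled, and let callers handle an ordinary open failure. A
standalone sketch, assuming a POSIX socket API and a plain bool in place of Ip::EnableIpv6:

#include <cerrno>
#include <cstdio>
#include <sys/socket.h>

static bool EnableIpv6 = false; // stand-in for Ip::EnableIpv6 after --disable-ipv6 or failed startup tests

// Mirrors the new comm_openex() guard: refuse early with an ordinary error instead of
// asserting later inside low-level address-conversion code.
static int openSocketGuarded(const int family, const int type)
{
    if (!EnableIpv6 && family == AF_INET6) {
        errno = ENOTSUP;
        return -1;
    }
    return socket(family, type, 0);
}

int main()
{
    if (openSocketGuarded(AF_INET6, SOCK_STREAM) < 0)
        std::perror("IPv6 socket refused"); // reports the ENOTSUP reason
    return 0;
}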

From 4d6dd3ddba5e850a42c86d8233735165a371c31c Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Sun, 1 Sep 2024 00:39:34 +0000
Subject: [PATCH] Bug 5405: Large uploads fill request buffer and die (#1887)

    maybeMakeSpaceAvailable: request buffer full
    ReadNow: ... size 0, retval 0, errno 0
    terminateAll: 1/1 after ERR_CLIENT_GONE/WITH_CLIENT
    %Ss=TCP_MISS_ABORTED

This bug is triggered by a combination of the following two conditions:

* HTTP client upload fills Squid request buffer faster than it is
  drained by an origin server, cache_peer, or REQMOD service. The buffer
  accumulates 576 KB (default 512 KB client_request_buffer_max_size + 64
  KB internal "pipe" buffer).

* The affected server or service consumes a few bytes after the critical
  accumulation is reached. In other words, the bug cannot be triggered
  if nothing is consumed after the first condition above is met.

Comm::ReadNow() must not be called with a full buffer: Related
FD_READ_METHOD() code cannot distinguish "received EOF" from "had no
buffer space" outcomes. Server::readSomeData() tried to prevent such
calls, but the corresponding check had two problems:

* The check had an unsigned integer underflow bug[^1] that made it
  ineffective when inBuf length exceeded Config.maxRequestBufferSize.
  That length could exceed the limit due to reconfiguration and when
  inBuf space size first grew outside of maybeMakeSpaceAvailable()
  protections (e.g., during an inBuf.c_str() call) and then got filled
  with newly read data. That growth started happening after 2020 commit
  1dfbca06 optimized SBuf::cow() to merge leading and trailing space.
  Prior to that commit, Bug 5405 could probably only affect Squid
  reconfigurations that lower client_request_buffer_max_size.

* The check was separated from the ReadNow() call it was meant to
  protect. While ConnStateData was waiting for the socket to become
  ready for reading, various asynchronous events could alter inBuf or
  Config.maxRequestBufferSize.

This change fixes both problems.

This change also fixes Squid Bug 5214.

[^1]: That underflow bug was probably introduced in 2015 commit 4d1376d7
while trying to emulate the original "do not read less than two bytes"
ConnStateData::In::maybeMakeSpaceAvailable() condition. That condition
itself looks like a leftover from manual zero-terminated input buffer
days that ended with 2014 commit e7287625. It is now removed.
---

diff --git a/src/servers/Server.cc b/src/servers/Server.cc
index 70fd10b..dd20619 100644
--- a/src/servers/Server.cc
+++ b/src/servers/Server.cc
@@ -83,16 +83,25 @@ Server::maybeMakeSpaceAvailable()
     debugs(33, 4, "request buffer full: client_request_buffer_max_size=" << Config.maxRequestBufferSize);
 }
 
+bool
+Server::mayBufferMoreRequestBytes() const
+{
+    // TODO: Account for bodyPipe buffering as well.
+    if (inBuf.length() >= Config.maxRequestBufferSize) {
+        debugs(33, 4, "no: " << inBuf.length() << '-' << Config.maxRequestBufferSize << '=' << (inBuf.length() - Config.maxRequestBufferSize));
+        return false;
+    }
+    debugs(33, 7, "yes: " << Config.maxRequestBufferSize << '-' << inBuf.length() << '=' << (Config.maxRequestBufferSize - inBuf.length()));
+    return true;
+}
+
 void
 Server::readSomeData()
 {
     if (reading())
         return;
 
-    debugs(33, 4, clientConnection << ": reading request...");
-
-    // we can only read if there is more than 1 byte of space free
-    if (Config.maxRequestBufferSize - inBuf.length() < 2)
+    if (!mayBufferMoreRequestBytes())
         return;
 
     typedef CommCbMemFunT<Server, CommIoCbParams> Dialer;
@@ -123,7 +132,16 @@ Server::doClientRead(const CommIoCbParams &io)
      * Plus, it breaks our lame *HalfClosed() detection
      */
 
+    // mayBufferMoreRequestBytes() was true during readSomeData(), but variables
+    // like Config.maxRequestBufferSize may have changed since that check
+    if (!mayBufferMoreRequestBytes()) {
+        // XXX: If we avoid Comm::ReadNow(), we should not Comm::Read() again
+        // when the wait is over; resume these doClientRead() checks instead.
+        return; // wait for noteMoreBodySpaceAvailable() or a similar inBuf draining event
+    }
     maybeMakeSpaceAvailable();
+    Assure(inBuf.spaceSize());
+
     CommIoCbParams rd(this); // will be expanded with ReadNow results
     rd.conn = io.conn;
     switch (Comm::ReadNow(rd, inBuf)) {
diff --git a/src/servers/Server.h b/src/servers/Server.h
index ef105f5..6e549b3 100644
--- a/src/servers/Server.h
+++ b/src/servers/Server.h
@@ -119,6 +119,9 @@ protected:
     /// abort any pending transactions and prevent new ones (by closing)
     virtual void terminateAll(const Error &, const LogTagsErrors &) = 0;
 
+    /// whether client_request_buffer_max_size allows inBuf.length() increase
+    bool mayBufferMoreRequestBytes() const;
+
     void doClientRead(const CommIoCbParams &io);
     void clientWriteDone(const CommIoCbParams &io);
 
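
The unsigned underflow described in the first bullet and the footnote is easy to reproduce
in isolation; a standalone sketch with made-up sizes:

#include <cstddef>
#include <cstdio>

int main()
{
    const std::size_t maxRequestBufferSize = 512 * 1024;        // like client_request_buffer_max_size
    const std::size_t inBufLength = maxRequestBufferSize + 100;  // inBuf grew past the limit

    // old check: "max - length" wraps to a huge value, so "< 2" never triggers
    const bool oldGuardBlocksRead = (maxRequestBufferSize - inBufLength) < 2;
    // new check: compare the sizes directly, no subtraction, no underflow
    const bool newCheckAllowsMore = inBufLength < maxRequestBufferSize;

    std::printf("old guard stops the read: %s\n", oldGuardBlocksRead ? "yes" : "no"); // "no" (the bug)
    std::printf("may buffer more bytes:    %s\n", newCheckAllowsMore ? "yes" : "no"); // "no" (the fix)
    return 0;
}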