import squid-4.15-7.module+el8.9.0+19703+a1da7223

i8c-stream-4 changed/i8c-stream-4/squid-4.15-7.module+el8.9.0+19703+a1da7223
MSVSphere Packaging Team 11 months ago
parent f2d71e3271
commit 2925a17d66

File diff suppressed because it is too large

@@ -1,23 +0,0 @@
diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc
index 6a9736f..0a883fa 100644
--- a/src/auth/digest/Config.cc
+++ b/src/auth/digest/Config.cc
@@ -847,11 +847,15 @@ Auth::Digest::Config::decode(char const *proxy_auth, const char *aRequestRealm)
break;
case DIGEST_NC:
- if (value.size() != 8) {
+ if (value.size() == 8) {
+ // for historical reasons, the nc value MUST be exactly 8 bytes
+ static_assert(sizeof(digest_request->nc) == 8 + 1, "bad nc buffer size");
+ xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
+ debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
+ } else {
debugs(29, 9, "Invalid nc '" << value << "' in '" << temp << "'");
+ digest_request->nc[0] = 0;
}
- xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
- debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
break;
case DIGEST_CNONCE:
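
The 23 lines removed above (per the "-1,23 +0,0" hunk header) are a patch to src/auth/digest/Config.cc that tightens Digest nonce-count parsing: the nc value is copied into the fixed-size buffer only when it is exactly 8 bytes long, and the buffer is zeroed otherwise. A minimal, self-contained sketch of that validation idea, using standard-library calls instead of Squid's xstrncpy/debugs helpers (DigestRequest and setNonceCount are illustrative names, not Squid's API):

    // Sketch only: strict 8-character nonce-count validation, as in the patch above.
    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    struct DigestRequest {
        char nc[8 + 1];                 // the nc value MUST be exactly 8 bytes, plus NUL
    };

    static void setNonceCount(DigestRequest &req, const char *value, std::size_t len)
    {
        if (len == 8) {
            static_assert(sizeof(req.nc) == 8 + 1, "bad nc buffer size");
            std::memcpy(req.nc, value, 8);
            req.nc[8] = '\0';
            std::printf("Found noncecount '%s'\n", req.nc);
        } else {
            std::printf("Invalid nc of length %zu\n", len);
            req.nc[0] = '\0';           // reject malformed values instead of copying them
        }
    }

    int main()
    {
        DigestRequest req;
        setNonceCount(req, "00000001", 8);  // accepted: exactly 8 characters
        setNonceCount(req, "1", 1);         // rejected: buffer left empty
    }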

@@ -0,0 +1,163 @@
diff --git a/src/client_side.cc b/src/client_side.cc
index f57f3f7..ab393e4 100644
--- a/src/client_side.cc
+++ b/src/client_side.cc
@@ -906,7 +906,7 @@ ConnStateData::kick()
* We are done with the response, and we are either still receiving request
* body (early response!) or have already stopped receiving anything.
*
- * If we are still receiving, then clientParseRequest() below will fail.
+ * If we are still receiving, then parseRequests() below will fail.
* (XXX: but then we will call readNextRequest() which may succeed and
* execute a smuggled request as we are not done with the current request).
*
@@ -926,28 +926,12 @@ ConnStateData::kick()
* Attempt to parse a request from the request buffer.
* If we've been fed a pipelined request it may already
* be in our read buffer.
- *
- \par
- * This needs to fall through - if we're unlucky and parse the _last_ request
- * from our read buffer we may never re-register for another client read.
*/
- if (clientParseRequests()) {
- debugs(33, 3, clientConnection << ": parsed next request from buffer");
- }
+ parseRequests();
- /** \par
- * Either we need to kick-start another read or, if we have
- * a half-closed connection, kill it after the last request.
- * This saves waiting for half-closed connections to finished being
- * half-closed _AND_ then, sometimes, spending "Timeout" time in
- * the keepalive "Waiting for next request" state.
- */
- if (commIsHalfClosed(clientConnection->fd) && pipeline.empty()) {
- debugs(33, 3, "half-closed client with no pending requests, closing");
- clientConnection->close();
+ if (!isOpen())
return;
- }
/** \par
* At this point we either have a parsed request (which we've
@@ -2058,16 +2042,11 @@ ConnStateData::receivedFirstByte()
commSetConnTimeout(clientConnection, Config.Timeout.request, timeoutCall);
}
-/**
- * Attempt to parse one or more requests from the input buffer.
- * Returns true after completing parsing of at least one request [header]. That
- * includes cases where parsing ended with an error (e.g., a huge request).
- */
-bool
-ConnStateData::clientParseRequests()
+/// Attempt to parse one or more requests from the input buffer.
+/// May close the connection.
+void
+ConnStateData::parseRequests()
{
- bool parsed_req = false;
-
debugs(33, 5, HERE << clientConnection << ": attempting to parse");
// Loop while we have read bytes that are not needed for producing the body
@@ -2116,8 +2095,6 @@ ConnStateData::clientParseRequests()
processParsedRequest(context);
- parsed_req = true; // XXX: do we really need to parse everything right NOW ?
-
if (context->mayUseConnection()) {
debugs(33, 3, HERE << "Not parsing new requests, as this request may need the connection");
break;
@@ -2130,8 +2107,19 @@ ConnStateData::clientParseRequests()
}
}
- /* XXX where to 'finish' the parsing pass? */
- return parsed_req;
+ debugs(33, 7, "buffered leftovers: " << inBuf.length());
+
+ if (isOpen() && commIsHalfClosed(clientConnection->fd)) {
+ if (pipeline.empty()) {
+ // we processed what we could parse, and no more data is coming
+ debugs(33, 5, "closing half-closed without parsed requests: " << clientConnection);
+ clientConnection->close();
+ } else {
+ // we parsed what we could, and no more data is coming
+ debugs(33, 5, "monitoring half-closed while processing parsed requests: " << clientConnection);
+ flags.readMore = false; // may already be false
+ }
+ }
}
void
@@ -2148,23 +2136,7 @@ ConnStateData::afterClientRead()
if (pipeline.empty())
fd_note(clientConnection->fd, "Reading next request");
- if (!clientParseRequests()) {
- if (!isOpen())
- return;
- /*
- * If the client here is half closed and we failed
- * to parse a request, close the connection.
- * The above check with connFinishedWithConn() only
- * succeeds _if_ the buffer is empty which it won't
- * be if we have an incomplete request.
- * XXX: This duplicates ConnStateData::kick
- */
- if (pipeline.empty() && commIsHalfClosed(clientConnection->fd)) {
- debugs(33, 5, clientConnection << ": half-closed connection, no completed request parsed, connection closing.");
- clientConnection->close();
- return;
- }
- }
+ parseRequests();
if (!isOpen())
return;
@@ -3945,7 +3917,7 @@ ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic)
startPinnedConnectionMonitoring();
if (pipeline.empty())
- kick(); // in case clientParseRequests() was blocked by a busy pic.connection
+ kick(); // in case parseRequests() was blocked by a busy pic.connection
}
/// Forward future client requests using the given server connection.
diff --git a/src/client_side.h b/src/client_side.h
index 9fe8463..dfb4d8e 100644
--- a/src/client_side.h
+++ b/src/client_side.h
@@ -85,7 +85,6 @@ public:
virtual void doneWithControlMsg();
/// Traffic parsing
- bool clientParseRequests();
void readNextRequest();
/// try to make progress on a transaction or read more I/O
@@ -373,6 +372,7 @@ private:
virtual bool connFinishedWithConn(int size);
virtual void checkLogging();
+ void parseRequests();
void clientAfterReadingRequests();
bool concurrentRequestQueueFilled() const;
diff --git a/src/tests/stub_client_side.cc b/src/tests/stub_client_side.cc
index d7efb0f..655ed83 100644
--- a/src/tests/stub_client_side.cc
+++ b/src/tests/stub_client_side.cc
@@ -14,7 +14,7 @@
#include "tests/STUB.h"
#include "client_side.h"
-bool ConnStateData::clientParseRequests() STUB_RETVAL(false)
+void ConnStateData::parseRequests() STUB
void ConnStateData::readNextRequest() STUB
bool ConnStateData::isOpen() const STUB_RETVAL(false)
void ConnStateData::kick() STUB
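
The 163-line file added above is the half-closed fix that the spec below wires in as Patch210 (squid-4.15-halfclosed.patch). It replaces the bool-returning clientParseRequests() with a void parseRequests() that decides on its own what to do with a half-closed client: close the connection when nothing was parsed, or stop reading and finish the already-parsed requests otherwise, so kick() and afterClientRead() no longer duplicate that check. A compilable sketch of the consolidated control flow, with stand-in types and names rather than Squid's real ConnStateData API:

    // Sketch only: the half-closed decision lives in one place, parseRequests().
    #include <deque>
    #include <iostream>
    #include <string>

    struct ConnState {
        std::deque<std::string> pipeline;  // requests parsed but not yet answered
        bool open = true;                  // stand-in for isOpen()
        bool halfClosed = false;           // stand-in for commIsHalfClosed()
        bool readMore = true;

        void close() { open = false; }

        void parseRequests() {
            // ... the parse loop would run here, pushing contexts onto pipeline ...
            if (open && halfClosed) {
                if (pipeline.empty()) {
                    std::cout << "closing half-closed without parsed requests\n";
                    close();               // no more data is coming and nothing to finish
                } else {
                    std::cout << "monitoring half-closed while processing parsed requests\n";
                    readMore = false;      // finish what was parsed, but read no more
                }
            }
        }

        void afterClientRead() {
            parseRequests();               // callers no longer repeat the half-closed check
            if (!open)
                return;
            // ... continue with the next request ...
        }
    };

    int main()
    {
        ConnState c;
        c.halfClosed = true;
        c.afterClientRead();               // empty pipeline: the connection gets closed
        std::cout << (c.open ? "open" : "closed") << "\n";
    }

The design point, noted in the removed XXX comment in afterClientRead() ("This duplicates ConnStateData::kick"), is that the old duplicated half-closed handling goes away once the decision lives inside parseRequests().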

@@ -2,7 +2,7 @@
Name: squid
Version: 4.15
-Release: 6%{?dist}.1
+Release: 7%{?dist}
Summary: The Squid proxy caching server
Epoch: 7
# See CREDITS for breakdown of non GPLv2+ code
@@ -38,6 +38,8 @@ Patch206: squid-4.11-active-ftp.patch
Patch208: squid-4.11-convert-ipv4.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2006121
Patch209: squid-4.15-ftp-filename-extraction.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2076717
+Patch210: squid-4.15-halfclosed.patch
# Security fixes
# https://bugzilla.redhat.com/show_bug.cgi?id=1941506
@@ -46,11 +48,6 @@ Patch300: squid-4.15-CVE-2021-28116.patch
Patch301: squid-4.15-CVE-2021-46784.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2129771
Patch302: squid-4.15-CVE-2022-41318.patch
-# https://bugzilla.redhat.com/show_bug.cgi?id=2245910
-# +backported: https://github.com/squid-cache/squid/commit/417da4006cf5c97d44e74431b816fc58fec9e270
-Patch303: squid-4.15-CVE-2023-46846.patch
-# https://bugzilla.redhat.com/show_bug.cgi?id=2245916
-Patch304: squid-4.15-CVE-2023-46847.patch
Requires: bash >= 2.0
Requires(pre): shadow-utils
@@ -112,13 +109,12 @@ lookup program (dnsserver), a program for retrieving FTP data
%patch206 -p1 -b .active-ftp
%patch208 -p1 -b .convert-ipv4
%patch209 -p1 -b .ftp-fn-extraction
+%patch210 -p1 -b .halfclosed
# Security patches
%patch300 -p1 -b .CVE-2021-28116
%patch301 -p1 -b .CVE-2021-46784
%patch302 -p1 -b .CVE-2022-41318
-%patch303 -p1 -b .CVE-2023-46846
-%patch304 -p1 -b .CVE-2023-46847
# https://bugzilla.redhat.com/show_bug.cgi?id=1679526
# Patch in the vendor documentation and used different location for documentation
@@ -335,14 +331,11 @@ fi
%changelog
-* Tue Dec 12 2023 MSVSphere Packaging Team <packager@msvsphere-os.ru> - 7:4.15-6.1
+* Tue Dec 12 2023 MSVSphere Packaging Team <packager@msvsphere-os.ru> - 7:4.15-7
- Rebuilt for MSVSphere 8.8
-* Mon Oct 30 2023 Luboš Uhliarik <luhliari@redhat.com> - 7:4.15-6.1
-- Resolves: RHEL-14799 - squid: squid: Denial of Service in HTTP Digest
-  Authentication
-- Resolves: RHEL-14775 - squid: squid: Request/Response smuggling in HTTP/1.1
-  and ICAP
+* Wed Aug 16 2023 Luboš Uhliarik <luhliari@redhat.com> - 7:4.15-7
+- Resolves: #2076717 - Crash with half_closed_client on
* Thu Dec 08 2022 Tomas Korbar <tkorbar@redhat.com> - 4.15-6
- Resolves: #2072988 - [RFE] Add the "IP_BIND_ADDRESS_NO_PORT"
