import 389-ds-base-1.4.3.35-2.module+el8.8.0+19693+b24f535c

i8c-stream-1.4 changed/i8c-stream-1.4/389-ds-base-1.4.3.35-2.module+el8.8.0+19693+b24f535c
MSVSphere Packaging Team
parent f6f67e866c
commit 67e21fad93

@@ -0,0 +1,262 @@
From 30fb67855d7b3da6ed98d177e416145d5767a7d4 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 6 Jun 2023 12:49:50 -0400
Subject: [PATCH 01/11] Issue 5789 - Improve ds-replcheck error handling
Description: When replication is not fully configured, the tool outputs vague
messages. These have been cleaned up to indicate that
replication was not initialized. Also added a healthcheck check for this condition.
Relates: https://github.com/389ds/389-ds-base/issues/5789
Reviewed by: tbordaz, spichugi, progier (Thanks!!!)
---
ldap/admin/src/scripts/ds-replcheck | 35 +++++++++++--------
.../src/lib/replication/replTasks.jsx | 2 +-
src/cockpit/389-console/src/replication.jsx | 3 +-
src/lib389/lib389/cli_conf/replication.py | 7 +++-
src/lib389/lib389/config.py | 2 +-
src/lib389/lib389/lint.py | 10 ++++++
src/lib389/lib389/replica.py | 20 +++++++++--
7 files changed, 58 insertions(+), 21 deletions(-)
diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
index f411f357a..efa13ffe8 100755
--- a/ldap/admin/src/scripts/ds-replcheck
+++ b/ldap/admin/src/scripts/ds-replcheck
@@ -1,7 +1,7 @@
#!/usr/bin/python3
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2021 Red Hat, Inc.
+# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -216,17 +216,17 @@ def get_ruv_state(opts):
mtime = get_ruv_time(opts['supplier_ruv'], opts['rid'])
rtime = get_ruv_time(opts['replica_ruv'], opts['rid'])
if mtime == -1:
- repl_state = "Replication State: Replica ID ({}) not found in Supplier's RUV".format(opts['rid'])
+ repl_state = f"Replication State: Replica ID ({opts['rid']}) not found in Supplier's RUV"
elif rtime == -1:
- repl_state = "Replication State: Replica ID ({}) not found in Replica's RUV (not initialized?)".format(opts['rid'])
+ repl_state = f"Replication State: Replica ID ({opts['rid']}) not found in Replica's RUV (not initialized?)"
elif mtime == 0:
repl_state = "Replication State: Supplier has not seen any updates"
elif rtime == 0:
repl_state = "Replication State: Replica has not seen any changes from the Supplier"
elif mtime > rtime:
- repl_state = "Replication State: Replica is behind Supplier by: {} seconds".format(mtime - rtime)
+ repl_state = f"Replication State: Replica is behind Supplier by: {mtime - rtime} seconds"
elif mtime < rtime:
- repl_state = "Replication State: Replica is ahead of Supplier by: {} seconds".format(rtime - mtime)
+ repl_state = f"Replication State: Replica is ahead of Supplier by: {rtime - mtime} seconds"
else:
repl_state = "Replication State: Supplier and Replica are in perfect synchronization"
@@ -928,7 +928,7 @@ def check_for_diffs(mentries, mglue, rentries, rglue, report, opts):
return report
-def validate_suffix(ldapnode, suffix, hostname):
+def validate_suffix(ldapnode, suffix, hostname, port):
"""Validate that the suffix exists
:param ldapnode - The LDAP object
:param suffix - The suffix to validate
@@ -938,10 +938,11 @@ def validate_suffix(ldapnode, suffix, hostname):
try:
ldapnode.search_s(suffix, ldap.SCOPE_BASE)
except ldap.NO_SUCH_OBJECT:
- print("Error: Failed to validate suffix in {}. {} does not exist.".format(hostname, suffix))
+ print(f"Error: Failed to validate suffix in {hostname}:{port}. {suffix} " +
+ "does not exist. Replica might need to be initialized.")
return False
except ldap.LDAPError as e:
- print("Error: failed to validate suffix in {} ({}). ".format(hostname, str(e)))
+ print(f"Error: failed to validate suffix in {hostname}:{port} ({str(e)}). ")
return False
# Check suffix is replicated
@@ -949,10 +950,10 @@ def validate_suffix(ldapnode, suffix, hostname):
replica_filter = "(&(objectclass=nsds5replica)(nsDS5ReplicaRoot=%s))" % suffix
supplier_replica = ldapnode.search_s("cn=config",ldap.SCOPE_SUBTREE,replica_filter)
if (len(supplier_replica) != 1):
- print("Error: Failed to validate suffix in {}. {} is not replicated.".format(hostname, suffix))
+ print(f"Error: Failed to validate suffix in {hostname}:{port}. {suffix} is not replicated.")
return False
except ldap.LDAPError as e:
- print("Error: failed to validate suffix in {} ({}). ".format(hostname, str(e)))
+ print(f"Error: failed to validate suffix in {hostname}:{port} ({str(e)}). ")
return False
return True
@@ -1034,10 +1035,10 @@ def connect_to_replicas(opts):
# Validate suffix
if opts['verbose']:
print ("Validating suffix ...")
- if not validate_suffix(supplier, opts['suffix'], opts['mhost']):
+ if not validate_suffix(supplier, opts['suffix'], opts['mhost'], opts['mport']):
sys.exit(1)
- if not validate_suffix(replica,opts['suffix'], opts['rhost']):
+ if not validate_suffix(replica,opts['suffix'], opts['rhost'], opts['rport']):
sys.exit(1)
# Get the RUVs
@@ -1048,8 +1049,11 @@ def connect_to_replicas(opts):
if len(supplier_ruv) > 0:
opts['supplier_ruv'] = ensure_list_str(supplier_ruv[0][1]['nsds50ruv'])
else:
- print("Error: Supplier does not have an RUV entry")
+ print("Error: Supplier does not have an RUV entry. It might need to be initialized.")
sys.exit(1)
+ except ldap.NO_SUCH_OBJECT:
+ print("Error: Supplier does not have an RUV entry. It might need to be initialized.")
+ sys.exit(1)
except ldap.LDAPError as e:
print("Error: Failed to get Supplier RUV entry: {}".format(str(e)))
sys.exit(1)
@@ -1061,8 +1065,11 @@ def connect_to_replicas(opts):
if len(replica_ruv) > 0:
opts['replica_ruv'] = ensure_list_str(replica_ruv[0][1]['nsds50ruv'])
else:
- print("Error: Replica does not have an RUV entry")
+ print("Error: Replica does not have an RUV entry. It might need to be initialized.")
sys.exit(1)
+ except ldap.NO_SUCH_OBJECT:
+ print("Error: Replica does not have an RUV entry. It might need to be initialized.")
+ sys.exit(1)
except ldap.LDAPError as e:
print("Error: Failed to get Replica RUV entry: {}".format(str(e)))
sys.exit(1)
diff --git a/src/cockpit/389-console/src/lib/replication/replTasks.jsx b/src/cockpit/389-console/src/lib/replication/replTasks.jsx
index 9387af325..d592995f8 100644
--- a/src/cockpit/389-console/src/lib/replication/replTasks.jsx
+++ b/src/cockpit/389-console/src/lib/replication/replTasks.jsx
@@ -345,7 +345,7 @@ export class ReplRUV extends React.Component {
if (localRID == "") {
localRUV =
- <div className="ds-indent">
+ <div className="ds-indent ds-margin-top">
<i>
There is no local RUV, the database might not have been initialized yet.
</i>
diff --git a/src/cockpit/389-console/src/replication.jsx b/src/cockpit/389-console/src/replication.jsx
index c2598c118..76b8da486 100644
--- a/src/cockpit/389-console/src/replication.jsx
+++ b/src/cockpit/389-console/src/replication.jsx
@@ -880,7 +880,8 @@ export class Replication extends React.Component {
})
.fail(err => {
const errMsg = JSON.parse(err);
- if (errMsg.desc !== "No such object") {
+ if (errMsg.desc !== "No such object" &&
+ !errMsg.desc.includes('There is no RUV for suffix')) {
this.props.addNotification(
"error",
`Error loading suffix RUV - ${errMsg.desc}`
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
index 8a919da98..2c9501cde 100644
--- a/src/lib389/lib389/cli_conf/replication.py
+++ b/src/lib389/lib389/cli_conf/replication.py
@@ -115,10 +115,15 @@ def _args_to_attrs(args):
#
def get_ruv(inst, basedn, log, args):
replicas = Replicas(inst)
- replica = replicas.get(args.suffix)
+ try:
+ replica = replicas.get(args.suffix)
+ except ldap.NO_SUCH_OBJECT:
+ raise ValueError(f"Suffix '{args.suffix}' is not configured for replication.")
ruv = replica.get_ruv()
ruv_dict = ruv.format_ruv()
ruvs = ruv_dict['ruvs']
+ if len(ruvs) == 0:
+ raise ValueError(f"There is no RUV for suffix {args.suffix}. Replica is not initialized.")
if args and args.json:
log.info(json.dumps({"type": "list", "items": ruvs}, indent=4))
else:
diff --git a/src/lib389/lib389/config.py b/src/lib389/lib389/config.py
index c178eb02f..00d38463a 100644
--- a/src/lib389/lib389/config.py
+++ b/src/lib389/lib389/config.py
@@ -209,7 +209,7 @@ class Config(DSLdapObject):
yield report
def _lint_passwordscheme(self):
- allowed_schemes = ['SSHA512', 'PBKDF2_SHA256', 'GOST_YESCRYPT']
+ allowed_schemes = ['PBKDF2-SHA512', 'PBKDF2_SHA256', 'PBKDF2_SHA512', 'GOST_YESCRYPT']
u_password_scheme = self.get_attr_val_utf8('passwordStorageScheme')
u_root_scheme = self.get_attr_val_utf8('nsslapd-rootpwstoragescheme')
if u_root_scheme not in allowed_schemes or u_password_scheme not in allowed_schemes:
diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
index ce23d5c12..0219801f4 100644
--- a/src/lib389/lib389/lint.py
+++ b/src/lib389/lib389/lint.py
@@ -310,6 +310,16 @@ because the consumer server is not reachable.""",
'fix': """Check if the consumer is running, and also check the errors log for more information."""
}
+DSREPLLE0006 = {
+ 'dsle': 'DSREPLLE0006',
+ 'severity': 'MEDIUM',
+ 'description': 'Replication has not been initialized',
+ 'items': ['Replication'],
+ 'detail': """The replication for "SUFFIX" does not appear to be initialzied,
+because there is no RUV found for the suffix.""",
+ 'fix': """Initialize this replica from a primary supplier replica"""
+}
+
# Replication changelog
DSCLLE0001 = {
'dsle': 'DSCLLE0001',
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index f0c71cbeb..8b0243345 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -45,7 +45,7 @@ from lib389.idm.services import ServiceAccounts
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.conflicts import ConflictEntries
from lib389.lint import (DSREPLLE0001, DSREPLLE0002, DSREPLLE0003, DSREPLLE0004,
- DSREPLLE0005, DSCLLE0001)
+ DSREPLLE0005, DSREPLLE0006, DSCLLE0001)
class ReplicaLegacy(object):
@@ -1207,6 +1207,20 @@ class Replica(DSLdapObject):
report['check'] = f'replication:conflicts'
yield report
+ def _lint_no_ruv(self):
+ # No RUV means replica has not been initialized
+ replicas = Replicas(self._instance).list()
+ for replica in replicas:
+ ruv = replica.get_ruv()
+ ruv_dict = ruv.format_ruv()
+ ruvs = ruv_dict['ruvs']
+ suffix = replica.get_suffix()
+ if len(ruvs) == 0:
+ report = copy.deepcopy(DSREPLLE0006)
+ report['detail'] = report['detail'].replace('SUFFIX', suffix)
+ report['check'] = 'replication'
+ yield report
+
def _validate(self, rdn, properties, basedn):
(tdn, str_props) = super(Replica, self)._validate(rdn, properties, basedn)
# We override the tdn here. We use the MT for the suffix.
@@ -1587,8 +1601,8 @@ class Replica(DSLdapObject):
serverctrls=self._server_controls, clientctrls=self._client_controls,
escapehatch='i am sure')[0]
data = ensure_list_str(ent.getValues('nsds50ruv'))
- except IndexError:
- # There is no ruv entry, it's okay
+ except (IndexError, ldap.NO_SUCH_OBJECT):
+ # There are no ruv elements, it's okay
pass
return RUV(data)
--
2.41.0
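
For illustration only (not part of the patch): a minimal lib389 sketch of the condition the new
DSREPLLE0006 check reports, mirroring the _lint_no_ruv logic above. The instance name and bind
password are assumptions.

from lib389 import DirSrv
from lib389.replica import Replicas

inst = DirSrv()
# Assumed local instance "localhost" and Directory Manager password
inst.local_simple_allocate(serverid="localhost", password="password")
inst.open()

for replica in Replicas(inst).list():
    ruvs = replica.get_ruv().format_ruv()['ruvs']
    if len(ruvs) == 0:
        # Same condition the healthcheck flags: no RUV means the replica
        # was never initialized from a supplier
        print(f"No RUV for suffix {replica.get_suffix()} - initialize this replica")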

@@ -0,0 +1,114 @@
From ed9eaf319ea10de570849c0c1a1866d0abb192de Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Mon, 29 May 2023 09:38:21 +0000
Subject: [PATCH 02/11] Issue 5646 - Various memory leaks (#5725)
Bug description: A memory leak occurs when a sync repl search is run
in refreshPersist mode. The connection from the sync repl consumer is
closed without freeing the LDAP request controls.
Fix description: When the connection to the client is closed, or on
shutdown, free the request control structure if it exists.
relates: https://github.com/389ds/389-ds-base/issues/5646
Reviewed by: @progier389, @droideck, @Firstyear, @tbordaz (Thank you)
---
dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py | 5 +----
ldap/servers/plugins/sync/sync_persist.c | 7 +++++++
ldap/servers/plugins/sync/sync_util.c | 4 ++++
3 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
index 375517693..eb3770b78 100644
--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
@@ -231,7 +231,7 @@ class Sync_persist(threading.Thread, ReconnectLDAPObject, SyncreplConsumer):
print('syncrepl_poll: LDAP error (%s)', e)
self.result = ldap_connection.get_cookies()
log.info("ZZZ result = %s" % self.result)
- self.conn.unbind()
+ ldap_connection.unbind()
def test_sync_repl_mep(topology, request):
"""Test sync repl with MEP plugin that triggers several
@@ -406,12 +406,10 @@ def test_sync_repl_cookie_add_del(topology, init_sync_repl_plugins, request):
6.: succeeds
"""
inst = topology[0]
-
# create a sync repl client and wait 5 seconds to be sure it is running
sync_repl = Sync_persist(inst)
sync_repl.start()
time.sleep(5)
-
# create users, that automember/memberof will generate nested updates
users = UserAccounts(inst, DEFAULT_SUFFIX)
users_set = []
@@ -427,7 +425,6 @@ def test_sync_repl_cookie_add_del(topology, init_sync_repl_plugins, request):
# and wait a bit to let sync_repl thread time to set its result before fetching it.
inst.stop()
cookies = sync_repl.get_result()
-
# checking that the cookie are in increasing and in an acceptable range (0..1000)
assert len(cookies) > 0
prev = -1
diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c
index 12b23ebac..d2210b64c 100644
--- a/ldap/servers/plugins/sync/sync_persist.c
+++ b/ldap/servers/plugins/sync/sync_persist.c
@@ -903,6 +903,7 @@ sync_send_results(void *arg)
int conn_acq_flag = 0;
Slapi_Connection *conn = NULL;
Slapi_Operation *op = req->req_orig_op;
+ LDAPControl **ctrls = NULL;
int rc;
PRUint64 connid;
int opid;
@@ -1049,6 +1050,12 @@ done:
slapi_ch_free((void **)&strFilter);
slapi_pblock_set(req->req_pblock, SLAPI_SEARCH_STRFILTER, NULL);
+ slapi_pblock_get(req->req_pblock, SLAPI_REQCONTROLS, &ctrls);
+ if (ctrls) {
+ ldap_controls_free(ctrls);
+ slapi_pblock_set(req->req_pblock, SLAPI_REQCONTROLS, NULL);
+ }
+
slapi_pblock_destroy(req->req_pblock);
req->req_pblock = NULL;
diff --git a/ldap/servers/plugins/sync/sync_util.c b/ldap/servers/plugins/sync/sync_util.c
index 21e160631..605ddf1f3 100644
--- a/ldap/servers/plugins/sync/sync_util.c
+++ b/ldap/servers/plugins/sync/sync_util.c
@@ -689,6 +689,7 @@ sync_pblock_copy(Slapi_PBlock *src)
Slapi_Operation *operation;
Slapi_Operation *operation_new;
Slapi_Connection *connection;
+ LDAPControl **ctrls = NULL;
int *scope;
int *deref;
int *filter_normalized;
@@ -715,8 +716,10 @@ sync_pblock_copy(Slapi_PBlock *src)
slapi_pblock_get(src, SLAPI_REQUESTOR_ISROOT, &isroot);
slapi_pblock_get(src, SLAPI_SEARCH_SIZELIMIT, &sizelimit);
slapi_pblock_get(src, SLAPI_SEARCH_TIMELIMIT, &timelimit);
+ slapi_pblock_get(src, SLAPI_REQCONTROLS, &ctrls);
slapi_pblock_get(src, SLAPI_PLUGIN, &pi);
+
Slapi_PBlock *dest = slapi_pblock_new();
operation_new = slapi_operation_new(0);
msgid = slapi_operation_get_msgid(operation);
@@ -737,6 +740,7 @@ sync_pblock_copy(Slapi_PBlock *src)
slapi_pblock_set(dest, SLAPI_REQUESTOR_ISROOT, &isroot);
slapi_pblock_set(dest, SLAPI_SEARCH_SIZELIMIT, &sizelimit);
slapi_pblock_set(dest, SLAPI_SEARCH_TIMELIMIT, &timelimit);
+ slapi_pblock_set(dest, SLAPI_REQCONTROLS, ctrls);
slapi_pblock_set(dest, SLAPI_PLUGIN, pi);
return dest;
--
2.41.0

@@ -0,0 +1,444 @@
From 3c8836eff5e2047ba07a23c0d1bddb03bc98877c Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 22 Jun 2023 16:33:55 -0400
Subject: [PATCH 03/11] Issue 2375 - CLI - Healthcheck - revise and add new
checks
Description:
Add checks for:
- unauthorized binds being allowed
- access log buffering being disabled
Also make the mapping tree check more robust with respect to case.
relates: https://github.com/389ds/389-ds-base/issues/2375
Reviewed by: spichugi(Thanks!)
---
.../suites/healthcheck/health_config_test.py | 80 ++++++++++++++++++-
.../suites/healthcheck/healthcheck_test.py | 15 +++-
src/cockpit/389-console/package-lock.json | 36 ++++-----
src/lib389/lib389/backend.py | 4 +-
src/lib389/lib389/config.py | 24 +++++-
src/lib389/lib389/lint.py | 32 +++++++-
6 files changed, 163 insertions(+), 28 deletions(-)
diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
index f0337f198..6d3d08bfa 100644
--- a/dirsrvtests/tests/suites/healthcheck/health_config_test.py
+++ b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2022 Red Hat, Inc.
+# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -11,7 +11,7 @@ import pytest
import os
import subprocess
-from lib389.backend import Backends
+from lib389.backend import Backends, DatabaseConfig
from lib389.cos import CosTemplates, CosPointerDefinitions
from lib389.dbgen import dbgen_users
from lib389.idm.account import Accounts
@@ -119,6 +119,7 @@ def test_healthcheck_logging_format_should_be_revised(topology_st):
log.info('Set nsslapd-logging-hr-timestamps-enabled to off')
standalone.config.set('nsslapd-logging-hr-timestamps-enabled', 'off')
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE)
run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE)
@@ -364,6 +365,7 @@ def test_healthcheck_low_disk_space(topology_st):
RET_CODE = 'DSDSLE0001'
standalone = topology_st.standalone
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
file = '{}/foo'.format(standalone.ds_paths.log_dir)
log.info('Count the disk space to allocate')
@@ -411,10 +413,13 @@ def test_healthcheck_notes_unindexed_search(topology_st, setup_ldif):
standalone = topology_st.standalone
log.info('Delete the previous access logs')
- topology_st.standalone.deleteAccessLogs()
+ standalone.deleteAccessLogs()
log.info('Set nsslapd-accesslog-logbuffering to off')
standalone.config.set("nsslapd-accesslog-logbuffering", "off")
+ db_cfg = DatabaseConfig(standalone)
+ db_cfg.set([('nsslapd-idlistscanlimit', '100')])
+
log.info('Stopping the server and running offline import...')
standalone.stop()
@@ -429,6 +434,8 @@ def test_healthcheck_notes_unindexed_search(topology_st, setup_ldif):
log.info('Check that access log contains "notes=A"')
assert standalone.ds_access_log.match(r'.*notes=A.*')
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
+
run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False)
run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True)
@@ -464,6 +471,8 @@ def test_healthcheck_notes_unknown_attribute(topology_st, setup_ldif):
log.info('Set nsslapd-accesslog-logbuffering to off')
standalone.config.set("nsslapd-accesslog-logbuffering", "off")
+ db_cfg = DatabaseConfig(standalone)
+ db_cfg.set([('nsslapd-idlistscanlimit', '100')])
log.info('Stopping the server and running offline import...')
standalone.stop()
@@ -478,9 +487,74 @@ def test_healthcheck_notes_unknown_attribute(topology_st, setup_ldif):
log.info('Check that access log contains "notes=F"')
assert standalone.ds_access_log.match(r'.*notes=F.*')
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False)
run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True)
+def test_healthcheck_unauth_binds(topology_st):
+ """Check if HealthCheck returns DSCLE0003 code when unauthorized binds are
+ allowed
+
+ :id: 13b88a3b-0dc5-4ce9-9fbf-058ad072339b
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Set nsslapd-allow-unauthenticated-binds to on
+ 3. Use HealthCheck without --json option
+ 4. Use HealthCheck with --json option
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Healthcheck reports DSCLE0003
+ 4. Healthcheck reports DSCLE0003
+ """
+
+ RET_CODE = 'DSCLE0003'
+
+ inst = topology_st.standalone
+
+ log.info('nsslapd-allow-unauthenticated-binds to on')
+ inst.config.set("nsslapd-allow-unauthenticated-binds", "on")
+
+ run_healthcheck_and_flush_log(topology_st, inst, RET_CODE, json=False)
+ run_healthcheck_and_flush_log(topology_st, inst, RET_CODE, json=True)
+
+ # reset setting
+ log.info('Reset nsslapd-allow-unauthenticated-binds to off')
+ inst.config.set("nsslapd-allow-unauthenticated-binds", "off")
+
+def test_healthcheck_accesslog_buffering(topology_st):
+ """Check if HealthCheck returns DSCLE0004 code when acccess log biffering
+ is disabled
+
+ :id: 5a6512fd-1c7b-4557-9278-45150423148b
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Set nsslapd-accesslog-logbuffering to off
+ 3. Use HealthCheck without --json option
+ 4. Use HealthCheck with --json option
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Healthcheck reports DSCLE0004
+ 4. Healthcheck reports DSCLE0004
+ """
+
+ RET_CODE = 'DSCLE0004'
+
+ inst = topology_st.standalone
+
+ log.info('nsslapd-accesslog-logbuffering to off')
+ inst.config.set("nsslapd-accesslog-logbuffering", "off")
+
+ run_healthcheck_and_flush_log(topology_st, inst, RET_CODE, json=False)
+ run_healthcheck_and_flush_log(topology_st, inst, RET_CODE, json=True)
+
+ # reset setting
+ log.info('Reset nsslapd-accesslog-logbuffering to on')
+ inst.config.set("nsslapd-accesslog-logbuffering", "on")
+
if __name__ == '__main__':
# Run isolated
diff --git a/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py b/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py
index 1c83b53ff..83b529024 100644
--- a/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py
+++ b/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2020 Red Hat, Inc.
+# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -91,6 +91,7 @@ def test_healthcheck_disabled_suffix(topology_st):
mts = MappingTrees(topology_st.standalone)
mt = mts.get(DEFAULT_SUFFIX)
mt.replace("nsslapd-state", "disabled")
+ topology_st.standalone.config.set("nsslapd-accesslog-logbuffering", "on")
run_healthcheck_and_flush_log(topology_st, topology_st.standalone, RET_CODE, json=False)
run_healthcheck_and_flush_log(topology_st, topology_st.standalone, RET_CODE, json=True)
@@ -187,6 +188,8 @@ def test_healthcheck_list_errors(topology_st):
'DSCERTLE0002 :: Certificate expired',
'DSCLE0001 :: Different log timestamp format',
'DSCLE0002 :: Weak passwordStorageScheme',
+ 'DSCLE0003 :: Unauthorized Binds Allowed',
+ 'DSCLE0004 :: Access Log buffering disabled',
'DSCLLE0001 :: Changelog trimming not configured',
'DSDSLE0001 :: Low disk space',
'DSELE0001 :: Weak TLS protocol version',
@@ -231,6 +234,8 @@ def test_healthcheck_check_option(topology_st):
output_list = ['config:hr_timestamp',
'config:passwordscheme',
+ # 'config:accesslog_buffering', Skip test access log buffering is disabled
+ 'config:unauth_binds',
'backends:userroot:mappingtree',
'backends:userroot:search',
'backends:userroot:virt_attrs',
@@ -238,9 +243,11 @@ def test_healthcheck_check_option(topology_st):
'fschecks:file_perms',
'refint:attr_indexes',
'refint:update_delay',
+ 'memberof:member_attr_indexes',
'monitor-disk-space:disk_space',
'replication:agmts_status',
'replication:conflicts',
+ 'replication:no_ruv',
'dseldif:nsstate',
'tls:certificate_expiration',
'logs:notes']
@@ -308,6 +315,8 @@ def test_healthcheck_replication(topology_m2):
# If we don't set changelog trimming, we will get error DSCLLE0001
set_changelog_trimming(M1)
set_changelog_trimming(M2)
+ M1.config.set("nsslapd-accesslog-logbuffering", "on")
+ M2.config.set("nsslapd-accesslog-logbuffering", "on")
log.info('Run healthcheck for supplier1')
run_healthcheck_and_flush_log(topology_m2, M1, CMD_OUTPUT, json=False)
@@ -347,6 +356,8 @@ def test_healthcheck_replication_tls(topology_m2):
M2.enable_tls()
log.info('Run healthcheck for supplier1')
+ M1.config.set("nsslapd-accesslog-logbuffering", "on")
+ M2.config.set("nsslapd-accesslog-logbuffering", "on")
run_healthcheck_and_flush_log(topology_m2, M1, CMD_OUTPUT, json=False)
run_healthcheck_and_flush_log(topology_m2, M1, JSON_OUTPUT, json=True)
@@ -399,7 +410,7 @@ def test_healthcheck_backend_missing_mapping_tree(topology_st):
mts.create(properties={
'cn': DEFAULT_SUFFIX,
'nsslapd-state': 'backend',
- 'nsslapd-backend': 'userRoot',
+ 'nsslapd-backend': 'USERROOT',
})
run_healthcheck_and_flush_log(topology_st, standalone, CMD_OUTPUT, json=False)
diff --git a/src/cockpit/389-console/package-lock.json b/src/cockpit/389-console/package-lock.json
index 787cc6b7a..cf2b4665c 100644
--- a/src/cockpit/389-console/package-lock.json
+++ b/src/cockpit/389-console/package-lock.json
@@ -2657,9 +2657,9 @@
}
},
"node_modules/audit-ci/node_modules/semver": {
- "version": "7.3.8",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz",
- "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==",
+ "version": "7.5.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.3.tgz",
+ "integrity": "sha512-QBlUtyVk/5EeHbi7X0fw6liDZc7BBmEaSYn01fMU1OUYbf6GPsbTtd8WmnqbI20SeycoHSeiybkE/q1Q+qlThQ==",
"dev": true,
"dependencies": {
"lru-cache": "^6.0.0"
@@ -3240,9 +3240,9 @@
}
},
"node_modules/css-loader/node_modules/semver": {
- "version": "7.3.8",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz",
- "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==",
+ "version": "7.5.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.3.tgz",
+ "integrity": "sha512-QBlUtyVk/5EeHbi7X0fw6liDZc7BBmEaSYn01fMU1OUYbf6GPsbTtd8WmnqbI20SeycoHSeiybkE/q1Q+qlThQ==",
"dev": true,
"dependencies": {
"lru-cache": "^6.0.0"
@@ -4469,9 +4469,9 @@
}
},
"node_modules/eslint/node_modules/semver": {
- "version": "7.3.8",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz",
- "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==",
+ "version": "7.5.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.3.tgz",
+ "integrity": "sha512-QBlUtyVk/5EeHbi7X0fw6liDZc7BBmEaSYn01fMU1OUYbf6GPsbTtd8WmnqbI20SeycoHSeiybkE/q1Q+qlThQ==",
"dependencies": {
"lru-cache": "^6.0.0"
},
@@ -10677,9 +10677,9 @@
}
},
"semver": {
- "version": "7.3.8",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz",
- "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==",
+ "version": "7.5.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.3.tgz",
+ "integrity": "sha512-QBlUtyVk/5EeHbi7X0fw6liDZc7BBmEaSYn01fMU1OUYbf6GPsbTtd8WmnqbI20SeycoHSeiybkE/q1Q+qlThQ==",
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
@@ -11097,9 +11097,9 @@
}
},
"semver": {
- "version": "7.3.8",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz",
- "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==",
+ "version": "7.5.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.3.tgz",
+ "integrity": "sha512-QBlUtyVk/5EeHbi7X0fw6liDZc7BBmEaSYn01fMU1OUYbf6GPsbTtd8WmnqbI20SeycoHSeiybkE/q1Q+qlThQ==",
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
@@ -11699,9 +11699,9 @@
}
},
"semver": {
- "version": "7.3.8",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz",
- "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==",
+ "version": "7.5.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.3.tgz",
+ "integrity": "sha512-QBlUtyVk/5EeHbi7X0fw6liDZc7BBmEaSYn01fMU1OUYbf6GPsbTtd8WmnqbI20SeycoHSeiybkE/q1Q+qlThQ==",
"requires": {
"lru-cache": "^6.0.0"
}
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
index d1094aa61..9acced205 100644
--- a/src/lib389/lib389/backend.py
+++ b/src/lib389/lib389/backend.py
@@ -498,11 +498,11 @@ class Backend(DSLdapObject):
* missing indices if we are local and have log access?
"""
# Check for the missing mapping tree.
- suffix = self.get_attr_val_utf8('nsslapd-suffix')
+ suffix = self.get_attr_val_utf8_l('nsslapd-suffix')
bename = self.lint_uid()
try:
mt = self._mts.get(suffix)
- if mt.get_attr_val_utf8('nsslapd-backend') != bename and mt.get_attr_val_utf8('nsslapd-state') != 'backend':
+ if mt.get_attr_val_utf8_l('nsslapd-backend') != bename.lower() and mt.get_attr_val_utf8('nsslapd-state') != 'backend':
raise ldap.NO_SUCH_OBJECT("We have a matching suffix, but not a backend or correct database name.")
except ldap.NO_SUCH_OBJECT:
result = DSBLE0001
diff --git a/src/lib389/lib389/config.py b/src/lib389/lib389/config.py
index 00d38463a..b1a474ebe 100644
--- a/src/lib389/lib389/config.py
+++ b/src/lib389/lib389/config.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2020 Red Hat, Inc.
+# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -22,7 +22,9 @@ from lib389._constants import *
from lib389 import Entry
from lib389._mapped_object import DSLdapObject
from lib389.utils import ensure_bytes, selinux_label_port, selinux_present
-from lib389.lint import DSCLE0001, DSCLE0002, DSELE0001
+from lib389.lint import (
+ DSCLE0001, DSCLE0002, DSCLE0003, DSCLE0004, DSELE0001
+)
class Config(DSLdapObject):
"""
@@ -218,6 +220,24 @@ class Config(DSLdapObject):
report['check'] = "config:passwordscheme"
yield report
+ def _lint_unauth_binds(self):
+ # Allow unauthenticated binds
+ unauthbinds = self.get_attr_val_utf8_l('nsslapd-allow-unauthenticated-binds')
+ if unauthbinds == "on":
+ report = copy.deepcopy(DSCLE0003)
+ report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid)
+ report['check'] = "config:unauthorizedbinds"
+ yield report
+
+ def _lint_accesslog_buffering(self):
+ # access log buffering
+ buffering = self.get_attr_val_utf8_l('nsslapd-accesslog-logbuffering')
+ if buffering == "off":
+ report = copy.deepcopy(DSCLE0004)
+ report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid)
+ report['check'] = "config:accesslogbuffering"
+ yield report
+
def disable_plaintext_port(self):
"""
Configure the server to not-provide the plaintext port.
diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
index 0219801f4..7ca524315 100644
--- a/src/lib389/lib389/lint.py
+++ b/src/lib389/lib389/lint.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2022 Red Hat, Inc.
+# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -113,6 +113,36 @@ You can also use 'dsconf' to replace these values. Here is an example:
# dsconf slapd-YOUR_INSTANCE config replace passwordStorageScheme=PBKDF2-SHA512 nsslapd-rootpwstoragescheme=PBKDF2-SHA512"""
}
+DSCLE0003 = {
+ 'dsle': 'DSCLE0003',
+ 'severity': 'MEDIUM',
+ 'description': 'Unauthorized Binds Allowed',
+ 'items': ['cn=config', ],
+ 'detail': """nsslapd-allow-unauthenticated-binds is set to 'on' this can
+lead to unexpected results with clients and potential security issues
+""",
+ 'fix': """Set nsslapd-allow-unauthenticated-binds to off.
+You can use 'dsconf' to set this attribute. Here is an example:
+
+ # dsconf slapd-YOUR_INSTANCE config replace nsslapd-allow-unauthenticated-binds=off"""
+}
+
+DSCLE0004 = {
+ 'dsle': 'DSCLE0004',
+ 'severity': 'LOW',
+ 'description': 'Access Log buffering disabled',
+ 'items': ['cn=config', ],
+ 'detail': """nsslapd-accesslog-logbuffering is set to 'off' this will cause high
+disk IO and can significantly impact server performance. This should only be used
+for debug purposes
+""",
+ 'fix': """Set nsslapd-accesslog-logbuffering to 'on'.
+You can use 'dsconf' to set this attribute. Here is an example:
+
+ # dsconf slapd-YOUR_INSTANCE config replace nsslapd-accesslog-logbuffering=on
+"""
+}
+
# Security checks
DSELE0001 = {
'dsle': 'DSELE0001',
--
2.41.0
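
As a rough usage sketch (not part of the patch), the two settings the new DSCLE0003/DSCLE0004
checks inspect can be read and reset through lib389; the instance name and password below are
assumptions.

from lib389 import DirSrv

inst = DirSrv()
inst.local_simple_allocate(serverid="localhost", password="password")  # assumed instance
inst.open()

# Values the healthcheck treats as healthy
inst.config.set("nsslapd-allow-unauthenticated-binds", "off")
inst.config.set("nsslapd-accesslog-logbuffering", "on")

print(inst.config.get_attr_val_utf8("nsslapd-allow-unauthenticated-binds"))
print(inst.config.get_attr_val_utf8("nsslapd-accesslog-logbuffering"))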

@@ -0,0 +1,74 @@
From a490d2428223b6409c0067ddb21936a1a7797cdd Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 5 Jul 2023 13:52:50 -0400
Subject: [PATCH 04/11] Issue 5825 - healthcheck - password storage scheme
warning needs more info
Description: Add the current/insecure scheme to the report, and state which
config setting is insecure.
relates: https://github.com/389ds/389-ds-base/issues/5825
Reviewed by: jchapman & spichugi(Thanks!!)
---
src/lib389/lib389/config.py | 13 ++++++++++++-
src/lib389/lib389/lint.py | 10 +++-------
2 files changed, 15 insertions(+), 8 deletions(-)
diff --git a/src/lib389/lib389/config.py b/src/lib389/lib389/config.py
index b1a474ebe..81bf8ec66 100644
--- a/src/lib389/lib389/config.py
+++ b/src/lib389/lib389/config.py
@@ -214,9 +214,20 @@ class Config(DSLdapObject):
allowed_schemes = ['PBKDF2-SHA512', 'PBKDF2_SHA256', 'PBKDF2_SHA512', 'GOST_YESCRYPT']
u_password_scheme = self.get_attr_val_utf8('passwordStorageScheme')
u_root_scheme = self.get_attr_val_utf8('nsslapd-rootpwstoragescheme')
- if u_root_scheme not in allowed_schemes or u_password_scheme not in allowed_schemes:
+ if u_root_scheme not in allowed_schemes:
report = copy.deepcopy(DSCLE0002)
+ report['detail'] = report['detail'].replace('SCHEME', u_root_scheme)
+ report['detail'] = report['detail'].replace('CONFIG', 'nsslapd-rootpwstoragescheme')
report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid)
+ report['fix'] = report['fix'].replace('CONFIG', 'nsslapd-rootpwstoragescheme')
+ report['check'] = "config:passwordscheme"
+ yield report
+ if u_password_scheme not in allowed_schemes:
+ report = copy.deepcopy(DSCLE0002)
+ report['detail'] = report['detail'].replace('SCHEME', u_password_scheme)
+ report['detail'] = report['detail'].replace('CONFIG', 'passwordStorageScheme')
+ report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid)
+ report['fix'] = report['fix'].replace('CONFIG', 'passwordStorageScheme')
report['check'] = "config:passwordscheme"
yield report
diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
index 7ca524315..475ab08bd 100644
--- a/src/lib389/lib389/lint.py
+++ b/src/lib389/lib389/lint.py
@@ -97,20 +97,16 @@ verify, as this adds a cost of work to an attacker.
In Directory Server, we offer one hash suitable for this (PBKDF2-SHA512) and one hash
for "legacy" support (SSHA512).
-Your configuration does not use these for password storage or the root password storage
-scheme.
+Your configured scheme (SCHEME) for 'CONFIG' is not secure
""",
'fix': """Perform a configuration reset of the values:
-passwordStorageScheme
-nsslapd-rootpwstoragescheme
-
-IE, stop Directory Server, and in dse.ldif delete these two lines. When Directory Server
+IE, stop Directory Server, and in dse.ldif delete this line (CONFIG). When Directory Server
is started, they will use the server provided defaults that are secure.
You can also use 'dsconf' to replace these values. Here is an example:
- # dsconf slapd-YOUR_INSTANCE config replace passwordStorageScheme=PBKDF2-SHA512 nsslapd-rootpwstoragescheme=PBKDF2-SHA512"""
+ # dsconf slapd-YOUR_INSTANCE config replace CONFIG=PBKDF2-SHA512"""
}
DSCLE0003 = {
--
2.41.0
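
A small sketch (not from the patch) of the per-setting reporting this change introduces: each
scheme is now evaluated on its own, so a hypothetical helper like the one below, given a lib389
Config object, would return exactly the settings DSCLE0002 flags.

def insecure_pw_schemes(config):
    # Schemes the lint check accepts, per the allowed_schemes list above
    allowed = {'PBKDF2-SHA512', 'PBKDF2_SHA256', 'PBKDF2_SHA512', 'GOST_YESCRYPT'}
    flagged = []
    for attr in ('nsslapd-rootpwstoragescheme', 'passwordStorageScheme'):
        value = config.get_attr_val_utf8(attr)
        if value not in allowed:
            # One report per insecure setting, naming the setting and its scheme
            flagged.append((attr, value))
    return flagged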

@@ -0,0 +1,615 @@
From b9c26c9c21e4c4b81ebdc883873bb2f571da24c3 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 18 Jul 2023 11:17:07 +0200
Subject: [PATCH 05/11] Issue 4551 - Paged search impacts performance (#5838)
Problem:
A script looping on a paged-results search greatly impacts the performance of other clients
(for example, the ldclt bind+search rate decreased by 80% in the test case).
Cause:
The paged-results fields in the connection were protected by the connection mutex, which is also used by the listener thread; in some cases this causes contention that delays the handling of new operations.
Solution:
Do not rely on the connection mutex to protect the paged-results context, but on a dedicated array of locks.
(cherry picked from commit 3c510e0a26e321949b552b5e8c887634d9d7e63e)
---
ldap/servers/slapd/daemon.c | 1 +
ldap/servers/slapd/main.c | 2 +
ldap/servers/slapd/opshared.c | 26 +++---
ldap/servers/slapd/pagedresults.c | 134 ++++++++++++++++++------------
ldap/servers/slapd/proto-slap.h | 3 +
5 files changed, 101 insertions(+), 65 deletions(-)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 9eed67892..388fa0943 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1367,6 +1367,7 @@ slapd_daemon(daemon_ports_t *ports)
slapi_log_err(SLAPI_LOG_TRACE, "slapd_daemon",
"slapd shutting down - waiting for backends to close down\n");
+ pageresult_lock_cleanup();
eq_stop(); /* deprecated */
eq_stop_rel();
if (!in_referral_mode) {
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index fe7f74e91..ed2de90b1 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -989,6 +989,7 @@ main(int argc, char **argv)
eq_init_rel(); /* must be done before plugins started */
ps_init_psearch_system(); /* must come before plugin_startall() */
+ pageresult_lock_init();
/* initialize UniqueID generator - must be done once backends are started
@@ -2214,6 +2215,7 @@ slapd_exemode_db2ldif(int argc, char **argv, struct main_config *mcfg)
eq_init_rel(); /* must be done before plugins started */
ps_init_psearch_system(); /* must come before plugin_startall() */
+ pageresult_lock_init();
plugin_startall(argc, argv, plugin_list);
eq_start(); /* must be done after plugins started - DEPRECATED*/
eq_start_rel(); /* must be done after plugins started */
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 905a81f0f..897b9566e 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -271,6 +271,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
int pr_idx = -1;
Slapi_DN *orig_sdn = NULL;
int free_sdn = 0;
+ pthread_mutex_t *pagedresults_mutex = NULL;
be_list[0] = NULL;
referral_list[0] = NULL;
@@ -577,6 +578,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
int32_t tlimit;
slapi_pblock_get(pb, SLAPI_SEARCH_TIMELIMIT, &tlimit);
pagedresults_set_timelimit(pb_conn, operation, (time_t)tlimit, pr_idx);
+ pagedresults_mutex = pageresult_lock_get_addr(pb_conn);
}
/*
@@ -694,10 +696,10 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
/* PAGED RESULTS and already have the search results from the prev op */
pagedresults_lock(pb_conn, pr_idx);
/*
- * In async paged result case, the search result might be released
- * by other theads. We need to double check it in the locked region.
- */
- pthread_mutex_lock(&(pb_conn->c_mutex));
+ * In async paged result case, the search result might be released
+ * by other theads. We need to double check it in the locked region.
+ */
+ pthread_mutex_lock(pagedresults_mutex);
pr_search_result = pagedresults_get_search_result(pb_conn, operation, 1 /*locked*/, pr_idx);
if (pr_search_result) {
if (pagedresults_is_abandoned_or_notavailable(pb_conn, 1 /*locked*/, pr_idx)) {
@@ -705,7 +707,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
/* Previous operation was abandoned and the simplepaged object is not in use. */
send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL);
rc = LDAP_SUCCESS;
- pthread_mutex_unlock(&(pb_conn->c_mutex));
+ pthread_mutex_unlock(pagedresults_mutex);
goto free_and_return;
} else {
slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, pr_search_result);
@@ -719,7 +721,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
pr_stat = PAGEDRESULTS_SEARCH_END;
rc = LDAP_SUCCESS;
}
- pthread_mutex_unlock(&(pb_conn->c_mutex));
+ pthread_mutex_unlock(pagedresults_mutex);
pagedresults_unlock(pb_conn, pr_idx);
if ((PAGEDRESULTS_SEARCH_END == pr_stat) || (0 == pnentries)) {
@@ -844,10 +846,10 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
/* PAGED RESULTS */
if (op_is_pagedresults(operation)) {
/* cleanup the slot */
- pthread_mutex_lock(&(pb_conn->c_mutex));
+ pthread_mutex_lock(pagedresults_mutex);
pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx);
rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1);
- pthread_mutex_unlock(&(pb_conn->c_mutex));
+ pthread_mutex_unlock(pagedresults_mutex);
}
if (1 == flag_no_such_object) {
break;
@@ -888,11 +890,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
if ((PAGEDRESULTS_SEARCH_END == pr_stat) || (0 == pnentries)) {
/* no more entries, but at least another backend */
- pthread_mutex_lock(&(pb_conn->c_mutex));
+ pthread_mutex_lock(pagedresults_mutex);
pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx);
be->be_search_results_release(&sr);
rc = pagedresults_set_current_be(pb_conn, next_be, pr_idx, 1);
- pthread_mutex_unlock(&(pb_conn->c_mutex));
+ pthread_mutex_unlock(pagedresults_mutex);
pr_stat = PAGEDRESULTS_SEARCH_END; /* make sure stat is SEARCH_END */
if (NULL == next_be) {
/* no more entries && no more backends */
@@ -920,9 +922,9 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
next_be = NULL; /* to break the loop */
if (operation->o_status & SLAPI_OP_STATUS_ABANDONED) {
/* It turned out this search was abandoned. */
- pthread_mutex_lock(&(pb_conn->c_mutex));
+ pthread_mutex_lock(pagedresults_mutex);
pagedresults_free_one_msgid_nolock(pb_conn, operation->o_msgid);
- pthread_mutex_unlock(&(pb_conn->c_mutex));
+ pthread_mutex_unlock(pagedresults_mutex);
/* paged-results-request was abandoned; making an empty cookie. */
pagedresults_set_response_control(pb, 0, estimate, -1, pr_idx);
send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL);
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
index e3444e944..01fe3370f 100644
--- a/ldap/servers/slapd/pagedresults.c
+++ b/ldap/servers/slapd/pagedresults.c
@@ -12,6 +12,34 @@
#include "slap.h"
+#define LOCK_HASH_SIZE 997 /* Should be a prime number */
+
+static pthread_mutex_t *lock_hash = NULL;
+
+void
+pageresult_lock_init()
+{
+ lock_hash = (pthread_mutex_t *)slapi_ch_calloc(LOCK_HASH_SIZE, sizeof(pthread_mutex_t));
+ for (size_t i=0; i<LOCK_HASH_SIZE; i++) {
+ pthread_mutex_init(&lock_hash[i], NULL);
+ }
+}
+
+void
+pageresult_lock_cleanup()
+{
+ for (size_t i=0; i<LOCK_HASH_SIZE; i++) {
+ pthread_mutex_destroy(&lock_hash[i]);
+ }
+ slapi_ch_free((void**)&lock_hash);
+}
+
+pthread_mutex_t *
+pageresult_lock_get_addr(Connection *conn)
+{
+ return &lock_hash[(((size_t)conn)/sizeof (Connection))%LOCK_HASH_SIZE];
+}
+
/* helper function to clean up one prp slot */
static void
_pr_cleanup_one_slot(PagedResults *prp)
@@ -98,7 +126,7 @@ pagedresults_parse_control_value(Slapi_PBlock *pb,
return LDAP_UNWILLING_TO_PERFORM;
}
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
/* the ber encoding is no longer needed */
ber_free(ber, 1);
if (cookie.bv_len <= 0) {
@@ -206,7 +234,7 @@ bail:
}
}
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_parse_control_value",
"<= idx %d\n", *index);
@@ -300,7 +328,7 @@ pagedresults_free_one(Connection *conn, Operation *op, int index)
slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_free_one",
"=> idx=%d\n", index);
if (conn && (index > -1)) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (conn->c_pagedresults.prl_count <= 0) {
slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_free_one",
"conn=%" PRIu64 " paged requests list count is %d\n",
@@ -311,7 +339,7 @@ pagedresults_free_one(Connection *conn, Operation *op, int index)
conn->c_pagedresults.prl_count--;
rc = 0;
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_free_one", "<= %d\n", rc);
@@ -319,7 +347,7 @@ pagedresults_free_one(Connection *conn, Operation *op, int index)
}
/*
- * Used for abandoning - conn->c_mutex is already locked in do_abandone.
+ * Used for abandoning - pageresult_lock_get_addr(conn) is already locked in do_abandone.
*/
int
pagedresults_free_one_msgid_nolock(Connection *conn, ber_int_t msgid)
@@ -363,11 +391,11 @@ pagedresults_get_current_be(Connection *conn, int index)
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_get_current_be", "=> idx=%d\n", index);
if (conn && (index > -1)) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
be = conn->c_pagedresults.prl_list[index].pr_current_be;
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_get_current_be", "<= %p\n", be);
@@ -382,13 +410,13 @@ pagedresults_set_current_be(Connection *conn, Slapi_Backend *be, int index, int
"pagedresults_set_current_be", "=> idx=%d\n", index);
if (conn && (index > -1)) {
if (!nolock)
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
conn->c_pagedresults.prl_list[index].pr_current_be = be;
}
rc = 0;
if (!nolock)
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_set_current_be", "<= %d\n", rc);
@@ -407,13 +435,13 @@ pagedresults_get_search_result(Connection *conn, Operation *op, int locked, int
locked ? "locked" : "not locked", index);
if (conn && (index > -1)) {
if (!locked) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
}
if (index < conn->c_pagedresults.prl_maxlen) {
sr = conn->c_pagedresults.prl_list[index].pr_search_result_set;
}
if (!locked) {
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
}
slapi_log_err(SLAPI_LOG_TRACE,
@@ -433,7 +461,7 @@ pagedresults_set_search_result(Connection *conn, Operation *op, void *sr, int lo
index, sr);
if (conn && (index > -1)) {
if (!locked)
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
PagedResults *prp = conn->c_pagedresults.prl_list + index;
if (!(prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED) || !sr) {
@@ -443,7 +471,7 @@ pagedresults_set_search_result(Connection *conn, Operation *op, void *sr, int lo
rc = 0;
}
if (!locked)
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_set_search_result", "=> %d\n", rc);
@@ -460,11 +488,11 @@ pagedresults_get_search_result_count(Connection *conn, Operation *op, int index)
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_get_search_result_count", "=> idx=%d\n", index);
if (conn && (index > -1)) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
count = conn->c_pagedresults.prl_list[index].pr_search_result_count;
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_get_search_result_count", "<= %d\n", count);
@@ -481,11 +509,11 @@ pagedresults_set_search_result_count(Connection *conn, Operation *op, int count,
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_set_search_result_count", "=> idx=%d\n", index);
if (conn && (index > -1)) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
conn->c_pagedresults.prl_list[index].pr_search_result_count = count;
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
rc = 0;
}
slapi_log_err(SLAPI_LOG_TRACE,
@@ -506,11 +534,11 @@ pagedresults_get_search_result_set_size_estimate(Connection *conn,
"pagedresults_get_search_result_set_size_estimate",
"=> idx=%d\n", index);
if (conn && (index > -1)) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
count = conn->c_pagedresults.prl_list[index].pr_search_result_set_size_estimate;
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_get_search_result_set_size_estimate", "<= %d\n",
@@ -532,11 +560,11 @@ pagedresults_set_search_result_set_size_estimate(Connection *conn,
"pagedresults_set_search_result_set_size_estimate",
"=> idx=%d\n", index);
if (conn && (index > -1)) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
conn->c_pagedresults.prl_list[index].pr_search_result_set_size_estimate = count;
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
rc = 0;
}
slapi_log_err(SLAPI_LOG_TRACE,
@@ -555,11 +583,11 @@ pagedresults_get_with_sort(Connection *conn, Operation *op, int index)
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_get_with_sort", "=> idx=%d\n", index);
if (conn && (index > -1)) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
flags = conn->c_pagedresults.prl_list[index].pr_flags & CONN_FLAG_PAGEDRESULTS_WITH_SORT;
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_get_with_sort", "<= %d\n", flags);
@@ -576,14 +604,14 @@ pagedresults_set_with_sort(Connection *conn, Operation *op, int flags, int index
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_set_with_sort", "=> idx=%d\n", index);
if (conn && (index > -1)) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
if (flags & OP_FLAG_SERVER_SIDE_SORTING) {
conn->c_pagedresults.prl_list[index].pr_flags |=
CONN_FLAG_PAGEDRESULTS_WITH_SORT;
}
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
rc = 0;
}
slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_with_sort", "<= %d\n", rc);
@@ -600,11 +628,11 @@ pagedresults_get_unindexed(Connection *conn, Operation *op, int index)
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_get_unindexed", "=> idx=%d\n", index);
if (conn && (index > -1)) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
flags = conn->c_pagedresults.prl_list[index].pr_flags & CONN_FLAG_PAGEDRESULTS_UNINDEXED;
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_get_unindexed", "<= %d\n", flags);
@@ -621,12 +649,12 @@ pagedresults_set_unindexed(Connection *conn, Operation *op, int index)
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_set_unindexed", "=> idx=%d\n", index);
if (conn && (index > -1)) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
conn->c_pagedresults.prl_list[index].pr_flags |=
CONN_FLAG_PAGEDRESULTS_UNINDEXED;
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
rc = 0;
}
slapi_log_err(SLAPI_LOG_TRACE,
@@ -644,11 +672,11 @@ pagedresults_get_sort_result_code(Connection *conn, Operation *op, int index)
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_get_sort_result_code", "=> idx=%d\n", index);
if (conn && (index > -1)) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
code = conn->c_pagedresults.prl_list[index].pr_sort_result_code;
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_get_sort_result_code", "<= %d\n", code);
@@ -665,11 +693,11 @@ pagedresults_set_sort_result_code(Connection *conn, Operation *op, int code, int
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_set_sort_result_code", "=> idx=%d\n", index);
if (conn && (index > -1)) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
conn->c_pagedresults.prl_list[index].pr_sort_result_code = code;
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
rc = 0;
}
slapi_log_err(SLAPI_LOG_TRACE,
@@ -687,11 +715,11 @@ pagedresults_set_timelimit(Connection *conn, Operation *op, time_t timelimit, in
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_set_timelimit", "=> idx=%d\n", index);
if (conn && (index > -1)) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
slapi_timespec_expire_at(timelimit, &(conn->c_pagedresults.prl_list[index].pr_timelimit_hr));
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
rc = 0;
}
slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_timelimit", "<= %d\n", rc);
@@ -746,7 +774,7 @@ pagedresults_cleanup(Connection *conn, int needlock)
}
if (needlock) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
}
for (i = 0; conn->c_pagedresults.prl_list &&
i < conn->c_pagedresults.prl_maxlen;
@@ -765,7 +793,7 @@ pagedresults_cleanup(Connection *conn, int needlock)
}
conn->c_pagedresults.prl_count = 0;
if (needlock) {
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
/* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc); */
return rc;
@@ -792,7 +820,7 @@ pagedresults_cleanup_all(Connection *conn, int needlock)
}
if (needlock) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
}
for (i = 0; conn->c_pagedresults.prl_list &&
i < conn->c_pagedresults.prl_maxlen;
@@ -812,7 +840,7 @@ pagedresults_cleanup_all(Connection *conn, int needlock)
conn->c_pagedresults.prl_maxlen = 0;
conn->c_pagedresults.prl_count = 0;
if (needlock) {
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup_all", "<= %d\n", rc);
return rc;
@@ -831,7 +859,7 @@ pagedresults_check_or_set_processing(Connection *conn, int index)
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_check_or_set_processing", "=>\n", index);
if (conn && (index > -1)) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
ret = (conn->c_pagedresults.prl_list[index].pr_flags &
CONN_FLAG_PAGEDRESULTS_PROCESSING);
@@ -839,7 +867,7 @@ pagedresults_check_or_set_processing(Connection *conn, int index)
conn->c_pagedresults.prl_list[index].pr_flags |=
CONN_FLAG_PAGEDRESULTS_PROCESSING;
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_check_or_set_processing", "<= %d\n", ret);
@@ -858,7 +886,7 @@ pagedresults_reset_processing(Connection *conn, int index)
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_reset_processing", "=> idx=%d\n", index);
if (conn && (index > -1)) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
ret = (conn->c_pagedresults.prl_list[index].pr_flags &
CONN_FLAG_PAGEDRESULTS_PROCESSING);
@@ -866,7 +894,7 @@ pagedresults_reset_processing(Connection *conn, int index)
conn->c_pagedresults.prl_list[index].pr_flags &=
~CONN_FLAG_PAGEDRESULTS_PROCESSING;
}
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
slapi_log_err(SLAPI_LOG_TRACE,
"pagedresults_reset_processing", "<= %d\n", ret);
@@ -885,7 +913,7 @@ pagedresults_reset_processing(Connection *conn, int index)
* Do not return timed out here. But let the next request take care the
* timedout slot(s).
*
- * must be called within conn->c_mutex
+ * must be called within pageresult_lock_get_addr(conn)
*/
int
pagedresults_is_timedout_nolock(Connection *conn)
@@ -912,7 +940,7 @@ pagedresults_is_timedout_nolock(Connection *conn)
/*
* reset all timeout
- * must be called within conn->c_mutex
+ * must be called within pageresult_lock_get_addr(conn)
*/
int
pagedresults_reset_timedout_nolock(Connection *conn)
@@ -977,9 +1005,9 @@ pagedresults_lock(Connection *conn, int index)
if (!conn || (index < 0) || (index >= conn->c_pagedresults.prl_maxlen)) {
return;
}
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
prp = conn->c_pagedresults.prl_list + index;
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
if (prp->pr_mutex) {
PR_Lock(prp->pr_mutex);
}
@@ -993,9 +1021,9 @@ pagedresults_unlock(Connection *conn, int index)
if (!conn || (index < 0) || (index >= conn->c_pagedresults.prl_maxlen)) {
return;
}
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
prp = conn->c_pagedresults.prl_list + index;
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
if (prp->pr_mutex) {
PR_Unlock(prp->pr_mutex);
}
@@ -1010,11 +1038,11 @@ pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int inde
return 1; /* not abandoned, but do not want to proceed paged results op. */
}
if (!locked) {
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
}
prp = conn->c_pagedresults.prl_list + index;
if (!locked) {
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
return prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED;
}
@@ -1039,13 +1067,13 @@ pagedresults_set_search_result_pb(Slapi_PBlock *pb, void *sr, int locked)
"pagedresults_set_search_result_pb", "=> idx=%d, sr=%p\n", index, sr);
if (conn && (index > -1)) {
if (!locked)
- pthread_mutex_lock(&(conn->c_mutex));
+ pthread_mutex_lock(pageresult_lock_get_addr(conn));
if (index < conn->c_pagedresults.prl_maxlen) {
conn->c_pagedresults.prl_list[index].pr_search_result_set = sr;
rc = 0;
}
if (!locked) {
- pthread_mutex_unlock(&(conn->c_mutex));
+ pthread_mutex_unlock(pageresult_lock_get_addr(conn));
}
}
slapi_log_err(SLAPI_LOG_TRACE,
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index eba115a62..65efee854 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1547,6 +1547,9 @@ int slapd_do_all_nss_ssl_init(int slapd_exemode, int importexport_encrypt, int s
/*
* pagedresults.c
*/
+void pageresult_lock_init();
+void pageresult_lock_cleanup();
+pthread_mutex_t *pageresult_lock_get_addr(Connection *conn);
int pagedresults_parse_control_value(Slapi_PBlock *pb, struct berval *psbvp, ber_int_t *pagesize, int *index, Slapi_Backend *be);
void pagedresults_set_response_control(Slapi_PBlock *pb, int iscritical, ber_int_t estimate, int curr_search_count, int index);
Slapi_Backend *pagedresults_get_current_be(Connection *conn, int index);
--
2.41.0
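
Note on the hunks above: every direct use of conn->c_mutex in pagedresults.c is replaced by a mutex obtained from the new pageresult_lock_get_addr() helper declared in proto-slap.h. Below is a minimal sketch of one way such a helper can be structured: a fixed pool of mutexes initialized once at startup, with each connection hashed onto a slot by its address. The pool size, the hash, and the Connection stand-in are illustrative assumptions, not the actual slapd implementation.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define LOCK_POOL_SIZE 64                 /* assumed pool size, for illustration only */

typedef struct connection { int c_sd; } Connection;   /* stand-in for the real struct */

static pthread_mutex_t lock_pool[LOCK_POOL_SIZE];

/* Initialize the shared pool once at server startup. */
static void pageresult_lock_init_sketch(void)
{
    for (int i = 0; i < LOCK_POOL_SIZE; i++) {
        pthread_mutex_init(&lock_pool[i], NULL);
    }
}

/* Map a connection to one mutex in the pool by hashing its address,
 * so paged-results bookkeeping no longer contends on c_mutex. */
static pthread_mutex_t *pageresult_lock_get_addr_sketch(Connection *conn)
{
    return &lock_pool[((uintptr_t)conn / sizeof(Connection)) % LOCK_POOL_SIZE];
}

int main(void)
{
    Connection conn = { .c_sd = 42 };

    pageresult_lock_init_sketch();
    pthread_mutex_lock(pageresult_lock_get_addr_sketch(&conn));
    printf("paged-results state for fd %d is protected\n", conn.c_sd);
    pthread_mutex_unlock(pageresult_lock_get_addr_sketch(&conn));
    return 0;
}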

@ -0,0 +1,517 @@
From 6053374b175e2fd8847282ceffb99854ec148324 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Thu, 20 Jul 2023 15:47:14 +0000
Subject: [PATCH 06/11] Issue 5804 - dtablesize being set to soft
maxfiledescriptor limit (#5806)
Bug Description: 389ds does not set dtablesize properly when systemd sets
the max file descriptors with its default settings. dtablesize stays at 1024, which causes
a massive slowdown once you reach around 950 connections.
Fix Description: dtablesize is derived from the connection table size, so this
commit sets the connection table size (and therefore dtablesize) to the system max
file descriptor limit minus the reserved file descriptors.
relates: https://github.com/389ds/389-ds-base/issues/5804
Reviewed by: @tbordaz @progier389 (Thank you)
---
.../suites/resource_limits/fdlimits_test.py | 47 +++-
ldap/servers/slapd/conntable.c | 2 +
ldap/servers/slapd/daemon.c | 37 ++-
ldap/servers/slapd/libglobs.c | 257 ++++++++++++++----
ldap/servers/slapd/proto-slap.h | 6 +-
5 files changed, 283 insertions(+), 66 deletions(-)
diff --git a/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py b/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py
index 3b26e8cae..19854b01d 100644
--- a/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py
+++ b/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py
@@ -11,6 +11,7 @@ import pytest
import os
import ldap
import resource
+from lib389.backend import Backends
from lib389._constants import *
from lib389.topologies import topology_st
from lib389.utils import ds_is_older, ensure_str
@@ -22,9 +23,11 @@ logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
FD_ATTR = "nsslapd-maxdescriptors"
+RESRV_FD_ATTR = "nsslapd-reservedescriptors"
GLOBAL_LIMIT = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
SYSTEMD_LIMIT = ensure_str(check_output("systemctl show -p LimitNOFILE dirsrv@standalone1".split(" ")).strip()).split('=')[1]
CUSTOM_VAL = str(int(SYSTEMD_LIMIT) - 10)
+RESRV_DESC_VAL = str(10)
TOO_HIGH_VAL = str(GLOBAL_LIMIT * 2)
TOO_HIGH_VAL2 = str(int(SYSTEMD_LIMIT) * 2)
TOO_LOW_VAL = "0"
@@ -74,7 +77,49 @@ def test_fd_limits(topology_st):
max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR)
assert max_fd == CUSTOM_VAL
- log.info("Test PASSED")
+ log.info("test_fd_limits PASSED")
+
+@pytest.mark.skipif(ds_is_older("1.4.1.2"), reason="Not implemented")
+def test_reserve_descriptor_validation(topology_st):
+ """Test the reserve descriptor self check
+
+ :id: TODO
+ :setup: Standalone Instance
+ :steps:
+ 1. Set attr nsslapd-reservedescriptors to a low value of RESRV_DESC_VAL (10)
+ 2. Verify low value has been set
+ 3. Restart instance (On restart the reservedescriptor attr will be validated)
+ 4. Check updated value for nsslapd-reservedescriptors attr
+ :expectedresults:
+ 1. Success
+ 2. A value of RESRV_DESC_VAL (10) is returned
+ 3. Success
+ 4. A value of STANDALONE_INST_RESRV_DESCS (55) is returned
+ """
+
+ # Set nsslapd-reservedescriptors to a low value (RESRV_DESC_VAL:10)
+ topology_st.standalone.config.set(RESRV_FD_ATTR, RESRV_DESC_VAL)
+ resrv_fd = topology_st.standalone.config.get_attr_val_utf8(RESRV_FD_ATTR)
+ assert resrv_fd == RESRV_DESC_VAL
+
+ # An instance restart triggers a validation of the configured nsslapd-reservedescriptors attribute
+ topology_st.standalone.restart()
+
+ """
+ A standalone instance contains a single backend with default indexes
+ so we only check these. TODO add tests for repl, chaining, PTA, SSL
+ """
+ STANDALONE_INST_RESRV_DESCS = 20 # 20 = Reserve descriptor constant
+ backends = Backends(topology_st.standalone)
+ STANDALONE_INST_RESRV_DESCS += (len(backends.list()) * 4) # 4 = Backend descriptor constant
+ for be in backends.list() :
+ STANDALONE_INST_RESRV_DESCS += len(be.get_indexes().list())
+
+    # Verify reservedescriptors has been updated
+ resrv_fd = topology_st.standalone.config.get_attr_val_utf8(RESRV_FD_ATTR)
+ assert resrv_fd == str(STANDALONE_INST_RESRV_DESCS)
+
+ log.info("test_reserve_descriptor_validation PASSED")
if __name__ == '__main__':
diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c
index feb9c0d75..5e6513880 100644
--- a/ldap/servers/slapd/conntable.c
+++ b/ldap/servers/slapd/conntable.c
@@ -138,6 +138,8 @@ connection_table_new(int table_size)
ct->conn_next_offset = 1;
ct->conn_free_offset = 1;
+ slapi_log_err(SLAPI_LOG_INFO, "connection_table_new", "conntablesize:%d\n", ct->size);
+
pthread_mutexattr_t monitor_attr = {0};
pthread_mutexattr_init(&monitor_attr);
pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE);
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 388fa0943..e5b7d6e06 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -82,6 +82,7 @@ PRFileDesc *signalpipe[2];
static int writesignalpipe = SLAPD_INVALID_SOCKET;
static int readsignalpipe = SLAPD_INVALID_SOCKET;
#define FDS_SIGNAL_PIPE 0
+#define MAX_LDAP_CONNS 64000
static PRThread *accept_thread_p = NULL;
static PRThread *disk_thread_p = NULL;
@@ -107,7 +108,7 @@ static PRFileDesc *tls_listener = NULL; /* Stashed tls listener for get_ssl_list
#define SLAPD_POLL_LISTEN_READY(xxflagsxx) (xxflagsxx & PR_POLL_READ)
-static int get_configured_connection_table_size(void);
+static int get_connection_table_size(void);
#ifdef RESOLVER_NEEDS_LOW_FILE_DESCRIPTORS
static void get_loopback_by_addr(void);
#endif
@@ -1063,7 +1064,11 @@ slapd_daemon(daemon_ports_t *ports)
PRIntervalTime pr_timeout = PR_MillisecondsToInterval(slapd_wakeup_timer);
uint64_t threads;
int in_referral_mode = config_check_referral_mode();
- int connection_table_size = get_configured_connection_table_size();
+ int connection_table_size = get_connection_table_size();
+ if (!connection_table_size) {
+        slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", "Not enough available file descriptors\n");
+ exit(1);
+ }
the_connection_table = connection_table_new(connection_table_size);
/*
@@ -2846,18 +2851,32 @@ catch_signals()
#endif /* HPUX */
static int
-get_configured_connection_table_size(void)
+get_connection_table_size(void)
{
- int size = config_get_conntablesize();
+ int size = 0;
+ int resrvdesc = 0;
int maxdesc = config_get_maxdescriptors();
- /*
- * Cap the table size at nsslapd-maxdescriptors.
- */
- if (maxdesc >= 0 && size > maxdesc) {
- size = maxdesc;
+ /* Validate configured reserve descriptors */
+ validate_num_config_reservedescriptors();
+
+ resrvdesc = config_get_reservedescriptors();
+ if (maxdesc > resrvdesc) {
+ size = (maxdesc - resrvdesc);
+ } else {
+ return 0;
+ }
+
+ /* Verify size does not exceed max num of conns */
+ if (size > MAX_LDAP_CONNS) {
+ size = MAX_LDAP_CONNS;
}
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ CFG_LOCK_WRITE(slapdFrontendConfig);
+ slapdFrontendConfig->conntablesize = size;
+ CFG_UNLOCK_WRITE(slapdFrontendConfig);
+
return size;
}
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index f6c6b52a1..43f924947 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -1670,13 +1670,6 @@ FrontendConfig_init(void)
cfg->groupevalnestlevel = SLAPD_DEFAULT_GROUPEVALNESTLEVEL;
cfg->snmp_index = SLAPD_DEFAULT_SNMP_INDEX;
cfg->SSLclientAuth = SLAPD_DEFAULT_SSLCLIENTAUTH;
-
-#ifdef USE_SYSCONF
- cfg->conntablesize = sysconf(_SC_OPEN_MAX);
-#else /* USE_SYSCONF */
- cfg->conntablesize = getdtablesize();
-#endif /* USE_SYSCONF */
-
init_accesscontrol = cfg->accesscontrol = LDAP_ON;
/* nagle triggers set/unset TCP_CORK setsockopt per operation
@@ -1689,7 +1682,6 @@ FrontendConfig_init(void)
init_return_exact_case = cfg->return_exact_case = LDAP_ON;
init_result_tweak = cfg->result_tweak = LDAP_OFF;
init_attrname_exceptions = cfg->attrname_exceptions = LDAP_OFF;
- cfg->reservedescriptors = SLAPD_DEFAULT_RESERVE_FDS;
cfg->useroc = slapi_ch_strdup("");
cfg->userat = slapi_ch_strdup("");
/* kexcoff: should not be initialized by default here
@@ -4830,43 +4822,13 @@ config_set_maxdescriptors(const char *attrname, char *value, char *errorbuf, int
int
config_set_conntablesize(const char *attrname, char *value, char *errorbuf, int apply)
{
- int retVal = LDAP_SUCCESS;
- long nValue = 0;
- int maxVal = 65535;
- char *endp = NULL;
- struct rlimit rlp;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- if (config_value_is_null(attrname, value, errorbuf, 0)) {
- return LDAP_OPERATIONS_ERROR;
- }
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "User setting of %s attribute is disabled, server has auto calculated its value to %d.",
+ attrname, slapdFrontendConfig->conntablesize);
- if (0 == getrlimit(RLIMIT_NOFILE, &rlp)) {
- maxVal = (int)rlp.rlim_max;
- }
-
- errno = 0;
- nValue = strtol(value, &endp, 0);
-
- if (*endp != '\0' || errno == ERANGE || nValue < 1 || nValue > maxVal) {
- slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
- "%s: invalid value \"%s\", connection table size must range from 1 to %d (the current process maxdescriptors limit). "
- "Server will use a setting of %d.",
- attrname, value, maxVal, maxVal);
- if (nValue > maxVal) {
- nValue = maxVal;
- retVal = LDAP_UNWILLING_TO_PERFORM;
- } else {
- retVal = LDAP_OPERATIONS_ERROR;
- }
- }
-
- if (apply) {
- CFG_LOCK_WRITE(slapdFrontendConfig);
- slapdFrontendConfig->conntablesize = nValue;
- CFG_UNLOCK_WRITE(slapdFrontendConfig);
- }
- return retVal;
+ return LDAP_OPERATIONS_ERROR;
}
int
@@ -6293,6 +6255,19 @@ config_get_maxdescriptors(void)
return retVal;
}
+int
+config_get_conntablesize(void)
+{
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ int retVal;
+
+ CFG_LOCK_READ(slapdFrontendConfig);
+ retVal = slapdFrontendConfig->conntablesize;
+ CFG_UNLOCK_READ(slapdFrontendConfig);
+
+ return retVal;
+}
+
int
config_get_reservedescriptors()
{
@@ -6595,19 +6570,6 @@ config_get_referral_mode(void)
return ret;
}
-int
-config_get_conntablesize(void)
-{
- slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- int retVal;
-
- CFG_LOCK_READ(slapdFrontendConfig);
- retVal = slapdFrontendConfig->conntablesize;
- CFG_UNLOCK_READ(slapdFrontendConfig);
-
- return retVal;
-}
-
/* return yes/no without actually copying the referral url
we don't worry about another thread changing this value
since we now return an integer */
@@ -9135,3 +9097,188 @@ invalid_sasl_mech(char *str)
/* Mechanism value is valid */
return 0;
}
+
+/*
+ * Check if the number of reserve descriptors satisfy the servers needs.
+ *
+ * 1) Calculate the number of reserve descriptors the server requires
+ * 2) Get the configured value for nsslapd-reservedescriptors
+ * 3) If the configured value is less than the calculated value, increase it
+ *
+ * The formula used here is taken from the RH DS 11 docs:
+ * nsslapd-reservedescriptor = 20 + (NldbmBackends * 4) + NglobalIndex +
+ * 8 ReplicationDescriptors + Nreplicas +
+ * NchainingBackends * nsOperationConnectionsLimit +
+ * 3 PTADescriptors + 5 SSLDescriptors
+ */
+int
+validate_num_config_reservedescriptors(void)
+{
+ #define RESRV_DESC_CONST 20
+ #define BE_DESC_CONST 4
+ #define REPL_DESC_CONST 8
+ #define PTA_DESC_CONST 3
+ #define SSL_DESC_CONST 5
+ Slapi_Attr *attr = NULL;
+ Slapi_Backend *be = NULL;
+ Slapi_DN sdn;
+ Slapi_Entry *entry = NULL;
+ Slapi_Entry **entries = NULL;
+ Slapi_PBlock *search_pb = NULL;
+ char *cookie = NULL;
+ char const *mt_str = NULL;
+ char *entry_str = NULL;
+ int rc = -1;
+ int num_backends = 0;
+ int num_repl_agmts = 0;
+ int num_chaining_backends = 0;
+ int chain_conn_limit = 0;
+ int calc_reservedesc = RESRV_DESC_CONST;
+ int config_reservedesc = config_get_reservedescriptors();
+
+ /* Get number of backends, multiplied by the backend descriptor constant */
+ for (be = slapi_get_first_backend(&cookie); be != NULL; be = slapi_get_next_backend(cookie)) {
+ entry_str = slapi_create_dn_string("cn=%s,cn=ldbm database,cn=plugins,cn=config", be->be_name);
+ if (NULL == entry_str) {
+ slapi_log_err(SLAPI_LOG_ERR, "validate_num_config_reservedescriptors", "Failed to create backend dn string");
+ return -1;
+ }
+ slapi_sdn_init_dn_byref(&sdn, entry_str);
+ slapi_search_internal_get_entry(&sdn, NULL, &entry, plugin_get_default_component_id());
+ if (entry) {
+ if (slapi_entry_attr_hasvalue(entry, "objectclass", "nsBackendInstance")) {
+ num_backends += 1;
+ }
+ }
+ slapi_entry_free(entry);
+ slapi_ch_free_string(&entry_str);
+ slapi_sdn_done(&sdn);
+ }
+ slapi_ch_free((void **)&cookie);
+ if (num_backends) {
+ calc_reservedesc += (num_backends * BE_DESC_CONST);
+ }
+
+ /* Get number of indexes for each backend and add to total */
+ for (be = slapi_get_first_backend(&cookie); be; be = slapi_get_next_backend(cookie)) {
+ entry_str = slapi_create_dn_string("cn=index,cn=%s,cn=ldbm database,cn=plugins,cn=config", be->be_name);
+ if (NULL == entry_str) {
+ slapi_log_err(SLAPI_LOG_ERR, "validate_num_config_reservedescriptors", "Failed to create index dn string");
+ return -1;
+ }
+ slapi_sdn_init_dn_byref(&sdn, entry_str);
+ slapi_search_internal_get_entry(&sdn, NULL, &entry, plugin_get_default_component_id());
+ if (entry) {
+ rc = slapi_entry_attr_find(entry, "numsubordinates", &attr);
+ if (LDAP_SUCCESS == rc) {
+ Slapi_Value *sval;
+ slapi_attr_first_value(attr, &sval);
+ if (sval != NULL) {
+ const struct berval *bval = slapi_value_get_berval(sval);
+ if (NULL != bval)
+ calc_reservedesc += atol(bval->bv_val);
+ }
+ }
+ }
+ slapi_entry_free(entry);
+ slapi_ch_free_string(&entry_str);
+ slapi_sdn_done(&sdn);
+ }
+ slapi_ch_free((void **)&cookie);
+
+ /* If replication is enabled add replication descriptor constant, plus the number of enabled repl agmts */
+ mt_str = slapi_get_mapping_tree_config_root();
+ if (NULL == mt_str) {
+ slapi_log_err(SLAPI_LOG_ERR, "validate_num_config_reservedescriptors", "Failed to get mapping tree config string");
+ return -1;
+ }
+ search_pb = slapi_pblock_new();
+ slapi_search_internal_set_pb(search_pb, mt_str, LDAP_SCOPE_SUBTREE, "(objectClass=nsds5replicationagreement) nsds5ReplicaEnabled", NULL, 0, NULL, NULL, plugin_get_default_component_id(), 0);
+ slapi_search_internal_pb(search_pb);
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
+ if (LDAP_SUCCESS == rc) {
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
+ for (; *entries; ++entries) {
+ num_repl_agmts += 1;
+ }
+ if (num_repl_agmts) {
+ calc_reservedesc += REPL_DESC_CONST;
+ }
+ }
+ slapi_free_search_results_internal(search_pb);
+ slapi_pblock_destroy(search_pb);
+ calc_reservedesc += num_repl_agmts;
+
+ /* Get the operation connection limit from the default instance config */
+ entry_str = slapi_create_dn_string("cn=default instance config,cn=chaining database,cn=plugins,cn=config");
+ if (NULL == entry_str) {
+ slapi_log_err(SLAPI_LOG_ERR, "validate_num_config_reservedescriptors", "Failed to create default chaining config dn string");
+ return -1;
+ }
+ slapi_sdn_init_dn_byref(&sdn, entry_str);
+ slapi_search_internal_get_entry(&sdn, NULL, &entry, plugin_get_default_component_id());
+ if (entry) {
+ chain_conn_limit = slapi_entry_attr_get_int(entry, "nsoperationconnectionslimit");
+ }
+ slapi_entry_free(entry);
+ slapi_ch_free_string(&entry_str);
+ slapi_sdn_done(&sdn);
+
+ /* Get the number of chaining backends, multiplied by the chaining operation connection limit */
+ for (be = slapi_get_first_backend(&cookie); be; be = slapi_get_next_backend(cookie)) {
+ entry_str = slapi_create_dn_string("cn=%s,cn=chaining database,cn=plugins,cn=config", be->be_name);
+ if (NULL == entry_str) {
+ slapi_log_err(SLAPI_LOG_ERR, "validate_num_config_reservedescriptors", "Failed to create chaining be dn string");
+ return -1;
+ }
+ slapi_sdn_init_dn_byref(&sdn, entry_str);
+ slapi_search_internal_get_entry(&sdn, NULL, &entry, plugin_get_default_component_id());
+ if (entry) {
+ if (slapi_entry_attr_hasvalue(entry, "objectclass", "nsBackendInstance")) {
+ num_chaining_backends += 1;
+ }
+ }
+ slapi_entry_free(entry);
+ slapi_ch_free_string(&entry_str);
+ slapi_sdn_done(&sdn);
+ }
+ slapi_ch_free((void **)&cookie);
+ if (num_chaining_backends) {
+ calc_reservedesc += (num_chaining_backends * chain_conn_limit);
+ }
+
+ /* If PTA is enabled add the pass through auth descriptor constant */
+ entry_str = slapi_create_dn_string("cn=Pass Through Authentication,cn=plugins,cn=config");
+ if (NULL == entry_str) {
+ slapi_log_err(SLAPI_LOG_ERR, "validate_num_config_reservedescriptors", "Failed to create PTA dn string");
+ return -1;
+ }
+ slapi_sdn_init_dn_byref(&sdn, entry_str);
+ slapi_search_internal_get_entry(&sdn, NULL, &entry, plugin_get_default_component_id());
+ if (entry) {
+ if (slapi_entry_attr_hasvalue(entry, "nsslapd-PluginEnabled", "on")) {
+ calc_reservedesc += PTA_DESC_CONST;
+ }
+ }
+ slapi_entry_free(entry);
+ slapi_ch_free_string(&entry_str);
+ slapi_sdn_done(&sdn);
+
+    /* If SSL is enabled add the SSL descriptor constant */
+ if (config_get_security()) {
+ calc_reservedesc += SSL_DESC_CONST;
+ }
+
+ char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
+ char resrvdesc_str[SLAPI_DSE_RETURNTEXT_SIZE];
+ /* Are the configured reserve descriptors enough to satisfy the servers needs */
+ if (config_reservedesc < calc_reservedesc) {
+ PR_snprintf(resrvdesc_str, sizeof(resrvdesc_str), "%d", calc_reservedesc);
+ if (LDAP_SUCCESS == config_set_reservedescriptors(CONFIG_RESERVEDESCRIPTORS_ATTRIBUTE, resrvdesc_str, errorbuf, 1)) {
+ slapi_log_err(SLAPI_LOG_INFO, "validate_num_config_reservedescriptors",
+ "reserve descriptors changed from %d to %d\n", config_reservedesc, calc_reservedesc);
+ }
+ }
+
+ return (0);
+}
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 65efee854..47465c4c5 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -349,7 +349,7 @@ int config_set_useroc(const char *attrname, char *value, char *errorbuf, int app
int config_set_return_exact_case(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_result_tweak(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_referral_mode(const char *attrname, char *url, char *errorbuf, int apply);
-int config_set_conntablesize(const char *attrname, char *url, char *errorbuf, int apply);
+int config_set_conntablesize(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_maxbersize(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_maxsasliosize(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_versionstring(const char *attrname, char *versionstring, char *errorbuf, int apply);
@@ -645,6 +645,10 @@ int get_ldapmessage_controls_ext(Slapi_PBlock *pb, BerElement *ber, LDAPControl
int write_controls(BerElement *ber, LDAPControl **ctrls);
void add_control(LDAPControl ***ctrlsp, LDAPControl *newctrl);
+/*
+ * daemon.c
+ */
+int validate_num_config_reservedescriptors(void) ;
/*
* delete.c
--
2.41.0
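
The sizing rule introduced above boils down to: connection table size (and therefore dtablesize) = nsslapd-maxdescriptors minus the validated nsslapd-reservedescriptors, capped at MAX_LDAP_CONNS, with 0 treated as a fatal startup error. A standalone sketch of that arithmetic, using hypothetical input values in place of the server's config getters:

#include <stdio.h>

#define MAX_LDAP_CONNS 64000     /* hard cap used by the patch */

/* Sketch: derive the connection table size from the descriptor limits.
 * Returns 0 when there are not enough descriptors left for connections. */
static int get_connection_table_size_sketch(int maxdesc, int reserved)
{
    int size;

    if (maxdesc <= reserved) {
        return 0;                /* caller treats this as a fatal startup error */
    }
    size = maxdesc - reserved;
    if (size > MAX_LDAP_CONNS) {
        size = MAX_LDAP_CONNS;
    }
    return size;
}

int main(void)
{
    /* Hypothetical values: a systemd-style LimitNOFILE and a small reserve. */
    printf("table size = %d\n", get_connection_table_size_sketch(524288, 64));  /* capped at 64000 */
    printf("table size = %d\n", get_connection_table_size_sketch(1024, 64));    /* 960 */
    printf("table size = %d\n", get_connection_table_size_sketch(50, 64));      /* 0: fatal */
    return 0;
}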

@ -0,0 +1,45 @@
From 3f68ad6ff5aad54f42b51ff69bbc9cb465366096 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 20 Jul 2023 16:23:35 -0400
Subject: [PATCH 07/11] Issue 5825 - healthcheck - update allowed password
schemes for fips mode
Description:
In 1.4.3, FIPS mode should also allow SSHA512, as not all released 1.4.3 versions
support the new Rust password hashers.
relates: https://github.com/389ds/389-ds-base/issues/5825
Reviewed by: spichugi(Thanks!)
---
src/lib389/lib389/config.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/src/lib389/lib389/config.py b/src/lib389/lib389/config.py
index 81bf8ec66..a24fe95fc 100644
--- a/src/lib389/lib389/config.py
+++ b/src/lib389/lib389/config.py
@@ -21,7 +21,7 @@ import ldap
from lib389._constants import *
from lib389 import Entry
from lib389._mapped_object import DSLdapObject
-from lib389.utils import ensure_bytes, selinux_label_port, selinux_present
+from lib389.utils import ensure_bytes, selinux_label_port, selinux_present, is_fips
from lib389.lint import (
DSCLE0001, DSCLE0002, DSCLE0003, DSCLE0004, DSELE0001
)
@@ -212,6 +212,10 @@ class Config(DSLdapObject):
def _lint_passwordscheme(self):
allowed_schemes = ['PBKDF2-SHA512', 'PBKDF2_SHA256', 'PBKDF2_SHA512', 'GOST_YESCRYPT']
+ if is_fips():
+ # Not all RHEL 8 servers support the Rust password hashers so we
+ # need to allow SSHA512 in fips mode
+ allowed_schemes.append('SSHA512')
u_password_scheme = self.get_attr_val_utf8('passwordStorageScheme')
u_root_scheme = self.get_attr_val_utf8('nsslapd-rootpwstoragescheme')
if u_root_scheme not in allowed_schemes:
--
2.41.0

@ -0,0 +1,46 @@
From 6140837beb4235019d5884533353830ad688ef48 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Wed, 26 Jul 2023 16:44:51 +0200
Subject: [PATCH 08/11] Issue 5864 - Server fails to start after reboot because
it's unable to access nsslapd-rundir
Bug Description:
Sometimes after a reboot the dirsrv service fails to start:
EMERG - main - Unable to access nsslapd-rundir: No such file or directory
EMERG - main - Ensure that user "dirsrv" has read and write permissions on /run/dirsrv
EMERG - main - Shutting down.
We rely on systemd-tmpfiles to create /run/dirsrv, but the dirsrv service
does not explicitly wait for systemd-tmpfiles-setup.service to finish.
This creates a race condition.
Fix Description:
The dirsrv service should start only after systemd-tmpfiles-setup.service has finished,
so add it as a dependency via `After=` and `Wants=`.
Fixes: https://github.com/389ds/389-ds-base/issues/5864
Reviewed-by: @Firstyear (Thanks!)
---
wrappers/systemd.template.service.in | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in
index 4485e0ec0..4a44eb0e4 100644
--- a/wrappers/systemd.template.service.in
+++ b/wrappers/systemd.template.service.in
@@ -4,8 +4,9 @@
[Unit]
Description=@capbrand@ Directory Server %i.
PartOf=@systemdgroupname@
-After=chronyd.service ntpd.service network-online.target
+After=chronyd.service ntpd.service network-online.target systemd-tmpfiles-setup.service
Before=radiusd.service
+Wants=systemd-tmpfiles-setup.service
[Service]
Type=notify
--
2.41.0

@ -0,0 +1,121 @@
From f496fca98ba5c9d04423b0c8ee5e69277bcc5a08 Mon Sep 17 00:00:00 2001
From: Barbora Simonova <bsmejkal@redhat.com>
Date: Mon, 31 Jul 2023 12:58:14 +0200
Subject: [PATCH 09/11] Issue 2375 - CLI - Healthcheck - revise and add new
checks
Description:
Updated the healthcheck tests to set nsslapd-accesslog-logbuffering
so it does not report a warning.
Also adjusted the certificate tests so they account for the extra day in a leap year.
Relates: https://github.com/389ds/389-ds-base/issues/2375
Reviewed by: @vashirov (Thanks!)
---
.../suites/healthcheck/health_repl_test.py | 7 ++++++
.../healthcheck/health_security_test.py | 22 +++++++++++++++++--
2 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/dirsrvtests/tests/suites/healthcheck/health_repl_test.py b/dirsrvtests/tests/suites/healthcheck/health_repl_test.py
index e67c5d0ef..8905db68f 100644
--- a/dirsrvtests/tests/suites/healthcheck/health_repl_test.py
+++ b/dirsrvtests/tests/suites/healthcheck/health_repl_test.py
@@ -107,6 +107,10 @@ def test_healthcheck_replication_replica_not_reachable(topology_m2):
M1 = topology_m2.ms['supplier1']
M2 = topology_m2.ms['supplier2']
+ log.info('Set nsslapd-accesslog-logbuffering so it does not raise a warning')
+ M1.config.set('nsslapd-accesslog-logbuffering', 'on')
+ M2.config.set('nsslapd-accesslog-logbuffering', 'on')
+
set_changelog_trimming(M1)
log.info('Set nsds5replicaport for the replication agreement to an unreachable port')
@@ -156,6 +160,9 @@ def test_healthcheck_changelog_trimming_not_configured(topology_m2):
M1 = topology_m2.ms['supplier1']
+ log.info('Set nsslapd-accesslog-logbuffering so it does not raise a warning')
+ M1.config.set('nsslapd-accesslog-logbuffering', 'on')
+
RET_CODE = 'DSCLLE0001'
log.info('Get the changelog entries for M1')
diff --git a/dirsrvtests/tests/suites/healthcheck/health_security_test.py b/dirsrvtests/tests/suites/healthcheck/health_security_test.py
index d14b52c7a..ebd330d95 100644
--- a/dirsrvtests/tests/suites/healthcheck/health_security_test.py
+++ b/dirsrvtests/tests/suites/healthcheck/health_security_test.py
@@ -102,6 +102,9 @@ def test_healthcheck_insecure_pwd_hash_configured(topology_st):
standalone = topology_st.standalone
+ log.info('Set nsslapd-accesslog-logbuffering so it does not raise a warning')
+ standalone.config.set('nsslapd-accesslog-logbuffering', 'on')
+
log.info('Configure an insecure passwordStorageScheme (SHA)')
standalone.config.set('passwordStorageScheme', 'SHA')
@@ -154,6 +157,9 @@ def test_healthcheck_min_allowed_tls_version_too_low(topology_st):
standalone = topology_st.standalone
+ log.info('Set nsslapd-accesslog-logbuffering so it does not raise a warning')
+ standalone.config.set('nsslapd-accesslog-logbuffering', 'on')
+
standalone.enable_tls()
# We have to update-crypto-policies to LEGACY, otherwise we can't set TLS1.0
@@ -210,6 +216,9 @@ def test_healthcheck_resolvconf_bad_file_perm(topology_st):
standalone = topology_st.standalone
+ log.info('Set nsslapd-accesslog-logbuffering so it does not raise a warning')
+ standalone.config.set('nsslapd-accesslog-logbuffering', 'on')
+
log.info('Change the /etc/resolv.conf file permissions to 444')
os.chmod('/etc/resolv.conf', 0o444)
@@ -254,6 +263,9 @@ def test_healthcheck_pwdfile_bad_file_perm(topology_st):
standalone = topology_st.standalone
cert_dir = standalone.ds_paths.cert_dir
+ log.info('Set nsslapd-accesslog-logbuffering so it does not raise a warning')
+ standalone.config.set('nsslapd-accesslog-logbuffering', 'on')
+
log.info('Change the /etc/dirsrv/slapd-{}/pwdfile.txt permissions to 000'.format(standalone.serverid))
os.chmod('{}/pwdfile.txt'.format(cert_dir), 0o000)
@@ -291,10 +303,13 @@ def test_healthcheck_certif_expiring_within_30d(topology_st):
standalone = topology_st.standalone
+ log.info('Set nsslapd-accesslog-logbuffering so it does not raise a warning')
+ standalone.config.set('nsslapd-accesslog-logbuffering', 'on')
+
standalone.enable_tls()
# Cert is valid two years from today, so we count the date that is within 30 days before certificate expiration
- date_future = datetime.now() + timedelta(days=701)
+ date_future = datetime.now() + timedelta(days=702)
with libfaketime.fake_time(date_future):
run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False)
@@ -329,10 +344,13 @@ def test_healthcheck_certif_expired(topology_st):
standalone = topology_st.standalone
+ log.info('Set nsslapd-accesslog-logbuffering so it does not raise a warning')
+ standalone.config.set('nsslapd-accesslog-logbuffering', 'on')
+
standalone.enable_tls()
# Cert is valid two years from today, so we count the date that is after expiration
- date_future = datetime.now() + timedelta(days=731)
+ date_future = datetime.now() + timedelta(days=732)
with libfaketime.fake_time(date_future):
run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False)
--
2.41.0
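
The only functional change in health_security_test.py above is bumping the fake-time offsets from 701/731 to 702/732 days. A two-year validity window that includes a Feb 29 is 731 days rather than 730, so the old offsets landed on the boundaries instead of safely inside the "expiring within 30 days" and "already expired" ranges. A quick check of that arithmetic (the two-year certificate lifetime is taken from the test comments; the rest is an illustrative calculation):

#include <stdio.h>

int main(void)
{
    /* Assumed: the test certificate is valid for two calendar years that
     * include one leap day, i.e. 365 + 366 = 731 days. */
    int validity_days = 365 + 366;

    /* "Expiring within 30 days": the fake date must fall in the last 30 days. */
    printf("first day safely inside the 30-day warning window: %d\n", validity_days - 30 + 1);  /* 702 */

    /* "Expired": the fake date must be past the last valid day. */
    printf("first day after expiry: %d\n", validity_days + 1);                                  /* 732 */
    return 0;
}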

@ -0,0 +1,37 @@
From 0dd88a9e077cd7b8b3d3f317a1329438a4d35ed3 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 7 Aug 2023 10:18:19 +0200
Subject: [PATCH 10/11] Issue 5883 - Remove connection mutex contention risk on
autobind (#5886)
Problem: Contention on the connection c_mutex blocks the listener thread when autobind is performed.
Solution: Let the listener thread skip the connection if the mutex is held by another thread.
Reviewed by: @mreynolds389 , @droideck Thanks
(cherry picked from commit 599db0a450357e804072ca03421c9f65351cdf1f)
---
ldap/servers/slapd/daemon.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index e5b7d6e06..e44d0c9b5 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1675,7 +1675,13 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused
continue;
}
- pthread_mutex_lock(&(c->c_mutex));
+    /* Try to get the connection mutex; if it is not available, just skip the
+     * connection and process other connections' events. This may generate CPU
+     * load for the listener thread if a connection mutex is held for a long time.
+     */
+ if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
+ continue;
+ }
if (connection_is_active_nolock(c) && c->c_gettingber == 0) {
PRInt16 out_flags;
short readready;
--
2.41.0
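
The one-line change above replaces a blocking pthread_mutex_lock() with pthread_mutex_trylock(), so the listener never stalls behind a worker that holds a connection's c_mutex; the connection is simply revisited on a later poll pass. A minimal, self-contained sketch of that pattern (the connection struct, array, and "events" are invented for illustration):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t c_mutex;
    int             c_sd;
} FakeConnection;

/* Listener-style loop: skip any connection whose mutex is currently held
 * instead of blocking on it; it will be handled on a later pass. */
static void poll_connections(FakeConnection *conns, int nconns)
{
    for (int i = 0; i < nconns; i++) {
        if (pthread_mutex_trylock(&conns[i].c_mutex) == EBUSY) {
            continue;
        }
        printf("processing events on fd %d\n", conns[i].c_sd);
        pthread_mutex_unlock(&conns[i].c_mutex);
    }
}

int main(void)
{
    FakeConnection conns[2] = {
        { PTHREAD_MUTEX_INITIALIZER, 7 },
        { PTHREAD_MUTEX_INITIALIZER, 8 },
    };

    /* Lock the second connection up front so the loop has something to skip. */
    pthread_mutex_lock(&conns[1].c_mutex);
    poll_connections(conns, 2);      /* prints fd 7 only; fd 8 is skipped */
    pthread_mutex_unlock(&conns[1].c_mutex);
    return 0;
}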

@ -0,0 +1,35 @@
From 8e51448ede4852653568ace0f8587f911dfbf8f2 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 8 Aug 2023 17:27:16 +0200
Subject: [PATCH 11/11] Issue 4551 - Part 2 - Fix build warning of previous PR
(#5888)
Fix the header build warning introduced by the paged search result PR.
Issue: 4551
Reviewed by: @mreynolds389 Thanks
(cherry picked from commit 89c2de5093c4522366d16decf81c06d4e5f8f96a)
---
ldap/servers/slapd/proto-slap.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 47465c4c5..bb1f8aea2 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1551,8 +1551,8 @@ int slapd_do_all_nss_ssl_init(int slapd_exemode, int importexport_encrypt, int s
/*
* pagedresults.c
*/
-void pageresult_lock_init();
-void pageresult_lock_cleanup();
+void pageresult_lock_init(void);
+void pageresult_lock_cleanup(void);
pthread_mutex_t *pageresult_lock_get_addr(Connection *conn);
int pagedresults_parse_control_value(Slapi_PBlock *pb, struct berval *psbvp, ber_int_t *pagesize, int *index, Slapi_Backend *be);
void pagedresults_set_response_control(Slapi_PBlock *pb, int iscritical, ber_int_t estimate, int curr_search_count, int index);
--
2.41.0
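
For context on the warning this follow-up fixes: in C prior to C23, a declaration such as void f(); leaves the parameter list unspecified rather than empty, which modern compilers flag under strict-prototype warnings; void f(void) declares a function that genuinely takes no arguments. A tiny illustration (the function names here are made up):

#include <stdio.h>

/* Old style: no prototype, so argument checking is disabled at call sites
 * and strict-prototype warnings are emitted by modern compilers. */
void lock_init_old();

/* New style: an explicit prototype that takes no arguments. */
void lock_init_new(void);

void lock_init_old() { puts("old-style declaration"); }
void lock_init_new(void) { puts("prototyped declaration"); }

int main(void)
{
    lock_init_old();
    lock_init_new();
    return 0;
}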

@ -48,7 +48,7 @@ ExcludeArch: i686
Summary: 389 Directory Server (base)
Name: 389-ds-base
Version: 1.4.3.35
Release: %{?relprefix}1%{?prerel}%{?dist}
Release: %{?relprefix}2%{?prerel}%{?dist}
License: GPLv3+ and ASL 2.0 and MIT
URL: https://www.port389.org
Group: System Environment/Daemons
@ -273,6 +273,18 @@ Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download
Source4: vendor-%{version}-1.tar.gz
Source5: Cargo-%{version}.lock
%endif
Patch01: 0001-Issue-5789-Improve-ds-replcheck-error-handling.patch
Patch02: 0002-Issue-5646-Various-memory-leaks-5725.patch
Patch03: 0003-Issue-2375-CLI-Healthcheck-revise-and-add-new-checks.patch
Patch04: 0004-Issue-5825-healthcheck-password-storage-scheme-warni.patch
Patch05: 0005-Issue-4551-Paged-search-impacts-performance-5838.patch
Patch06: 0006-Issue-5804-dtablesize-being-set-to-soft-maxfiledescr.patch
Patch07: 0007-Issue-5825-healthcheck-update-allowed-password-schem.patch
Patch08: 0008-Issue-5864-Server-fails-to-start-after-reboot-becaus.patch
Patch09: 0009-Issue-2375-CLI-Healthcheck-revise-and-add-new-checks.patch
Patch10: 0010-Issue-5883-Remove-connection-mutex-contention-risk-o.patch
Patch11: 0011-Issue-4551-Part-2-Fix-build-warning-of-previous-PR-5.patch
%description
389 Directory Server is an LDAPv3 compliant server. The base package includes
@ -894,9 +906,13 @@ exit 0
%doc README.md
%changelog
* Fri Dec 08 2023 MSVSphere Packaging Team <packager@msvsphere-os.ru> - 1.4.3.35-1
* Fri Dec 08 2023 MSVSphere Packaging Team <packager@msvsphere-os.ru> - 1.4.3.35-2
- Rebuilt for MSVSphere 8.8
* Wed Aug 16 2023 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.35-2
- Bump version to 1.4.3.35-2
- Resolves: rhbz#2232420 - Paged search impacts performance
* Wed Jun 21 2023 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.35-1
- Bump version to 1.4.3.35-1
- Resolves: Bug 2213861 - unable to update 389-ds-base on RHEL8.8
