import 389-ds-base-2.4.5-9.el9_4

c9 imports/c9/389-ds-base-2.4.5-9.el9_4
MSVSphere Packaging Team 1 month ago
parent 84f2767280
commit 92d7626784

@@ -0,0 +1,145 @@
From 52a9ee6556a0467f5134fb6392ff1681a38f3252 Mon Sep 17 00:00:00 2001
From: Pierre Rogier <progier@redhat.com>
Date: Fri, 14 Jun 2024 13:27:10 +0200
Subject: [PATCH] CVE-2024-5953
---
.../tests/suites/password/regression_test.py | 51 ++++++++++++++++++-
ldap/servers/plugins/pwdstorage/md5_pwd.c | 9 +++-
ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c | 6 +++
3 files changed, 64 insertions(+), 2 deletions(-)
diff --git a/dirsrvtests/tests/suites/password/regression_test.py b/dirsrvtests/tests/suites/password/regression_test.py
index 4876ff435..160d6f01d 100644
--- a/dirsrvtests/tests/suites/password/regression_test.py
+++ b/dirsrvtests/tests/suites/password/regression_test.py
@@ -8,11 +8,12 @@
import pytest
import time
import glob
+import base64
from lib389._constants import PASSWORD, DN_DM, DEFAULT_SUFFIX
from lib389._constants import SUFFIX, PASSWORD, DN_DM, DN_CONFIG, PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DEFAULT_CHANGELOG_DB, DEFAULT_BENAME
from lib389 import Entry
from lib389.topologies import topology_m1 as topo_supplier
-from lib389.idm.user import UserAccounts
+from lib389.idm.user import UserAccounts, UserAccount
from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer, ds_supports_new_changelog
from lib389.topologies import topology_st as topo
from lib389.idm.organizationalunit import OrganizationalUnits
@@ -40,6 +41,13 @@ TEST_PASSWORDS += ['CNpwtest1ZZZZ', 'ZZZZZCNpwtest1',
TEST_PASSWORDS2 = (
'CN12pwtest31', 'SN3pwtest231', 'UID1pwtest123', 'MAIL2pwtest12@redhat.com', '2GN1pwtest123', 'People123')
+SUPPORTED_SCHEMES = (
+ "{SHA}", "{SSHA}", "{SHA256}", "{SSHA256}",
+ "{SHA384}", "{SSHA384}", "{SHA512}", "{SSHA512}",
+ "{crypt}", "{NS-MTA-MD5}", "{clear}", "{MD5}",
+ "{SMD5}", "{PBKDF2_SHA256}", "{PBKDF2_SHA512}",
+ "{GOST_YESCRYPT}", "{PBKDF2-SHA256}", "{PBKDF2-SHA512}" )
+
def _check_unhashed_userpw(inst, user_dn, is_present=False):
"""Check if unhashed#user#password attribute is present or not in the changelog"""
unhashed_pwd_attribute = 'unhashed#user#password'
@@ -319,6 +327,47 @@ def test_unhashed_pw_switch(topo_supplier):
# Add debugging steps(if any)...
pass
+@pytest.mark.parametrize("scheme", SUPPORTED_SCHEMES )
+def test_long_hashed_password(topo, create_user, scheme):
+    """Check that a hashed password with a very long value does not cause trouble
+
+ :id: 252a1f76-114b-11ef-8a7a-482ae39447e5
+    :setup: Standalone instance
+ :parametrized: yes
+ :steps:
+        1. Add a test user
+ 2. Set a long password with requested scheme
+ 3. Bind on that user using a wrong password
+ 4. Check that instance is still alive
+ 5. Remove the added user
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Should get ldap.INVALID_CREDENTIALS exception
+ 4. Success
+ 5. Success
+ """
+ inst = topo.standalone
+ inst.simple_bind_s(DN_DM, PASSWORD)
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+    # Make sure the server is started, as this test may crash it
+ inst.start()
+    # Add the test user (it may already exist if a previous test failed)
+ user2 = UserAccount(inst, dn='uid=test_user_1002,ou=People,dc=example,dc=com')
+ if not user2.exists():
+ user2 = users.create_test_user(uid=1002, gid=2002)
+ # Setting hashed password
+ passwd = 'A'*4000
+ hashed_passwd = scheme.encode('utf-8') + base64.b64encode(passwd.encode('utf-8'))
+ user2.replace('userpassword', hashed_passwd)
+ # Bind on that user using a wrong password
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ conn = user2.bind(PASSWORD)
+ # Check that instance is still alive
+ assert inst.status()
+ # Remove the added user
+ user2.delete()
+
if __name__ == '__main__':
# Run isolated
diff --git a/ldap/servers/plugins/pwdstorage/md5_pwd.c b/ldap/servers/plugins/pwdstorage/md5_pwd.c
index 1e2cf58e7..b9a48d5ca 100644
--- a/ldap/servers/plugins/pwdstorage/md5_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/md5_pwd.c
@@ -37,6 +37,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
unsigned char hash_out[MD5_HASH_LEN];
unsigned char b2a_out[MD5_HASH_LEN * 2]; /* conservative */
SECItem binary_item;
+ size_t dbpwd_len = strlen(dbpwd);
ctx = PK11_CreateDigestContext(SEC_OID_MD5);
if (ctx == NULL) {
@@ -45,6 +46,12 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
goto loser;
}
+ if (dbpwd_len >= sizeof b2a_out) {
+ slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
+ "The hashed password stored in the user entry is longer than any valid md5 hash");
+ goto loser;
+ }
+
/* create the hash */
PK11_DigestBegin(ctx);
PK11_DigestOp(ctx, (const unsigned char *)userpwd, strlen(userpwd));
@@ -57,7 +64,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
bver = NSSBase64_EncodeItem(NULL, (char *)b2a_out, sizeof b2a_out, &binary_item);
/* bver points to b2a_out upon success */
if (bver) {
- rc = slapi_ct_memcmp(bver, dbpwd, strlen(dbpwd));
+ rc = slapi_ct_memcmp(bver, dbpwd, dbpwd_len);
} else {
slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
"Could not base64 encode hashed value for password compare");
diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
index dcac4fcdd..82b8c9501 100644
--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
@@ -255,6 +255,12 @@ pbkdf2_sha256_pw_cmp(const char *userpwd, const char *dbpwd)
passItem.data = (unsigned char *)userpwd;
passItem.len = strlen(userpwd);
+ if (pwdstorage_base64_decode_len(dbpwd, dbpwd_len) > sizeof dbhash) {
+ /* Hashed value is too long and cannot match any value generated by pbkdf2_sha256_hash */
+ slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value. (hashed value is too long)\n");
+ return result;
+ }
+
/* Decode the DBpwd to bytes from b64 */
if (PL_Base64Decode(dbpwd, dbpwd_len, dbhash) == NULL) {
slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value\n");
--
2.44.0
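
For context on the pbkdf2 hunk above: before base64-decoding the stored value into the fixed-size dbhash buffer, the patch bounds the maximum possible decoded length (the md5 hunk applies the same idea directly to the encoded length). A minimal standalone sketch of that bound, with hypothetical helper names; the plugin itself uses pwdstorage_base64_decode_len:

    #include <string.h>

    /* Decoding base64 yields at most 3 output bytes for every 4 input
     * characters, so this is an upper bound on the decoded size. */
    static size_t
    b64_decoded_len_bound(size_t b64_len)
    {
        return ((b64_len + 3) / 4) * 3;
    }

    /* Reject a stored hash whose decoded form could not possibly fit
     * the buffer a valid hash occupies; oversized userPassword values
     * are what caused the denial of service fixed by this patch. */
    static int
    stored_hash_fits(const char *dbpwd, size_t buf_size)
    {
        return b64_decoded_len_bound(strlen(dbpwd)) <= buf_size;
    }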

@@ -0,0 +1,25 @@
From 323f74c69f84a8482413ecd73cf61d09cfc4a0a1 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Mon, 24 Jun 2024 15:51:28 +0200
Subject: [PATCH] CVE-2024-6237
---
ldap/servers/plugins/syntaxes/inchain.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ldap/servers/plugins/syntaxes/inchain.c b/ldap/servers/plugins/syntaxes/inchain.c
index df19c973b..0a6a04e9f 100644
--- a/ldap/servers/plugins/syntaxes/inchain.c
+++ b/ldap/servers/plugins/syntaxes/inchain.c
@@ -277,7 +277,7 @@ inchain_values2keys(Slapi_PBlock *pb, Slapi_Value **vals, Slapi_Value ***ivals,
slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &base_sdn);
if (! slapi_attr_is_dn_syntax_type(mrTYPE)) {
- slapi_log_err(SLAPI_LOG_ERR, "inchain", "Requires distinguishedName syntax. AttributeDescription %s is not distinguishedName\n");
+ slapi_log_err(SLAPI_LOG_ERR, "inchain", "Requires distinguishedName syntax. AttributeDescription %s is not distinguishedName\n", mrTYPE);
result = (Slapi_Value **)slapi_ch_calloc(1, sizeof(Slapi_Value *));
*ivals = result;
return(0);
--
2.44.0
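
The one-line fix above supplies the argument that the %s conversion in the log format string expects. For illustration only (not the server's code): a printf-style call whose format names a %s with no matching argument reads an arbitrary pointer from the variadic area, undefined behavior that can crash the process (the unauthenticated DoS in this CVE).

    #include <stdio.h>

    void
    log_missing_arg(void)
    {
        /* BUG (sketch of the pre-patch pattern): %s has no matching
         * argument, so an arbitrary pointer is dereferenced. */
        fprintf(stderr, "AttributeDescription %s is not distinguishedName\n");
    }

    void
    log_fixed(const char *attr_type)
    {
        /* The fix: pass the argument the format string consumes. */
        fprintf(stderr, "AttributeDescription %s is not distinguishedName\n",
                attr_type);
    }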

@@ -0,0 +1,192 @@
From 9501c34df01e35f483201a4bba12a93091b2b13f Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Thu, 13 Jun 2024 15:17:36 +0200
Subject: [PATCH] Issue 5772 - ONE LEVEL search fails to return sub-suffixes
(#6219)
Problem: A ONE LEVEL scoped search fails to return sub-suffix entries.
Reason: When such a search is done, a one level search is done on the main suffix and base searches are done on any matching sub-suffix. But the main suffix search is processed first (to ensure that parent entries are returned before their children when searching a subtree) and ldbm_back_search changes the filter to (&(parentid=xxx)old_filter), so the filter test rejects the entries on the sub-suffixes.
Solution: Reverse the backend list when doing a one level search so that the sub-suffixes are processed first,
and restore the base dn for the main suffix. (A standalone sketch of the reversal follows this patch.)
Alternative rejected: reset the filter when discovering a sub-suffix. Not so easy, because the filter is altered by the rewriters,
and systematic duplication is a useless overhead if there are no matching sub-suffixes (which is the usual case).
Issue: #5772
Reviewed by: @tbordaz, @droideck (Thanks!)
(cherry picked from commit 407bdaa00d1da9f5ff53d66a2e012b17ad658907)
---
.../suites/mapping_tree/regression_test.py | 36 +++++++++++++++++-
ldap/servers/slapd/filterentry.c | 38 ++++++++++++++++++-
ldap/servers/slapd/opshared.c | 22 ++++++++++-
3 files changed, 92 insertions(+), 4 deletions(-)
diff --git a/dirsrvtests/tests/suites/mapping_tree/regression_test.py b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
index f4877da2b..c3fc2c0a2 100644
--- a/dirsrvtests/tests/suites/mapping_tree/regression_test.py
+++ b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
@@ -92,7 +92,6 @@ EXPECTED_ENTRIES = (("dc=parent", 39), ("dc=child1,dc=parent", 13), ("dc=child2,
@pytest.mark.skipif(not has_orphan_attribute, reason = "compatibility attribute not yet implemented in this version")
def test_sub_suffixes(topo, orphan_param):
""" check the entries found on suffix/sub-suffix
- used int
:id: 5b4421c2-d851-11ec-a760-482ae39447e5
:feature: mapping-tree
@@ -122,8 +121,41 @@ def test_sub_suffixes(topo, orphan_param):
log.info(f'Verifying domain component entries count for search under {suffix} ...')
entries = topo.standalone.search_s(suffix, ldap.SCOPE_SUBTREE, "(dc=*)")
assert len(entries) == expected
- log.info('Found {expected} domain component entries as expected while searching {suffix}')
+ log.info(f'Found {expected} domain component entries as expected while searching {suffix}')
log.info('Test PASSED')
+def test_one_level_search_on_sub_suffixes(topo):
+    """ Perform a one level scoped search across a suffix and its sub-suffixes
+
+ :id: 92f3139e-280e-11ef-a989-482ae39447e5
+ :feature: mapping-tree
+ :setup: Standalone instance with 3 additional backends:
+        dc=parent, dc=child1,dc=parent, dc=child2,dc=parent
+ :steps:
+ 1. Perform a ONE LEVEL search on dc=parent
+ 2. Check that all expected entries have been returned
+ 3. Check that only the expected entries have been returned
+ :expectedresults:
+ 1. Success
+        2. Each expected dn should be in the result set
+ 3. Number of returned entries should be the same as the number of expected entries
+ """
+ expected_dns = ( 'dc=child1,dc=parent',
+ 'dc=child2,dc=parent',
+ 'ou=accounting,dc=parent',
+ 'ou=product development,dc=parent',
+ 'ou=product testing,dc=parent',
+ 'ou=human resources,dc=parent',
+ 'ou=payroll,dc=parent',
+ 'ou=people,dc=parent',
+ 'ou=groups,dc=parent', )
+ entries = topo.standalone.search_s("dc=parent", ldap.SCOPE_ONELEVEL, "(objectClass=*)",
+ attrlist=("dc","ou"), escapehatch='i am sure')
+ log.info(f'one level search on dc=parent returned the following entries: {entries}')
+ dns = [ entry.dn for entry in entries ]
+ for dn in expected_dns:
+ assert dn in dns
+ assert len(entries) == len(expected_dns)
+
diff --git a/ldap/servers/slapd/filterentry.c b/ldap/servers/slapd/filterentry.c
index 4de4aa66e..d2c7e3082 100644
--- a/ldap/servers/slapd/filterentry.c
+++ b/ldap/servers/slapd/filterentry.c
@@ -240,6 +240,36 @@ slapi_filter_test_ext(
}
+static const char *
+filter_type_as_string(int filter_type)
+{
+ switch (filter_type) {
+ case LDAP_FILTER_AND:
+ return "&";
+ case LDAP_FILTER_OR:
+ return "|";
+ case LDAP_FILTER_NOT:
+ return "!";
+ case LDAP_FILTER_EQUALITY:
+ return "=";
+ case LDAP_FILTER_SUBSTRINGS:
+ return "*";
+ case LDAP_FILTER_GE:
+ return ">=";
+ case LDAP_FILTER_LE:
+ return "<=";
+ case LDAP_FILTER_PRESENT:
+ return "=*";
+ case LDAP_FILTER_APPROX:
+ return "~";
+ case LDAP_FILTER_EXT:
+ return "EXT";
+ default:
+ return "?";
+ }
+}
+
+
int
test_ava_filter(
Slapi_PBlock *pb,
@@ -253,7 +283,13 @@ test_ava_filter(
{
int rc;
- slapi_log_err(SLAPI_LOG_FILTER, "test_ava_filter", "=>\n");
+ if (slapi_is_loglevel_set(SLAPI_LOG_FILTER)) {
+ char *val = slapi_berval_get_string_copy(&ava->ava_value);
+ char buf[BUFSIZ];
+ slapi_log_err(SLAPI_LOG_FILTER, "test_ava_filter", "=> AVA: %s%s%s\n",
+ ava->ava_type, filter_type_as_string(ftype), escape_string(val, buf));
+ slapi_ch_free_string(&val);
+ }
*access_check_done = 0;
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index f77043afa..540597f45 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -219,6 +219,7 @@ cache_return_target_entry(Slapi_PBlock *pb, Slapi_Backend *be, Slapi_Operation *
operation_set_target_entry_id(operation, 0);
}
}
+
/*
* Returns: 0 - if the operation is successful
* < 0 - if operation fails.
@@ -481,6 +482,20 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
while (be_list[index] && be_list[index + 1]) {
index++;
}
+ if (scope == LDAP_SCOPE_ONELEVEL) {
+ /*
+             * A ONE LEVEL search may end up on multiple backends,
+             * with a ONE LEVEL search on a suffix and BASE searches on its
+             * sub-suffixes. Because the ONE LEVEL search rewrites the filter,
+             * the backend list should be reversed so that the BASE search(es)
+             * are done first (with the original filter).
+ */
+ for (int idx = 0; idx <= index/2; idx++) {
+ be = be_list[index-idx];
+ be_list[index-idx] = be_list[idx];
+ be_list[idx] = be;
+ }
+ }
be = be_list[index];
} else {
be = NULL;
@@ -779,7 +794,6 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
(slapi_sdn_get_ndn_len(basesdn) == 0)) {
int tmp_scope = LDAP_SCOPE_BASE;
slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &tmp_scope);
-
if (free_sdn) {
slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &sdn);
slapi_sdn_free(&sdn);
@@ -790,6 +804,12 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
} else if (slapi_sdn_issuffix(basesdn, be_suffix)) {
int tmp_scope = LDAP_SCOPE_ONELEVEL;
slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &tmp_scope);
+ if (free_sdn) {
+ slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &sdn);
+ slapi_sdn_free(&sdn);
+ sdn = slapi_sdn_dup(basesdn);
+ slapi_pblock_set(pb, SLAPI_SEARCH_TARGET_SDN, (void *)sdn);
+ }
} else {
slapi_sdn_done(&monitorsdn);
goto next_be;
--
2.45.2
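
As noted in the commit message, the opshared.c hunk reverses the backend list in place. A standalone sketch of that swap (generic pointers stand in for Slapi_Backend, for self-containment):

    /* Swap elements from both ends toward the middle so that the
     * sub-suffix backends (searched with SCOPE_BASE and the original
     * filter) are processed before the main suffix, whose ONE LEVEL
     * search rewrites the filter to (&(parentid=xxx)old_filter). */
    static void
    reverse_backend_list(void **be_list, int last_index)
    {
        for (int idx = 0; idx <= last_index / 2; idx++) {
            void *tmp = be_list[last_index - idx];
            be_list[last_index - idx] = be_list[idx];
            be_list[idx] = tmp;
        }
    }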

@@ -0,0 +1,220 @@
From 45e14d64c47080951d9bfa3bcfe8c267d01ce251 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 22 May 2024 11:29:05 +0200
Subject: [PATCH] Issue 6172 - RFE: improve the performance of evaluation of
filter component when tested against a large valueset (like group members)
(#6173)
Bug description:
Before returning an entry (to a SRCH) the server checks that the entry matches the SRCH filter.
If a filter component (equality) tests the value (ava) against a
large valueset (like uniquemember values), it takes a long time because
of the large number of values and the required normalization of the values.
This can be improved by taking advantage of the sorted valueset. Those sorted
valuesets were created to improve updates of large valuesets (groups) but
at that time were not used in the SRCH path.
Fix description:
In case of an LDAP_FILTER_EQUALITY component, the server can
benefit from the sorted valuearray.
To limit the risk of regression, we use the sorted valuearray
only for DN syntax attributes. Indeed, the sorted valuearray was
designed for that type of attribute.
With those two limitations, there is no need for a toggle and
the call to plugin_call_syntax_filter_ava can be replaced by
a call to slapi_valueset_find. (A standalone sketch of the lookup
follows this patch.)
In both cases, sorted valueset and plugin_call_syntax_filter_ava, the ava and
the values are normalized.
In the sorted valueset, the values have been normalized to insert the index
into the sorted array, and the comparison is then done on normalized values.
In plugin_call_syntax_filter_ava, all values in the valuearray (of the valueset)
are normalized before comparison.
relates: #6172
Reviewed by: Pierre Rogier, Simon Pichugin (Big Thanks !!!)
---
.../tests/suites/filter/filter_test.py | 125 ++++++++++++++++++
ldap/servers/slapd/filterentry.c | 22 ++-
2 files changed, 146 insertions(+), 1 deletion(-)
diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py
index d6bfa5a3b..4baaf04a7 100644
--- a/dirsrvtests/tests/suites/filter/filter_test.py
+++ b/dirsrvtests/tests/suites/filter/filter_test.py
@@ -9,7 +9,11 @@
import logging
import pytest
+import time
+from lib389.dirsrv_log import DirsrvAccessLog
from lib389.tasks import *
+from lib389.backend import Backends, Backend
+from lib389.dbgen import dbgen_users, dbgen_groups
from lib389.topologies import topology_st
from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX
from lib389.utils import *
@@ -304,6 +308,127 @@ def test_extended_search(topology_st):
ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
assert len(ents) == 1
+def test_match_large_valueset(topology_st):
+    """Test that when returning a large number of entries
+    whose filter must be matched against a large valueset,
+    we benefit from using the sorted valueset
+
+ :id: 7db5aa88-50e0-4c31-85dd-1d2072cb674c
+
+ :setup: Standalone instance
+
+ :steps:
+        1. Create the users and groups backends and tune them
+        2. Generate a test ldif (2k users and 1k groups with all users)
+        3. Import the test ldif file using offline import (ldif2db).
+        4. Prime the 'groups' entrycache with a "fast" search
+        5. Search the 'groups' with a difficult matching value
+        6. Check that the etime from step 5 is less than a second
+
+ :expectedresults:
+        1. Creating the users and groups backends should PASS.
+        2. Generating the LDIF should PASS.
+ 3. Offline import should PASS.
+ 4. Priming should PASS.
+ 5. Performance search should PASS.
+ 6. Etime of performance search should PASS.
+ """
+
+ log.info('Running test_match_large_valueset...')
+ #
+    # Prepare backends, generate LDIFs and import them offline
+ #
+ inst = topology_st.standalone
+ inst.start()
+ backends = Backends(inst)
+ users_suffix = "ou=users,%s" % DEFAULT_SUFFIX
+ users_backend = 'users'
+ users_ldif = 'users_import.ldif'
+ groups_suffix = "ou=groups,%s" % DEFAULT_SUFFIX
+ groups_backend = 'groups'
+ groups_ldif = 'groups_import.ldif'
+ groups_entrycache = '200000000'
+ users_number = 2000
+ groups_number = 1000
+
+
+ # For priming the cache we just want to be fast
+ # taking the first value in the valueset is good
+ # whether the valueset is sorted or not
+ priming_user_rdn = "user0001"
+
+ # For performance testing, this is important to use
+    # user1000 rather than user0001
+ # Because user0001 is the first value in the valueset
+    # whether we use the sorted valuearray or the non-sorted
+    # valuearray, the performance will be similar.
+ # With middle value user1000, the performance boost of
+ # the sorted valuearray will make the difference.
+ perf_user_rdn = "user1000"
+
+ # Step 1. Prepare the backends and tune the groups entrycache
+ try:
+ be_users = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': users_suffix, 'name': users_backend})
+ be_groups = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': groups_suffix, 'name': groups_backend})
+
+ # set the entry cache to 200Mb as the 1K groups of 2K users require at least 170Mb
+ be_groups.replace('nsslapd-cachememsize', groups_entrycache)
+ except:
+ raise
+
+    # Step 2. Generate a test ldif (2k user entries)
+ log.info("Generating users LDIF...")
+ ldif_dir = inst.get_ldif_dir()
+ users_import_ldif = "%s/%s" % (ldif_dir, users_ldif)
+ groups_import_ldif = "%s/%s" % (ldif_dir, groups_ldif)
+ dbgen_users(inst, users_number, users_import_ldif, suffix=users_suffix, generic=True, parent=users_suffix)
+
+    # Generate a test ldif (1k groups with 2k members each) that fits in the 200Mb entry cache
+ props = {
+ "name": "group",
+ "suffix": groups_suffix,
+ "parent": groups_suffix,
+ "number": groups_number,
+ "numMembers": users_number,
+ "createMembers": False,
+ "memberParent": users_suffix,
+ "membershipAttr": "uniquemember",
+ }
+ dbgen_groups(inst, groups_import_ldif, props)
+
+    # Step 3. Do both offline imports
+ inst.stop()
+ if not inst.ldif2db(users_backend, None, None, None, users_import_ldif):
+ log.fatal('test_basic_import_export: Offline users import failed')
+ assert False
+ if not inst.ldif2db(groups_backend, None, None, None, groups_import_ldif):
+ log.fatal('test_basic_import_export: Offline groups import failed')
+ assert False
+ inst.start()
+
+    # Step 4. First prime the cache.
+    # Just request the 'dn'. We are interested in the matching time, not the transfer time.
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (priming_user_rdn, users_suffix), ['dn'])
+ assert len(entries) == groups_number
+
+    # Step 5. Now do the real performance check; it should take less than a second.
+    # Just request the 'dn'. We are interested in the matching time, not the transfer time.
+ search_start = time.time()
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (perf_user_rdn, users_suffix), ['dn'])
+ duration = time.time() - search_start
+ log.info("Duration of the search was %f", duration)
+
+ # Step 6. Gather the etime from the access log
+ inst.stop()
+ access_log = DirsrvAccessLog(inst)
+ search_result = access_log.match(".*RESULT err=0 tag=101 nentries=%s.*" % groups_number)
+ log.info("Found patterns are %s", search_result[0])
+ log.info("Found patterns are %s", search_result[1])
+ etime = float(search_result[1].split('etime=')[1])
+ log.info("Duration of the search from access log was %f", etime)
+ assert len(entries) == groups_number
+ assert (etime < 1)
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/filterentry.c b/ldap/servers/slapd/filterentry.c
index 2a7102828..4de4aa66e 100644
--- a/ldap/servers/slapd/filterentry.c
+++ b/ldap/servers/slapd/filterentry.c
@@ -296,7 +296,27 @@ test_ava_filter(
rc = -1;
for (; a != NULL; a = a->a_next) {
if (slapi_attr_type_cmp(ava->ava_type, a->a_type, SLAPI_TYPE_CMP_SUBTYPE) == 0) {
- rc = plugin_call_syntax_filter_ava(a, ftype, ava);
+ if ((ftype == LDAP_FILTER_EQUALITY) &&
+ (slapi_attr_is_dn_syntax_type(a->a_type))) {
+ /* This path is for a performance improvement */
+
+                /* In the case of an equality filter we can benefit from the
+                 * sorted valuearray (from the valueset).
+ * This improvement is limited to DN syntax attributes for
+ * which the sorted valueset was designed.
+ */
+ Slapi_Value *sval = NULL;
+ sval = slapi_value_new_berval(&ava->ava_value);
+ if (slapi_valueset_find((const Slapi_Attr *)a, &a->a_present_values, sval)) {
+ rc = 0;
+ }
+ slapi_value_free(&sval);
+ } else {
+                /* When the sorted valuearray optimization cannot be used,
+                 * let's filter the value according to its syntax
+ */
+ rc = plugin_call_syntax_filter_ava(a, ftype, ava);
+ }
if (rc == 0) {
break;
}
--
2.45.2
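
As noted in the commit message, the equality fast path calls slapi_valueset_find against the sorted valuearray. A standalone sketch of why that helps (a flat array of already-normalized strings stands in for the server's Slapi_ValueSet): the equality test drops from a linear scan with per-value normalization to a logarithmic binary search.

    #include <string.h>

    /* Binary search over a sorted array of normalized values:
     * O(log n) comparisons instead of O(n) normalize-and-compare. */
    static int
    sorted_values_contain(char **sorted_norm_values, size_t n,
                          const char *norm_assertion)
    {
        size_t lo = 0, hi = n;
        while (lo < hi) {
            size_t mid = lo + (hi - lo) / 2;
            int cmp = strcmp(sorted_norm_values[mid], norm_assertion);
            if (cmp == 0)
                return 1; /* the filter component matches */
            if (cmp < 0)
                lo = mid + 1;
            else
                hi = mid;
        }
        return 0; /* no matching value */
    }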

@@ -47,7 +47,7 @@ ExcludeArch: i686
Summary: 389 Directory Server (base)
Name: 389-ds-base
Version: 2.4.5
-Release: 8%{?dist}
+Release: 9%{?dist}
License: GPL-3.0-or-later AND (0BSD OR Apache-2.0 OR MIT) AND (Apache-2.0 OR Apache-2.0 WITH LLVM-exception OR MIT) AND (Apache-2.0 OR BSL-1.0) AND (Apache-2.0 OR MIT OR Zlib) AND (Apache-2.0 OR MIT) AND (MIT OR Apache-2.0) AND Unicode-DFS-2016 AND (MIT OR Unlicense) AND Apache-2.0 AND BSD-3-Clause AND MIT AND MPL-2.0
URL: https://www.port389.org
Conflicts: selinux-policy-base < 3.9.8
@@ -282,11 +282,15 @@ Source2: %{name}-devel.README
Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2
%endif
Source4: 389-ds-base.sysusers
-Patch0: 0001-Issue-3527-Support-HAProxy-and-Instance-on-the-same-.patch
-Patch1: 0002-Issue-6112-RFE-add-new-operation-note-for-MFA-authen.patch
-Patch2: 0003-Issue-6133-Move-slapi_pblock_set_flag_operation_note.patch
-Patch3: 0004-CVE-2024-2199.patch
-Patch4: 0005-CVE-2024-3657.patch
+Patch01: 0001-Issue-3527-Support-HAProxy-and-Instance-on-the-same-.patch
+Patch02: 0002-Issue-6112-RFE-add-new-operation-note-for-MFA-authen.patch
+Patch03: 0003-Issue-6133-Move-slapi_pblock_set_flag_operation_note.patch
+Patch04: 0004-CVE-2024-2199.patch
+Patch05: 0005-CVE-2024-3657.patch
+Patch06: 0006-CVE-2024-5953.patch
+Patch07: 0007-CVE-2024-6237.patch
+Patch08: 0008-Issue-5772-ONE-LEVEL-search-fails-to-return-sub-suff.patch
+Patch09: 0009-Issue-6172-RFE-improve-the-performance-of-evaluation.patch
%description
@@ -730,6 +734,13 @@ exit 0
%endif
%changelog
+* Tue Jul 16 2024 James Chapman <jachapma@redhat.com> - 2.4.5-9
+- Bump version to 2.4.5-9
+- Resolves: RHEL-44323 - unauthenticated user can trigger a DoS by sending a specific extended search request
+- Resolves: RHEL-40945 - Malformed userPassword hash may cause Denial of Service
+- Resolves: RHEL-49457 - perf search result investigation for many large static groups and members
+- Resolves: RHEL-49459 - subsuffix are not returned in one level scoped search
* Fri May 31 2024 Viktor Ashirov <vashirov@redhat.com> - 2.4.5-8
- Bump version to 2.4.5-8
- Fix License tag
