i8c-stream-1.4
changed/i8c-stream-1.4/389-ds-base-1.4.3.39-8.module+el8.10.0+22275+e4fc04d4
parent bf45329f1e
commit 1a0be3e95e
@@ -0,0 +1,220 @@
From 8cf981c00ae18d3efaeb10819282cd991621e9a2 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 22 May 2024 11:29:05 +0200
Subject: [PATCH] Issue 6172 - RFE: improve the performance of evaluation of
 filter component when tested against a large valueset (like group members)
 (#6173)

Bug description:
Before returning an entry (to a SRCH) the server checks that the entry matches the SRCH filter.
If a filter component (equality) is testing the value (ava) against a
large valueset (like uniquemember values), it takes a long time because
of the large number of values and the required normalization of those values.
This can be improved by taking advantage of sorted valuesets. Sorted
valuesets were created to speed up updates of large valuesets (groups)
but were, at that time, not used in the SRCH path.

Fix description:
In the case of an LDAP_FILTER_EQUALITY component, the server can
benefit from the sorted valuearray.
To limit the risk of regression, we use the sorted valuearray
only for DN syntax attributes; indeed, the sorted valuearray was
designed for that type of attribute.
With those two limitations there is no need for a toggle, and
the call to plugin_call_syntax_filter_ava can be replaced by
a call to slapi_valueset_find.
In both cases (sorted valueset and plugin_call_syntax_filter_ava) the
ava and the values are normalized:
in a sorted valueset the values were normalized when they were inserted
into the sorted array, so the comparison is done on normalized values;
in plugin_call_syntax_filter_ava all values in the valuearray (of the
valueset) are normalized before comparison.

relates: #6172

Reviewed by: Pierre Rogier, Simon Pichugin (Big Thanks !!!)
---
 .../tests/suites/filter/filter_test.py | 125 ++++++++++++++++++
 ldap/servers/slapd/filterentry.c       |  22 ++-
 2 files changed, 146 insertions(+), 1 deletion(-)

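The gain described above comes from replacing a linear scan, which normalizes every stored value on each comparison, with a binary search over values that were already normalized when they were inserted into the sorted array. Below is a minimal Python sketch of that idea (illustrative only; normalize_dn is an invented stand-in for the server's DN normalization, not a 389-ds API):

    # Sketch only (not 389-ds code): equality match against a sorted,
    # pre-normalized valueset, instead of normalizing and scanning
    # every value on each SRCH.
    import bisect

    def normalize_dn(value):
        # Illustrative stand-in for the server's DN normalization.
        return ",".join(part.strip().lower() for part in value.split(","))

    def valueset_find(sorted_norm_values, assertion):
        # O(log n) comparisons; the stored values were normalized once,
        # when they were inserted into the sorted array.
        key = normalize_dn(assertion)
        i = bisect.bisect_left(sorted_norm_values, key)
        return i < len(sorted_norm_values) and sorted_norm_values[i] == key

    members = sorted(normalize_dn("uid=user%04d,ou=users,dc=example,dc=com" % i)
                     for i in range(2000))
    assert valueset_find(members, "UID=user1000, ou=Users, dc=example, dc=com")
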
diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py
index d6bfa5a3b..4baaf04a7 100644
--- a/dirsrvtests/tests/suites/filter/filter_test.py
+++ b/dirsrvtests/tests/suites/filter/filter_test.py
@@ -9,7 +9,11 @@
 import logging
 
 import pytest
+import time
+from lib389.dirsrv_log import DirsrvAccessLog
 from lib389.tasks import *
+from lib389.backend import Backends, Backend
+from lib389.dbgen import dbgen_users, dbgen_groups
 from lib389.topologies import topology_st
 from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX
 from lib389.utils import *
@@ -304,6 +308,127 @@ def test_extended_search(topology_st):
     ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
     assert len(ents) == 1
 
+def test_match_large_valueset(topology_st):
+    """Test that when returning a large number of entries,
+    where the filter has to match against a large valueset,
+    we benefit from the sorted valueset
+
+    :id: 7db5aa88-50e0-4c31-85dd-1d2072cb674c
+
+    :setup: Standalone instance
+
+    :steps:
+         1. Create users and groups backends and tune them
+         2. Generate a test ldif (2k users and 1k groups containing all users)
+         3. Import the test ldif files using offline import (ldif2db)
+         4. Prime the 'groups' entrycache with a "fast" search
+         5. Search the 'groups' with a difficult matching value
+         6. Check that the etime from step 5 is less than a second
+
+    :expectedresults:
+         1. Creating the users and groups backends should PASS
+         2. Generating the LDIF should PASS.
+         3. Offline imports should PASS.
+         4. Priming should PASS.
+         5. Performance search should PASS.
+         6. Etime of the performance search should PASS.
+    """
+
+    log.info('Running test_match_large_valueset...')
+    #
+    # Set up the backends and import the test data
+    #
+    inst = topology_st.standalone
+    inst.start()
+    backends = Backends(inst)
+    users_suffix = "ou=users,%s" % DEFAULT_SUFFIX
+    users_backend = 'users'
+    users_ldif = 'users_import.ldif'
+    groups_suffix = "ou=groups,%s" % DEFAULT_SUFFIX
+    groups_backend = 'groups'
+    groups_ldif = 'groups_import.ldif'
+    groups_entrycache = '200000000'
+    users_number = 2000
+    groups_number = 1000
+
+
+    # For priming the cache we just want to be fast;
+    # taking the first value in the valueset is good
+    # whether the valueset is sorted or not
+    priming_user_rdn = "user0001"
+
+    # For performance testing it is important to use
+    # user1000 rather than user0001,
+    # because user0001 is the first value in the valueset:
+    # whether we use the sorted valuearray or the non-sorted
+    # valuearray, the performance will be similar.
+    # With the middle value user1000, the performance boost of
+    # the sorted valuearray makes the difference.
+    perf_user_rdn = "user1000"
+
+    # Step 1. Prepare the backends and tune the groups entrycache
+    try:
+        be_users = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': users_suffix, 'name': users_backend})
+        be_groups = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': groups_suffix, 'name': groups_backend})
+
+        # Set the entry cache to 200Mb, as the 1k groups of 2k users require at least 170Mb
+        be_groups.replace('nsslapd-cachememsize', groups_entrycache)
+    except:
+        raise
+
+    # Step 2. Generate a test ldif (2k user entries)
+    log.info("Generating users LDIF...")
+    ldif_dir = inst.get_ldif_dir()
+    users_import_ldif = "%s/%s" % (ldif_dir, users_ldif)
+    groups_import_ldif = "%s/%s" % (ldif_dir, groups_ldif)
+    dbgen_users(inst, users_number, users_import_ldif, suffix=users_suffix, generic=True, parent=users_suffix)
+
+    # Generate a test ldif (1k groups with 2k members each) that fits in the 200Mb entry cache
+    props = {
+        "name": "group",
+        "suffix": groups_suffix,
+        "parent": groups_suffix,
+        "number": groups_number,
+        "numMembers": users_number,
+        "createMembers": False,
+        "memberParent": users_suffix,
+        "membershipAttr": "uniquemember",
+    }
+    dbgen_groups(inst, groups_import_ldif, props)
+
+    # Step 3. Do both offline imports
+    inst.stop()
+    if not inst.ldif2db(users_backend, None, None, None, users_import_ldif):
+        log.fatal('test_match_large_valueset: Offline users import failed')
+        assert False
+    if not inst.ldif2db(groups_backend, None, None, None, groups_import_ldif):
+        log.fatal('test_match_large_valueset: Offline groups import failed')
+        assert False
+    inst.start()
+
+    # Step 4. First prime the cache.
+    # Just request the 'dn': we are interested in the matching time, not the transfer time
+    entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (priming_user_rdn, users_suffix), ['dn'])
+    assert len(entries) == groups_number
+
+    # Step 5. Now do the real performance check; it should take less than a second.
+    # Just request the 'dn': we are interested in the matching time, not the transfer time
+    search_start = time.time()
+    entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (perf_user_rdn, users_suffix), ['dn'])
+    duration = time.time() - search_start
+    log.info("Duration of the search was %f", duration)
+
+    # Step 6. Gather the etime from the access log
+    inst.stop()
+    access_log = DirsrvAccessLog(inst)
+    search_result = access_log.match(".*RESULT err=0 tag=101 nentries=%s.*" % groups_number)
+    log.info("Found patterns are %s", search_result[0])
+    log.info("Found patterns are %s", search_result[1])
+    etime = float(search_result[1].split('etime=')[1])
+    log.info("Duration of the search from the access log was %f", etime)
+    assert len(entries) == groups_number
+    assert (etime < 1)
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
diff --git a/ldap/servers/slapd/filterentry.c b/ldap/servers/slapd/filterentry.c
index fd8fdda9f..cae5c7edc 100644
--- a/ldap/servers/slapd/filterentry.c
+++ b/ldap/servers/slapd/filterentry.c
@@ -296,7 +296,27 @@ test_ava_filter(
     rc = -1;
     for (; a != NULL; a = a->a_next) {
         if (slapi_attr_type_cmp(ava->ava_type, a->a_type, SLAPI_TYPE_CMP_SUBTYPE) == 0) {
-            rc = plugin_call_syntax_filter_ava(a, ftype, ava);
+            if ((ftype == LDAP_FILTER_EQUALITY) &&
+                (slapi_attr_is_dn_syntax_type(a->a_type))) {
+                /* This path is for a performance improvement */
+
+                /* In the case of an equality filter we can benefit
+                 * from the sorted valuearray (from the valueset).
+                 * This improvement is limited to DN syntax attributes,
+                 * for which the sorted valueset was designed.
+                 */
+                Slapi_Value *sval = NULL;
+                sval = slapi_value_new_berval(&ava->ava_value);
+                if (slapi_valueset_find((const Slapi_Attr *)a, &a->a_present_values, sval)) {
+                    rc = 0;
+                }
+                slapi_value_free(&sval);
+            } else {
+                /* When the sorted valuearray optimization cannot be used,
+                 * filter the value according to its syntax
+                 */
+                rc = plugin_call_syntax_filter_ava(a, ftype, ava);
+            }
             if (rc == 0) {
                 break;
             }
-- 
2.46.0

@@ -0,0 +1,163 @@
From 57051154bafaf50b83fc27dadbd89a49fd1c8c36 Mon Sep 17 00:00:00 2001
From: Pierre Rogier <progier@redhat.com>
Date: Fri, 14 Jun 2024 13:27:10 +0200
Subject: [PATCH] Security fix for CVE-2024-5953

Description:
A denial of service vulnerability was found in the 389 Directory Server.
This issue may allow an authenticated user to cause a server denial
of service while attempting to log in as a user with a malformed hash
in their password.

Fix Description:
To prevent a buffer overflow when a bind request is processed, the bind
fails if the hash size is not coherent, without even attempting to
further process the hashed password.

References:
- https://nvd.nist.gov/vuln/detail/CVE-2024-5953
- https://access.redhat.com/security/cve/CVE-2024-5953
- https://bugzilla.redhat.com/show_bug.cgi?id=2292104
---
 .../tests/suites/password/regression_test.py | 54 ++++++++++++++++++-
 ldap/servers/plugins/pwdstorage/md5_pwd.c    |  9 +++-
 ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c |  6 +++
 3 files changed, 66 insertions(+), 3 deletions(-)

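Both C hunks below apply the same guard: bound the length of the attacker-controlled stored hash against the fixed-size buffer before decoding or comparing it. Here is a minimal Python sketch of that guard (illustrative only; DBHASH_SIZE and both function names are invented stand-ins, not the server's API):

    # Sketch only (not 389-ds code): reject a stored hash that could not
    # possibly fit the fixed-size digest buffer before decoding it.
    import base64

    DBHASH_SIZE = 256  # stand-in for the server's fixed dbhash buffer

    def b64_decoded_len(encoded):
        # Upper bound on the decoded size: 3 bytes per 4 base64 characters,
        # mirroring what pwdstorage_base64_decode_len() is used for below.
        return (len(encoded) + 3) // 4 * 3

    def decode_stored_hash(dbpwd):
        if b64_decoded_len(dbpwd) > DBHASH_SIZE:
            # Longer than any hash the scheme can generate: fail the bind early.
            return None
        return base64.b64decode(dbpwd)
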
diff --git a/dirsrvtests/tests/suites/password/regression_test.py b/dirsrvtests/tests/suites/password/regression_test.py
index 8f1facb6d..1fa581643 100644
--- a/dirsrvtests/tests/suites/password/regression_test.py
+++ b/dirsrvtests/tests/suites/password/regression_test.py
@@ -7,12 +7,14 @@
 #
 import pytest
 import time
+import glob
+import base64
 from lib389._constants import PASSWORD, DN_DM, DEFAULT_SUFFIX
 from lib389._constants import SUFFIX, PASSWORD, DN_DM, DN_CONFIG, PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DEFAULT_CHANGELOG_DB
 from lib389 import Entry
 from lib389.topologies import topology_m1 as topo_supplier
-from lib389.idm.user import UserAccounts
-from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer
+from lib389.idm.user import UserAccounts, UserAccount
+from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer, ds_supports_new_changelog
 from lib389.topologies import topology_st as topo
 from lib389.idm.organizationalunit import OrganizationalUnits
 
@@ -39,6 +41,13 @@ TEST_PASSWORDS += ['CNpwtest1ZZZZ', 'ZZZZZCNpwtest1',
 TEST_PASSWORDS2 = (
     'CN12pwtest31', 'SN3pwtest231', 'UID1pwtest123', 'MAIL2pwtest12@redhat.com', '2GN1pwtest123', 'People123')
 
+SUPPORTED_SCHEMES = (
+    "{SHA}", "{SSHA}", "{SHA256}", "{SSHA256}",
+    "{SHA384}", "{SSHA384}", "{SHA512}", "{SSHA512}",
+    "{crypt}", "{NS-MTA-MD5}", "{clear}", "{MD5}",
+    "{SMD5}", "{PBKDF2_SHA256}", "{PBKDF2_SHA512}",
+    "{GOST_YESCRYPT}", "{PBKDF2-SHA256}", "{PBKDF2-SHA512}")
+
 def _check_unhashed_userpw(inst, user_dn, is_present=False):
     """Check if unhashed#user#password attribute is present or not in the changelog"""
     unhashed_pwd_attribute = 'unhashed#user#password'
@@ -319,6 +328,47 @@ def test_unhashed_pw_switch(topo_supplier):
         # Add debugging steps(if any)...
         pass
 
+@pytest.mark.parametrize("scheme", SUPPORTED_SCHEMES)
+def test_long_hashed_password(topo, create_user, scheme):
+    """Check that a hashed password with a very long value does not cause trouble
+
+    :id: 252a1f76-114b-11ef-8a7a-482ae39447e5
+    :setup: standalone Instance
+    :parametrized: yes
+    :steps:
+        1. Add a test user
+        2. Set a long password with the requested scheme
+        3. Bind on that user using a wrong password
+        4. Check that the instance is still alive
+        5. Remove the added user
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Should get an ldap.INVALID_CREDENTIALS exception
+        4. Success
+        5. Success
+    """
+    inst = topo.standalone
+    inst.simple_bind_s(DN_DM, PASSWORD)
+    users = UserAccounts(inst, DEFAULT_SUFFIX)
+    # Make sure that the server is started, as this test may crash it
+    inst.start()
+    # Add the test user (it may already exist if a previous test failed)
+    user2 = UserAccount(inst, dn='uid=test_user_1002,ou=People,dc=example,dc=com')
+    if not user2.exists():
+        user2 = users.create_test_user(uid=1002, gid=2002)
+    # Set the hashed password
+    passwd = 'A'*4000
+    hashed_passwd = scheme.encode('utf-8') + base64.b64encode(passwd.encode('utf-8'))
+    user2.replace('userpassword', hashed_passwd)
+    # Bind on that user using a wrong password
+    with pytest.raises(ldap.INVALID_CREDENTIALS):
+        conn = user2.bind(PASSWORD)
+    # Check that the instance is still alive
+    assert inst.status()
+    # Remove the added user
+    user2.delete()
+
 
 if __name__ == '__main__':
     # Run isolated
diff --git a/ldap/servers/plugins/pwdstorage/md5_pwd.c b/ldap/servers/plugins/pwdstorage/md5_pwd.c
index 1e2cf58e7..b9a48d5ca 100644
--- a/ldap/servers/plugins/pwdstorage/md5_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/md5_pwd.c
@@ -37,6 +37,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
     unsigned char hash_out[MD5_HASH_LEN];
     unsigned char b2a_out[MD5_HASH_LEN * 2]; /* conservative */
     SECItem binary_item;
+    size_t dbpwd_len = strlen(dbpwd);
 
     ctx = PK11_CreateDigestContext(SEC_OID_MD5);
     if (ctx == NULL) {
@@ -45,6 +46,12 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
         goto loser;
     }
 
+    if (dbpwd_len >= sizeof b2a_out) {
+        slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
+                      "The hashed password stored in the user entry is longer than any valid md5 hash");
+        goto loser;
+    }
+
     /* create the hash */
     PK11_DigestBegin(ctx);
     PK11_DigestOp(ctx, (const unsigned char *)userpwd, strlen(userpwd));
@@ -57,7 +64,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
     bver = NSSBase64_EncodeItem(NULL, (char *)b2a_out, sizeof b2a_out, &binary_item);
     /* bver points to b2a_out upon success */
     if (bver) {
-        rc = slapi_ct_memcmp(bver, dbpwd, strlen(dbpwd));
+        rc = slapi_ct_memcmp(bver, dbpwd, dbpwd_len);
     } else {
         slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
                       "Could not base64 encode hashed value for password compare");
diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
index dcac4fcdd..82b8c9501 100644
--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
@@ -255,6 +255,12 @@ pbkdf2_sha256_pw_cmp(const char *userpwd, const char *dbpwd)
     passItem.data = (unsigned char *)userpwd;
     passItem.len = strlen(userpwd);
 
+    if (pwdstorage_base64_decode_len(dbpwd, dbpwd_len) > sizeof dbhash) {
+        /* Hashed value is too long and cannot match any value generated by pbkdf2_sha256_hash */
+        slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value. (hashed value is too long)\n");
+        return result;
+    }
+
     /* Decode the DBpwd to bytes from b64 */
     if (PL_Base64Decode(dbpwd, dbpwd_len, dbhash) == NULL) {
         slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value\n");
-- 
2.46.0