Compare commits


No commits in common. 'c8' and 'c9' have entirely different histories.
c8 ... c9

.gitignore

@@ -1,2 +1 @@
-SOURCES/pcp-5.3.7.src.tar.gz
-SOURCES/redhat-bugzilla-2219731-hacluster-metrics.patch
+SOURCES/pcp-6.2.0.src.tar.gz

@@ -1,2 +1 @@
-a0a05bf501b016cb859fb211ae60ce18be2bbd99 SOURCES/pcp-5.3.7.src.tar.gz
-bb84c58586e078343c975ed61a8b53d4fd4393e8 SOURCES/redhat-bugzilla-2219731-hacluster-metrics.patch
+617d0e505af5f253080effb6701e089fc02e7379 SOURCES/pcp-6.2.0.src.tar.gz

@@ -1,326 +0,0 @@
commit c226e98096c7bdb6296a96257439724e1f68217e
Author: Nathan Scott <nathans@redhat.com>
Date: Tue Apr 26 12:00:32 2022 +1000
libpcp: optimize indom handling in fetchgroup code
Looking into some pcp-ss issues I immediately encountered a
noticeable elapsed time difference between executing just ss
versus pcp-ss. It turned out it was due to fetchgroup code
not really understanding how the underlying indom APIs work
and repeatedly refreshing shared indoms (all metrics in the
pcp-ss tool actually share one indom) once for every metric
that happens to use that indom - i.e. many times per fetch.
These changes resulted in a 5x speedup in pcp-ss and it now
completes instantly as expected. Additionally, we use a lot
less memory now for the many-metrics-sharing-an-indom case.
diff -Naurp pcp-5.3.7.patched/src/libpcp/src/fetchgroup.c pcp-5.3.7/src/libpcp/src/fetchgroup.c
--- pcp-5.3.7.patched/src/libpcp/src/fetchgroup.c 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/src/libpcp/src/fetchgroup.c 2023-03-08 18:46:43.486916630 +1100
@@ -34,6 +34,19 @@
/* ------------------------------------------------------------------------ */
/*
+ * Cache to avoid unnecessary repeated calls to pmGetInDom for individual
+ * instance domains. This involves a round trip to pmcd and it does not
+ * change frequently (certainly not for processing results of one sample).
+ */
+struct __pmInDomCache {
+ pmInDom indom;
+ int *codes; /* saved from pmGetInDom */
+ char **names;
+ unsigned int size;
+ int refreshed;
+};
+
+/*
* An individual fetch-group is a linked list of requests tied to a
* particular pcp context (which the fetchgroup owns). NB: This is
* opaque to the PMAPI client.
@@ -43,8 +56,10 @@ struct __pmFetchGroup {
int wrap; /* wrap-handling flag, set at fg-create-time */
pmResult *prevResult;
struct __pmFetchGroupItem *items;
+ struct __pmInDomCache *unique_indoms;
pmID *unique_pmids;
size_t num_unique_pmids;
+ size_t num_unique_indoms;
};
/*
@@ -81,9 +96,6 @@ struct __pmFetchGroupItem {
struct {
pmID metric_pmid;
pmDesc metric_desc;
- int *indom_codes; /* saved from pmGetInDom */
- char **indom_names;
- unsigned indom_size;
struct __pmFetchGroupConversionSpec conv;
int *output_inst_codes; /* NB: may be NULL */
char **output_inst_names; /* NB: may be NULL */
@@ -168,9 +180,7 @@ pmfg_add_pmid(pmFG pmfg, pmID pmid)
/*
* Populate given pmFGI item structure based on common lookup &
* verification for pmfg inputs. Adjust instance profile to
- * include requested instance. If it's a derived metric, we
- * don't know what instance domain(s) it could involve, so we
- * clear the instance profile entirely.
+ * include requested instance.
*/
static int
pmfg_lookup_item(const char *metric, const char *instance, pmFGI item)
@@ -206,9 +216,12 @@ pmfg_lookup_item(const char *metric, con
/* Same for a whole indom. Add the whole instance profile. */
static int
-pmfg_lookup_indom(const char *metric, pmFGI item)
+pmfg_lookup_indom(pmFG pmfg, const char *metric, pmFGI item)
{
- int sts;
+ struct __pmInDomCache *indoms;
+ pmInDom indom;
+ size_t size;
+ int i, sts;
assert(item != NULL);
assert(item->type == pmfg_indom);
@@ -221,14 +234,34 @@ pmfg_lookup_indom(const char *metric, pm
return sts;
/* As a convenience to users, we also accept non-indom'd metrics */
- if (item->u.indom.metric_desc.indom == PM_INDOM_NULL)
+ if ((indom = item->u.indom.metric_desc.indom) == PM_INDOM_NULL)
return 0;
/*
+ * Insert into the instance domain cache if not seen previously
+ */
+ for (i = 0; i < pmfg->num_unique_indoms; i++) {
+ if (pmfg->unique_indoms[i].indom == indom)
+ return 0;
+ }
+
+ size = sizeof(struct __pmInDomCache) * (pmfg->num_unique_indoms + 1);
+ indoms = realloc(pmfg->unique_indoms, size);
+
+ if (indoms == NULL)
+ return -ENOMEM;
+ pmfg->unique_indoms = indoms;
+ pmfg->unique_indoms[pmfg->num_unique_indoms].indom = indom;
+ pmfg->unique_indoms[pmfg->num_unique_indoms].codes = NULL;
+ pmfg->unique_indoms[pmfg->num_unique_indoms].names = NULL;
+ pmfg->unique_indoms[pmfg->num_unique_indoms].refreshed = 0;
+ pmfg->num_unique_indoms++;
+
+ /*
* Add all instances; this will override any other past or future
* piecemeal instance requests from __pmExtendFetchGroup_lookup.
*/
- return pmAddProfile(item->u.indom.metric_desc.indom, 0, NULL);
+ return pmAddProfile(indom, 0, NULL);
}
/* Same for an event field. */
@@ -581,7 +614,7 @@ pmfg_reinit_indom(pmFGI item)
if (item->u.indom.output_inst_names)
for (i = 0; i < item->u.indom.output_maxnum; i++)
- item->u.indom.output_inst_names[i] = NULL; /* break ref into indom_names[] */
+ item->u.indom.output_inst_names[i] = NULL; /* ptr into names[] */
if (item->u.indom.output_stss)
for (i = 0; i < item->u.indom.output_maxnum; i++)
@@ -857,11 +890,11 @@ pmfg_fetch_timestamp(pmFG pmfg, pmFGI it
static void
pmfg_fetch_indom(pmFG pmfg, pmFGI item, pmResult *newResult)
{
- int sts = 0;
- int i;
- unsigned j;
- int need_indom_refresh;
+ int i, sts = 0;
+ unsigned int j, k;
+ struct __pmInDomCache *cache;
const pmValueSet *iv;
+ pmInDom indom;
assert(item != NULL);
assert(item->type == pmfg_indom);
@@ -901,36 +934,43 @@ pmfg_fetch_indom(pmFG pmfg, pmFGI item,
/*
* Analyze newResult to see whether it only contains instances we
- * already know. This is unfortunately an O(N**2) operation. It
- * could be made a bit faster if we build a pmGetInDom()- variant
- * that provides instances in sorted order.
+ * already know.
*/
- need_indom_refresh = 0;
- if (item->u.indom.output_inst_names) { /* Caller interested at all? */
- for (j = 0; j < (unsigned)iv->numval; j++) {
- unsigned k;
- for (k = 0; k < item->u.indom.indom_size; k++)
- if (item->u.indom.indom_codes[k] == iv->vlist[j].inst)
+ cache = NULL;
+ indom = item->u.indom.metric_desc.indom;
+ for (j = 0; j < pmfg->num_unique_indoms; j++) {
+ if (indom != pmfg->unique_indoms[j].indom)
+ continue;
+ cache = &pmfg->unique_indoms[j];
+ break;
+ }
+ if (cache && cache->refreshed &&
+ item->u.indom.output_inst_names) { /* Caller interested at all? */
+ for (j = 0; j < (unsigned int)iv->numval; j++) {
+ for (k = 0; k < cache->size; k++)
+ if (cache->codes[k] == iv->vlist[j].inst)
break;
- if (k >= item->u.indom.indom_size) {
- need_indom_refresh = 1;
+ if (k >= cache->size) {
+ cache->refreshed = 0;
break;
}
}
}
- if (need_indom_refresh) {
- free(item->u.indom.indom_codes);
- free(item->u.indom.indom_names);
- sts = pmGetInDom(item->u.indom.metric_desc.indom,
- &item->u.indom.indom_codes, &item->u.indom.indom_names);
+ if (cache && !cache->refreshed) {
+ cache->refreshed = 1;
+ free(cache->codes);
+ free(cache->names);
+ sts = pmGetInDom(indom, &cache->codes, &cache->names);
if (sts < 1) {
/* Need to manually clear; pmGetInDom claims they are undefined. */
- item->u.indom.indom_codes = NULL;
- item->u.indom.indom_names = NULL;
- item->u.indom.indom_size = 0;
+ cache->codes = NULL;
+ cache->names = NULL;
+ cache->size = 0;
}
else {
- item->u.indom.indom_size = sts;
+ if (sts < 0)
+ cache->refreshed = 0;
+ cache->size = sts;
}
/*
* NB: Even if the pmGetInDom failed, we can proceed with
@@ -965,18 +1005,19 @@ pmfg_fetch_indom(pmFG pmfg, pmFGI item,
* results from pmGetIndom.
*/
if (item->u.indom.output_inst_names) {
- unsigned k;
-
- for (k = 0; k < item->u.indom.indom_size; k++) {
- if (item->u.indom.indom_codes[k] == jv->inst) {
- /*
- * NB: copy the indom name char* by value.
- * The user is not supposed to modify / free this pointer,
- * or use it after a subsequent fetch or delete operation.
- */
- item->u.indom.output_inst_names[j] =
- item->u.indom.indom_names[k];
- break;
+ if (cache == NULL)
+ item->u.indom.output_inst_names[j] = NULL;
+ else {
+ for (k = 0; k < cache->size; k++) {
+ if (cache->codes[k] == jv->inst) {
+ /*
+ * NB: copy the indom name char* by value.
+ * User may not modify / free this pointer,
+ * nor use it after subsequent fetch / delete.
+ */
+ item->u.indom.output_inst_names[j] = cache->names[k];
+ break;
+ }
}
}
}
@@ -1201,8 +1242,7 @@ out:
static int
pmfg_clear_profile(pmFG pmfg)
{
- int sts;
- pmFGI item;
+ int i, sts;
sts = pmUseContext(pmfg->ctx);
if (sts != 0)
@@ -1216,10 +1256,9 @@ pmfg_clear_profile(pmFG pmfg)
* Any errors here are benign (or rather there's nothing we can do
* about 'em), so ignore pmDelProfile() return value.
*/
- for (item = pmfg->items; item; item = item->next) {
- if (item->u.item.metric_desc.indom != PM_INDOM_NULL)
- (void)pmDelProfile(item->u.item.metric_desc.indom, 0, NULL);
- }
+ for (i = 0; i < pmfg->num_unique_indoms; i++)
+ (void)pmDelProfile(pmfg->unique_indoms[i].indom, 0, NULL);
+
return 0;
}
@@ -1434,7 +1473,7 @@ pmExtendFetchGroup_indom(pmFG pmfg,
item->type = pmfg_indom;
- sts = pmfg_lookup_indom(metric, item);
+ sts = pmfg_lookup_indom(pmfg, metric, item);
if (sts != 0)
goto out;
@@ -1615,7 +1654,6 @@ pmFetchGroup(pmFG pmfg)
if (pmfg == NULL)
return -EINVAL;
-
/*
* Walk the fetchgroup, reinitializing every output spot, regardless of
* later errors.
@@ -1705,6 +1743,7 @@ int
pmClearFetchGroup(pmFG pmfg)
{
pmFGI item;
+ size_t n;
if (pmfg == NULL)
return -EINVAL;
@@ -1719,8 +1758,6 @@ pmClearFetchGroup(pmFG pmfg)
break;
case pmfg_indom:
pmfg_reinit_indom(item);
- free(item->u.indom.indom_codes);
- free(item->u.indom.indom_names);
break;
case pmfg_event:
pmfg_reinit_event(item);
@@ -1739,11 +1776,21 @@ pmClearFetchGroup(pmFG pmfg)
if (pmfg->prevResult)
pmFreeResult(pmfg->prevResult);
pmfg->prevResult = NULL;
+
if (pmfg->unique_pmids)
free(pmfg->unique_pmids);
pmfg->unique_pmids = NULL;
pmfg->num_unique_pmids = 0;
+ for (n = 0; n < pmfg->num_unique_indoms; n++) {
+ free(pmfg->unique_indoms[n].codes);
+ free(pmfg->unique_indoms[n].names);
+ }
+ if (pmfg->unique_indoms)
+ free(pmfg->unique_indoms);
+ pmfg->unique_indoms = NULL;
+ pmfg->num_unique_indoms = 0;
+
return pmfg_clear_profile(pmfg);
}
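
For context, the fetchgroup machinery this patch optimizes is driven from PMAPI clients roughly as follows. This is a minimal sketch only; the host spec, metric name and instance name are illustrative examples, not anything taken from the patch.

    #include <stdio.h>
    #include <pcp/pmapi.h>

    /* Minimal fetchgroup consumer: one scalar value sampled once. */
    int
    main(void)
    {
        pmFG fg;
        pmAtomValue value;
        int value_sts, sts;

        if ((sts = pmCreateFetchGroup(&fg, PM_CONTEXT_HOST, "local:")) < 0)
            return 1;
        /* register one output slot; fetchgroup handles indom refreshes */
        sts = pmExtendFetchGroup_item(fg, "kernel.all.load", "1 minute", NULL,
                                      &value, PM_TYPE_DOUBLE, &value_sts);
        if (sts < 0)
            return 1;
        if ((sts = pmFetchGroup(fg)) >= 0 && value_sts >= 0)
            printf("load: %.2f\n", value.d);
        pmDestroyFetchGroup(fg);
        return 0;
    }

With many registered metrics sharing one instance domain, the cache added above means the indom is refreshed once per pmFetchGroup call rather than once per metric.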

@@ -1,374 +0,0 @@
diff --git a/qa/1927 b/qa/1927
new file mode 100755
index 000000000..46afa9509
--- /dev/null
+++ b/qa/1927
@@ -0,0 +1,88 @@
+#!/bin/sh
+# PCP QA Test No. 1927
+# Exercise the sockets PMDA Install/Remove and string metric bug.
+#
+# Copyright (c) 2022 Red Hat. All Rights Reserved.
+#
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+# get standard environment, filters and checks
+. ./common.product
+. ./common.filter
+. ./common.check
+
+[ -f $PCP_PMDAS_DIR/sockets/pmdasockets ] || _notrun "sockets pmda not installed"
+
+_cleanup()
+{
+ cd $here
+ $sudo rm -rf $tmp $tmp.*
+}
+
+status=0 # success is the default!
+$sudo rm -rf $tmp $tmp.* $seq.full
+
+_filter_sockets()
+{
+ grep -v 'No value(s) available'
+}
+
+pmdasockets_remove()
+{
+ echo
+ echo "=== remove sockets agent ==="
+ $sudo ./Remove >$tmp.out 2>&1
+ _filter_pmda_remove <$tmp.out
+}
+
+pmdasockets_install()
+{
+ # start from known starting points
+ cd $PCP_PMDAS_DIR/sockets
+ $sudo ./Remove >/dev/null 2>&1
+
+ echo
+ echo "=== sockets agent installation ==="
+ $sudo ./Install </dev/null >$tmp.out 2>&1
+ cat $tmp.out >>$here/$seq.full
+ # Check sockets metrics have appeared ... X metrics and Y values
+ _filter_pmda_install <$tmp.out \
+ | sed \
+ -e 's/[0-9][0-9]* warnings, //' \
+ | $PCP_AWK_PROG '
+/Check network.persocket metrics have appeared/ {
+ if ($7 >= 50 && $7 <= 99) $7 = "X"
+ if ($10 >= 0) $10 = "Y"
+ }
+ { print }'
+}
+
+_prepare_pmda sockets
+# note: _restore_auto_restart pmcd done in _cleanup_pmda()
+trap "_cleanup_pmda sockets; exit \$status" 0 1 2 3 15
+
+_stop_auto_restart pmcd
+
+# real QA test starts here
+pmdasockets_install
+
+# pmcd should have been started by the Install process - check
+if pminfo -v network.persocket > $tmp.info 2> $tmp.err
+then
+ :
+else
+ echo "... failed! ... here is the Install log ..."
+ cat $tmp.out
+fi
+cat $tmp.info $tmp.err | _filter_sockets
+
+echo "Check the values for v6only metric are 0 or 1 ..."
+pminfo -f network.persocket.v6only | egrep -v 'value [01]$' | sed -e '/^$/d'
+
+pmdasockets_remove
+status=0
+
+# success, all done
+exit
diff --git a/qa/1927.out b/qa/1927.out
new file mode 100644
index 000000000..2ae4385fd
--- /dev/null
+++ b/qa/1927.out
@@ -0,0 +1,17 @@
+QA output created by 1927
+
+=== sockets agent installation ===
+Updating the Performance Metrics Name Space (PMNS) ...
+Terminate PMDA if already installed ...
+[...install files, make output...]
+Updating the PMCD control file, and notifying PMCD ...
+Check network.persocket metrics have appeared ... X metrics and Y values
+Check the values for v6only metric are 0 or 1 ...
+network.persocket.v6only
+
+=== remove sockets agent ===
+Culling the Performance Metrics Name Space ...
+network.persocket ... done
+Updating the PMCD control file, and notifying PMCD ...
+[...removing files...]
+Check network.persocket metrics have gone away ... OK
diff --git a/qa/group b/qa/group
index acfc5d208..846c0c4bd 100644
--- a/qa/group
+++ b/qa/group
@@ -1967,6 +1967,7 @@ x11
1901 pmlogger local
1902 help local
1914 atop local
+1927 pmda.sockets local
1937 pmlogrewrite pmda.xfs local
1955 libpcp pmda pmda.pmcd local
1956 pmda.linux pmcd local
diff --git a/src/pmdas/linux_sockets/pmda.c b/src/pmdas/linux_sockets/pmda.c
index d10eacf29..5a3018d8a 100644
--- a/src/pmdas/linux_sockets/pmda.c
+++ b/src/pmdas/linux_sockets/pmda.c
@@ -1,7 +1,7 @@
/*
* Sockets PMDA
*
- * Copyright (c) 2021 Red Hat.
+ * Copyright (c) 2021-2022 Red Hat.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -14,6 +14,7 @@
* for more details.
*/
+#include <ctype.h>
#include "pmapi.h"
#include "pmda.h"
@@ -147,6 +148,31 @@ sockets_fetchCallBack(pmdaMetric *metric, unsigned int inst, pmAtomValue *atom)
return PMDA_FETCH_STATIC;
}
+/*
+ * Restrict the allowed filter strings to only limited special
+ * characters (open and close brackets - everthing else can be
+ * done with alphanumerics) to limit any attack surface here.
+ * The ss filtering language is more complex than we ever want
+ * to be attempting to parse ourself, so we leave that side of
+ * things to the ss command itself.
+ */
+int
+sockets_check_filter(const char *string)
+{
+ const char *p;
+
+ for (p = string; *p; p++) {
+ if (isspace(*p))
+ continue;
+ if (isalnum(*p))
+ continue;
+ if (*p == '(' || *p == ')')
+ continue;
+ return 0; /* disallow */
+ }
+ return 1;
+}
+
static int
sockets_store(pmResult *result, pmdaExt *pmda)
{
@@ -165,9 +191,14 @@ sockets_store(pmResult *result, pmdaExt *pmda)
case 0: /* network.persocket.filter */
if ((sts = pmExtractValue(vsp->valfmt, &vsp->vlist[0],
PM_TYPE_STRING, &av, PM_TYPE_STRING)) >= 0) {
+ if (!sockets_check_filter(av.cp)) {
+ sts = PM_ERR_BADSTORE;
+ free(av.cp);
+ break;
+ }
if (ss_filter)
free(ss_filter);
- ss_filter = av.cp; /* TODO filter syntax check */
+ ss_filter = av.cp;
}
break;
default:
diff --git a/src/pmdas/linux_sockets/ss_parse.c b/src/pmdas/linux_sockets/ss_parse.c
index 94c5e16e9..9f3afc691 100644
--- a/src/pmdas/linux_sockets/ss_parse.c
+++ b/src/pmdas/linux_sockets/ss_parse.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Red Hat.
+ * Copyright (c) 2021-2022 Red Hat.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -21,65 +21,70 @@ static ss_stats_t ss_p;
/* boolean value with no separate value, default 0 */
#define PM_TYPE_BOOL (PM_TYPE_UNKNOWN-1)
+/* helper macros to extract field address and size */
+#define SSFIELD(str,type,f) {(str), (sizeof(str)-1), type, (&(f)), (sizeof(f))}
+#define SSNULLFIELD(str) {(str), (sizeof(str)-1), PM_TYPE_UNKNOWN, NULL}
+
static struct {
char *field;
int len;
int type;
void *addr;
+ int size;
int found;
} parse_table[] = {
- { "timer:", 6, PM_TYPE_STRING, &ss_p.timer_str },
- { "uid:", 4, PM_TYPE_U32, &ss_p.uid },
- { "ino:", 4, PM_TYPE_64, &ss_p.inode },
- { "sk:", 3, PM_TYPE_U64, &ss_p.sk },
- { "cgroup:", 7, PM_TYPE_STRING, &ss_p.cgroup },
- { "v6only:", 7, PM_TYPE_32, &ss_p.v6only },
- { "--- ", 4, PM_TYPE_UNKNOWN, NULL },
- { "<-> ", 4, PM_TYPE_UNKNOWN, NULL },
- { "--> ", 4, PM_TYPE_UNKNOWN, NULL },
- { "skmem:", 6, PM_TYPE_STRING, &ss_p.skmem_str, },
- { "ts ", 3, PM_TYPE_BOOL, &ss_p.ts },
- { "sack ", 5, PM_TYPE_BOOL, &ss_p.sack },
- { "cubic ", 6, PM_TYPE_BOOL, &ss_p.cubic },
- { "wscale:", 7, PM_TYPE_STRING, &ss_p.wscale_str },
- { "rto:", 4, PM_TYPE_DOUBLE, &ss_p.rto },
- { "rtt:", 4, PM_TYPE_STRING, &ss_p.round_trip_str },
- { "ato:", 4, PM_TYPE_DOUBLE, &ss_p.ato },
- { "backoff:", 8, PM_TYPE_32, &ss_p.backoff },
- { "mss:", 4, PM_TYPE_U32, &ss_p.mss },
- { "pmtu:", 5, PM_TYPE_U32, &ss_p.pmtu },
- { "rcvmss:", 7, PM_TYPE_U32, &ss_p.rcvmss },
- { "advmss:", 7, PM_TYPE_U32, &ss_p.advmss },
- { "cwnd:", 5, PM_TYPE_U32, &ss_p.cwnd },
- { "lost:", 5, PM_TYPE_32, &ss_p.lost },
- { "ssthresh:", 9, PM_TYPE_U32, &ss_p.ssthresh },
- { "bytes_sent:", 11, PM_TYPE_U64, &ss_p.bytes_sent },
- { "bytes_retrans:", 14, PM_TYPE_U64, &ss_p.bytes_retrans },
- { "bytes_acked:", 12, PM_TYPE_U64, &ss_p.bytes_acked },
- { "bytes_received:", 15, PM_TYPE_U64, &ss_p.bytes_received },
- { "segs_out:", 9, PM_TYPE_U32, &ss_p.segs_out },
- { "segs_in:", 8, PM_TYPE_U32, &ss_p.segs_in },
- { "data_segs_out:", 14, PM_TYPE_U32, &ss_p.data_segs_out },
- { "data_segs_in:", 13, PM_TYPE_U32, &ss_p.data_segs_in },
- { "send ", 5, PM_TYPE_DOUBLE, &ss_p.send }, /* no ':' */
- { "lastsnd:", 8, PM_TYPE_U32, &ss_p.lastsnd },
- { "lastrcv:", 8, PM_TYPE_U32, &ss_p.lastrcv },
- { "lastack:", 8, PM_TYPE_U32, &ss_p.lastack },
- { "pacing_rate ", 12, PM_TYPE_DOUBLE, &ss_p.pacing_rate }, /* no ':' */
- { "delivery_rate ", 14, PM_TYPE_DOUBLE, &ss_p.delivery_rate }, /* no ':' */
- { "delivered:", 10, PM_TYPE_U32, &ss_p.delivered },
- { "app_limited ", 12, PM_TYPE_BOOL, &ss_p.app_limited },
- { "reord_seen:", 11, PM_TYPE_32, &ss_p.reord_seen },
- { "busy:", 5, PM_TYPE_U64, &ss_p.busy },
- { "unacked:", 8, PM_TYPE_32, &ss_p.unacked },
- { "rwnd_limited:", 13, PM_TYPE_U64, &ss_p.rwnd_limited },
- { "retrans:", 8, PM_TYPE_STRING, &ss_p.retrans_str },
- { "dsack_dups:", 11, PM_TYPE_U32, &ss_p.dsack_dups },
- { "rcv_rtt:", 8, PM_TYPE_DOUBLE, &ss_p.rcv_rtt },
- { "rcv_space:", 10, PM_TYPE_32, &ss_p.rcv_space },
- { "rcv_ssthresh:", 13, PM_TYPE_32, &ss_p.rcv_ssthresh },
- { "minrtt:", 7, PM_TYPE_DOUBLE, &ss_p.minrtt },
- { "notsent:", 8, PM_TYPE_U32, &ss_p.notsent },
+ SSFIELD("timer:", PM_TYPE_STRING, ss_p.timer_str),
+ SSFIELD("uid:", PM_TYPE_U32, ss_p.uid),
+ SSFIELD("ino:", PM_TYPE_64, ss_p.inode),
+ SSFIELD("sk:", PM_TYPE_U64, ss_p.sk),
+ SSFIELD("cgroup:", PM_TYPE_STRING, ss_p.cgroup),
+ SSFIELD("v6only:", PM_TYPE_32, ss_p.v6only),
+ SSNULLFIELD("--- "),
+ SSNULLFIELD("<-> "),
+ SSNULLFIELD("--> "),
+ SSFIELD("skmem:", PM_TYPE_STRING, ss_p.skmem_str),
+ SSFIELD("ts ", PM_TYPE_BOOL, ss_p.ts),
+ SSFIELD("sack ", PM_TYPE_BOOL, ss_p.sack),
+ SSFIELD("cubic ", PM_TYPE_BOOL, ss_p.cubic),
+ SSFIELD("wscale:", PM_TYPE_STRING, ss_p.wscale_str),
+ SSFIELD("rto:", PM_TYPE_DOUBLE, ss_p.rto),
+ SSFIELD("rtt:", PM_TYPE_STRING, ss_p.round_trip_str),
+ SSFIELD("ato:", PM_TYPE_DOUBLE, ss_p.ato),
+ SSFIELD("backoff:", PM_TYPE_32, ss_p.backoff),
+ SSFIELD("mss:", PM_TYPE_U32, ss_p.mss),
+ SSFIELD("pmtu:", PM_TYPE_U32, ss_p.pmtu),
+ SSFIELD("rcvmss:", PM_TYPE_U32, ss_p.rcvmss),
+ SSFIELD("advmss:", PM_TYPE_U32, ss_p.advmss),
+ SSFIELD("cwnd:", PM_TYPE_U32, ss_p.cwnd),
+ SSFIELD("lost:", PM_TYPE_32, ss_p.lost),
+ SSFIELD("ssthresh:", PM_TYPE_U32, ss_p.ssthresh),
+ SSFIELD("bytes_sent:", PM_TYPE_U64, ss_p.bytes_sent),
+ SSFIELD("bytes_retrans:", PM_TYPE_U64, ss_p.bytes_retrans),
+ SSFIELD("bytes_acked:", PM_TYPE_U64, ss_p.bytes_acked),
+ SSFIELD("bytes_received:", PM_TYPE_U64, ss_p.bytes_received),
+ SSFIELD("segs_out:", PM_TYPE_U32, ss_p.segs_out),
+ SSFIELD("segs_in:", PM_TYPE_U32, ss_p.segs_in),
+ SSFIELD("data_segs_out:", PM_TYPE_U32, ss_p.data_segs_out),
+ SSFIELD("data_segs_in:", PM_TYPE_U32, ss_p.data_segs_in),
+ SSFIELD("send ", PM_TYPE_DOUBLE, ss_p.send), /* no ':' */
+ SSFIELD("lastsnd:", PM_TYPE_U32, ss_p.lastsnd),
+ SSFIELD("lastrcv:", PM_TYPE_U32, ss_p.lastrcv),
+ SSFIELD("lastack:", PM_TYPE_U32, ss_p.lastack),
+ SSFIELD("pacing_rate ", PM_TYPE_DOUBLE, ss_p.pacing_rate), /* no ':' */
+ SSFIELD("delivery_rate ", PM_TYPE_DOUBLE, ss_p.delivery_rate), /* no ':' */
+ SSFIELD("delivered:", PM_TYPE_U32, ss_p.delivered),
+ SSFIELD("app_limited ", PM_TYPE_BOOL, ss_p.app_limited),
+ SSFIELD("reord_seen:", PM_TYPE_32, ss_p.reord_seen),
+ SSFIELD("busy:", PM_TYPE_U64, ss_p.busy),
+ SSFIELD("unacked:", PM_TYPE_32, ss_p.unacked),
+ SSFIELD("rwnd_limited:", PM_TYPE_U64, ss_p.rwnd_limited),
+ SSFIELD("retrans:", PM_TYPE_STRING, ss_p.retrans_str),
+ SSFIELD("dsack_dups:", PM_TYPE_U32, ss_p.dsack_dups),
+ SSFIELD("rcv_rtt:", PM_TYPE_DOUBLE, ss_p.rcv_rtt),
+ SSFIELD("rcv_space:", PM_TYPE_32, ss_p.rcv_space),
+ SSFIELD("rcv_ssthresh:", PM_TYPE_32, ss_p.rcv_ssthresh),
+ SSFIELD("minrtt:", PM_TYPE_DOUBLE, ss_p.minrtt),
+ SSFIELD("notsent:", PM_TYPE_U32, ss_p.notsent),
{ NULL }
};
@@ -225,8 +230,11 @@ ss_parse(char *line, int has_state_field, ss_stats_t *ss)
if (*p == '(')
p++;
r = (char *)parse_table[i].addr;
- for (s=p; *s && *s != ' ' && *s != '\n' && *s != ')'; s++)
- *r++ = *s; /* TODO check r len */
+ for (s=p; *s && *s != ' ' && *s != '\n' && *s != ')'; s++) {
+ *r++ = *s;
+ if (r - (char *)parse_table[i].addr >= parse_table[i].size - 1)
+ break;
+ }
*r = '\0';
break;
case PM_TYPE_32:
diff --git a/src/pmdas/linux_sockets/ss_stats.h b/src/pmdas/linux_sockets/ss_stats.h
index 183db5afa..009a00cd9 100644
--- a/src/pmdas/linux_sockets/ss_stats.h
+++ b/src/pmdas/linux_sockets/ss_stats.h
@@ -1,11 +1,11 @@
/*
- * Copyright (c) 2021 Red Hat.
- *
+ * Copyright (c) 2021-2022 Red Hat.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
@@ -26,7 +26,7 @@ typedef struct ss_stats {
__int32_t timer_retrans;
__uint32_t uid;
__uint64_t sk;
- char cgroup[64];
+ char cgroup[128];
__int32_t v6only;
char skmem_str[64];
__int32_t skmem_rmem_alloc;
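
The SSFIELD/SSNULLFIELD change above records each destination buffer's capacity with sizeof at table-build time so the string copy can be bounded. A stand-alone sketch of the same technique follows; the field names and sizes are illustrative, not the PMDA's actual table.

    #include <stdio.h>
    #include <string.h>

    struct target { char cgroup[16]; char timer[8]; };

    /* record destination address and capacity together, as SSFIELD does */
    #define FIELD(str, f) { (str), (sizeof(str)-1), &(f), sizeof(f) }

    static struct target t;
    static struct {
        const char *prefix;
        int len;
        void *addr;
        int size;
    } table[] = {
        FIELD("cgroup:", t.cgroup),
        FIELD("timer:", t.timer),
        { NULL }    /* sentinel terminates iteration */
    };

    static void
    parse(const char *token)
    {
        int i;
        for (i = 0; table[i].prefix != NULL; i++) {
            if (strncmp(token, table[i].prefix, table[i].len) != 0)
                continue;
            /* bounded copy: never writes past the recorded field size */
            strncpy(table[i].addr, token + table[i].len, table[i].size - 1);
            ((char *)table[i].addr)[table[i].size - 1] = '\0';
            break;
        }
    }

    int main(void)
    {
        parse("cgroup:/user.slice/averyverylongcgroupnamehere");
        printf("%s\n", t.cgroup);   /* truncated safely to 15 chars + NUL */
        return 0;
    }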

@@ -1,20 +0,0 @@
The bcc included in RHEL 8.6 doesn't support the kernel_struct_has_field function.
The 4.18.x kernel in RHEL 8.6 did backport the `state` to `__state` rename (upstream,
that change arrived in kernel v5.14), so we can neither test for the existence of this
kernel struct member nor rely on a kernel version check.
Therefore, let's patch it here for RHEL 8.x only:
diff --git a/src/pmdas/bcc/modules/runqlat.python b/src/pmdas/bcc/modules/runqlat.python
index 1c6c6b4b0..efc30e958 100644
--- a/src/pmdas/bcc/modules/runqlat.python
+++ b/src/pmdas/bcc/modules/runqlat.python
@@ -100,7 +100,7 @@ class PCPBCCModule(PCPBCCBase):
if (
hasattr(BPF, "kernel_struct_has_field")
and BPF.kernel_struct_has_field(b"task_struct", b"__state") == 1
- ) or self.kernel_version() >= (5, 14, 0):
+ ) or self.kernel_version() >= (4, 18, 0):
self.bpf_text = self.bpf_text.replace('STATE_FIELD', '__state')
else:
self.bpf_text = self.bpf_text.replace('STATE_FIELD', 'state')

@@ -1,11 +0,0 @@
diff -Naurp pcp-5.3.7.orig/src/pmie/GNUmakefile pcp-5.3.7/src/pmie/GNUmakefile
--- pcp-5.3.7.orig/src/pmie/GNUmakefile 2022-02-02 11:53:05.000000000 +1100
+++ pcp-5.3.7/src/pmie/GNUmakefile 2022-05-03 11:45:12.108743480 +1000
@@ -80,6 +80,7 @@ pmie.service : pmie.service.in
$(SED) <$< >$@ \
-e 's;@PCP_RC_DIR@;'$(PCP_RC_DIR)';' \
-e 's;@PCP_RUN_DIR@;'$(PCP_RUN_DIR)';' \
+ -e 's;@PCP_SYSCONFIG_DIR@;'$(PCP_SYSCONFIG_DIR)';' \
# END
pmie_farm.service : pmie_farm.service.in

@@ -1,146 +0,0 @@
commit f54eddf494e474531e5af609bcc376037a918977
Author: Nathan Scott <nathans@redhat.com>
Date: Tue Apr 26 14:32:59 2022 +1000
pmdapostfix: harden against a not-yet-running postfix
Ensure the postfix PMDA can start and service requests even
if postfix is not yet started.
diff --git a/src/perl/PMDA/local.c b/src/perl/PMDA/local.c
index e223bde7a..33130bc5d 100644
--- a/src/perl/PMDA/local.c
+++ b/src/perl/PMDA/local.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 Red Hat.
+ * Copyright (c) 2012-2017,2022 Red Hat.
* Copyright (c) 2008-2011 Aconex. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
@@ -139,18 +139,15 @@ int
local_tail(char *file, scalar_t *callback, int cookie)
{
int fd = open(file, O_RDONLY | O_NDELAY);
- struct stat stats;
+ struct stat stats = {0};
int me;
- if (fd < 0) {
- pmNotifyErr(LOG_ERR, "open failed (%s): %s", file, osstrerror());
- exit(1);
- }
- if (fstat(fd, &stats) < 0) {
- pmNotifyErr(LOG_ERR, "fstat failed (%s): %s", file, osstrerror());
- exit(1);
- }
- lseek(fd, 0L, SEEK_END);
+ if (fd < 0)
+ pmNotifyErr(LOG_INFO, "open failed (%s): %s", file, osstrerror());
+ else if (fstat(fd, &stats) < 0)
+ pmNotifyErr(LOG_INFO, "fstat failed (%s): %s", file, osstrerror());
+ else
+ lseek(fd, 0L, SEEK_END);
me = local_file(FILE_TAIL, fd, callback, cookie);
files[me].me.tail.path = strdup(file);
files[me].me.tail.dev = stats.st_dev;
@@ -416,10 +413,11 @@ local_pmdaMain(pmdaInterface *self)
}
for (i = 0; i < nfiles; i++) {
- fd = files[i].fd;
/* check for log rotation or host reconnection needed */
if ((count % 10) == 0) /* but only once every 10 */
local_connection(&files[i]);
+ if ((fd = files[i].fd) < 0)
+ continue;
if (files[i].type != FILE_TAIL && !(__pmFD_ISSET(fd, &readyfds)))
continue;
offset = 0;
@@ -431,21 +429,16 @@ multiread:
(oserror() == EAGAIN) ||
(oserror() == EWOULDBLOCK)))
continue;
- if (files[i].type == FILE_SOCK) {
- close(files[i].fd);
- files[i].fd = -1;
- continue;
- }
- pmNotifyErr(LOG_ERR, "Data read error on %s: %s\n",
- local_filetype(files[i].type), osstrerror());
- exit(1);
+ close(files[i].fd);
+ files[i].fd = -1;
+ continue;
}
if (bytes == 0) {
if (files[i].type == FILE_TAIL)
continue;
- pmNotifyErr(LOG_ERR, "No data to read - %s may be closed\n",
- local_filetype(files[i].type));
- exit(1);
+ close(files[i].fd);
+ files[i].fd = -1;
+ continue;
}
/*
* good read ... data up to buffer + offset + bytes is all OK
diff --git a/src/pmdas/postfix/pmdapostfix.pl b/src/pmdas/postfix/pmdapostfix.pl
index ac46816bc..d6d3f4d3a 100644
--- a/src/pmdas/postfix/pmdapostfix.pl
+++ b/src/pmdas/postfix/pmdapostfix.pl
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2012-2015 Red Hat.
+# Copyright (c) 2012-2015,2022 Red Hat.
# Copyright (c) 2009-2010 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
#
# This program is free software; you can redistribute it and/or modify it
@@ -56,8 +56,6 @@ my @postfix_received_dom = (
1 => 'smtp',
);
-my $setup = defined($ENV{'PCP_PERL_PMNS'}) || defined($ENV{'PCP_PERL_DOMAIN'});
-
sub postfix_do_refresh
{
QUEUE:
@@ -212,7 +210,7 @@ $logstats{"received"}{1} = 0;
# Note:
# Environment variables.
-# $PMDA_POSTFIX_QSHAPE: alternative executable qshape scrpipt (for QA)
+# $PMDA_POSTFIX_QSHAPE: alternative executable qshape script (for QA)
# ... over-rides default and command line argument.
# ... over-rides default arguments -b 10 -t $refresh
# $PMDA_POSTFIX_REFRESH: alternative refresh rate (for QA)
@@ -228,7 +226,7 @@ if (defined($ENV{'PMDA_POSTFIX_QSHAPE'})) {
$qshape = $ENV{'PMDA_POSTFIX_QSHAPE'};
$qshape_args = '';
}
-if (!$setup) { $pmda->log("qshape cmd: $qshape $qshape_args <qname>"); }
+unless (pmda_install()) { $pmda->log("qshape cmd: $qshape $qshape_args <qname>"); }
if (defined($ENV{'PMDA_POSTFIX_REFRESH'})) { $refresh = $ENV{'PMDA_POSTFIX_REFRESH'}; }
@@ -238,12 +236,15 @@ foreach my $file ( @logfiles ) {
}
}
if (defined($ENV{'PMDA_POSTFIX_LOG'})) { $logfile = $ENV{'PMDA_POSTFIX_LOG'}; }
-unless(defined($logfile))
-{
- $pmda->log("Fatal: No Postfix log file found in: @logfiles");
- die 'No Postfix log file found';
+unless (pmda_install()) {
+ if (defined($logfile)) {
+ $pmda->log("logfile: $logfile");
+ } else {
+ $pmda->log("Warning: assuming logfile: $logfiles[0] as no Postfix log found yet from: @logfiles");
+ }
}
-if (!$setup) { $pmda->log("logfile: $logfile"); }
+# set a good default if none found, before continuing
+unless (defined($logfile)) { $logfile = $logfiles[0]; }
$pmda->add_indom($postfix_queues_indom, \@postfix_queues_dom, '', '');
$pmda->add_indom($postfix_sent_indom, \@postfix_sent_dom, '', '');
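
The local.c change above replaces fatal exits with deferred opens: a missing log file yields fd -1, and the main loop simply skips that descriptor until a later reconnection check succeeds. A tiny sketch of the idea, with an assumed helper name used purely for illustration:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /*
     * Sketch: try to (re)open a tailed log file without treating failure
     * as fatal; the caller stores -1 and retries on a later poll cycle.
     */
    static int
    tail_reopen(const char *path)
    {
        int fd = open(path, O_RDONLY | O_NDELAY);

        if (fd < 0) {
            fprintf(stderr, "open failed (%s), deferring\n", path);
            return -1;          /* caller skips this fd until it reopens */
        }
        lseek(fd, 0L, SEEK_END);    /* only report newly appended lines */
        return fd;
    }

    int main(void)
    {
        int fd = tail_reopen("/var/log/maillog");   /* example path */

        /* a main loop would poll, skip fd < 0, and periodically retry */
        if (fd >= 0)
            close(fd);
        return 0;
    }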

@@ -1,44 +0,0 @@
commit d874d2e486c8a64fa9945ed7aa0048cccbd46f77
Author: Nathan Scott <nathans@redhat.com>
Date: Wed May 4 17:11:19 2022 +1000
pmdaproc: fix cgroup cpu metrics refresh structures
Jan Kurik encountered this issue when running the regression
testsuite (especially qa/359) on non-x86_64 architectures.
Something must've changed in the toolchain recently on these
platforms since we've not seen this before, but this bug has
been in our code for some time. It works everywhere else by
good fortune, when there just happen to be NULLs after these
cgroups CPU parsing data structures.
Resolves Red Hat BZ #2081262.
diff --git a/src/pmdas/linux_proc/cgroups.c b/src/pmdas/linux_proc/cgroups.c
index 413a72343..26d59863a 100644
--- a/src/pmdas/linux_proc/cgroups.c
+++ b/src/pmdas/linux_proc/cgroups.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2019 Red Hat.
+ * Copyright (c) 2012-2019,2022 Red Hat.
* Copyright (c) 2010 Aconex. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
@@ -863,6 +863,7 @@ read_cpu_time(const char *file, cgroup_cputime_t *ccp)
{ "usage_usec", &cputime.usage },
{ "user_usec", &cputime.user },
{ "system_usec", &cputime.system },
+ { NULL, NULL }
};
char buffer[4096], name[64];
unsigned long long value;
@@ -903,6 +904,7 @@ read_cpu_stats(const char *file, cgroup_cpustat_t *ccp)
{ "nr_periods", &cpustat.nr_periods },
{ "nr_throttled", &cpustat.nr_throttled },
{ "throttled_time", &cpustat.throttled_time },
+ { NULL, NULL }
};
char buffer[4096], name[64];
unsigned long long value;
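
The one-line fix above matters because the reader loops over the table until it hits a NULL entry; without the terminator the scan runs off the end of the array. A minimal sketch of that pattern, with illustrative names:

    #include <stdio.h>
    #include <string.h>

    struct cputime { unsigned long long usage, user, system; };

    int main(void)
    {
        struct cputime ct = { 0 };
        struct {
            const char *name;
            unsigned long long *value;
        } fields[] = {
            { "usage_usec",  &ct.usage  },
            { "user_usec",   &ct.user   },
            { "system_usec", &ct.system },
            { NULL, NULL }  /* sentinel: the loop below relies on it */
        };
        const char *name = "user_usec";
        unsigned long long value = 42;
        int i;

        /* without the { NULL, NULL } entry this loop reads past the array */
        for (i = 0; fields[i].name != NULL; i++) {
            if (strcmp(fields[i].name, name) == 0) {
                *fields[i].value = value;
                break;
            }
        }
        printf("user=%llu\n", ct.user);
        return 0;
    }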

@@ -1,74 +0,0 @@
diff -Naurp pcp-5.3.7.orig/src/pcp/dstat/pcp-dstat.1 pcp-5.3.7/src/pcp/dstat/pcp-dstat.1
--- pcp-5.3.7.orig/src/pcp/dstat/pcp-dstat.1 2021-05-26 17:43:26.000000000 +1000
+++ pcp-5.3.7/src/pcp/dstat/pcp-dstat.1 2022-10-20 08:57:02.176922290 +1100
@@ -1,6 +1,6 @@
'\"macro stdmacro
.\"
-.\" Copyright (c) 2018-2020 Red Hat.
+.\" Copyright (c) 2018-2022 Red Hat.
.\"
.\" This program is free software; you can redistribute it and/or modify it
.\" under the terms of the GNU General Public License as published by the
@@ -34,6 +34,7 @@
[\f3\-\-integer\f1]
[\f3\-\-nocolor\f1]
[\f3\-\-noheaders\f1]
+[\f3\-\-nomissed\f1]
[\f3\-\-noupdate\f1]
[\f3\-\-list\f1]
[\f3\-\-pidfile\f1 \f2pid-file\f1]
@@ -404,6 +405,9 @@ disable colors
\fB\-\-noheaders\fR
disable repetitive headers
.TP
+\fB\-\-nomissed\fR
+disable missed ticks warnings for intermediate samples.
+.TP
\fB\-\-noupdate\fR
disable intermediate updates when \fIdelay\fR greater than 1.
.TP
diff -Naurp pcp-5.3.7.orig/src/pcp/dstat/pcp-dstat.py pcp-5.3.7/src/pcp/dstat/pcp-dstat.py
--- pcp-5.3.7.orig/src/pcp/dstat/pcp-dstat.py 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/src/pcp/dstat/pcp-dstat.py 2022-10-20 08:57:02.176922290 +1100
@@ -455,6 +455,7 @@ class DstatTool(object):
# Internal
self.missed = 0
+ self.nomissed = False # report missed ticks by default
self.runtime = -1
self.plugins = [] # list of requested plugin names
self.allplugins = [] # list of all known plugin names
@@ -783,7 +784,8 @@ class DstatTool(object):
opts.pmSetLongOption('color', 0, '', '', 'force colors')
opts.pmSetLongOption('nocolor', 0, '', '', 'disable colors')
opts.pmSetLongOption('noheaders', 0, '', '', 'disable repetitive headers')
- opts.pmSetLongOption('noupdate', 0, '', '', 'disable intermediate headers')
+ opts.pmSetLongOption('noupdate', 0, '', '', 'disable intermediate updates')
+ opts.pmSetLongOption('nomissed', 0, '', '', 'disable missed ticks warnings')
opts.pmSetLongOption('output', 1, 'o', 'file', 'write CSV output to file')
opts.pmSetLongOption('version', 0, 'V', '', '')
opts.pmSetLongOption('debug', 1, None, '', '')
@@ -920,6 +922,8 @@ class DstatTool(object):
self.header = False
elif opt in ['noupdate']:
self.update = False
+ elif opt in ['nomissed']:
+ self.nomissed = True
elif opt in ['o', 'output']:
self.output = arg
elif opt in ['pidfile']:
@@ -1773,12 +1777,12 @@ class DstatTool(object):
outputfile = open(self.output, omode)
outputfile.write(oline)
- if self.missed > 0:
+ if self.missed > 0 and self.nomissed is False:
line = 'missed ' + str(self.missed + 1) + ' ticks'
sys.stdout.write(' ' + THEME['error'] + line + THEME['input'])
if self.output and step == self.delay:
outputfile.write(',"' + line + '"')
- self.missed = 0
+ self.missed = 0
# Finish the line
if not op.update and self.novalues is False:
sys.stdout.write('\n')

@@ -1,135 +0,0 @@
commit 55e8c83ee5920ab30644f54f7a525255b1de4b84
Author: Nathan Scott <nathans@redhat.com>
Date: Mon Aug 29 14:25:03 2022 +1000
docs: describe working sudoers configuration with requiretty
When /etc/sudoers is configured with 'Defaults requiretty',
pmlogctl cannot invoke pmlogger_check in the normal fashion.
The symptom of the problem is the following system log message:
pmlogctl[PID]: sudo: sorry, you must have a tty to run sudo
pmiectl and pmie_check are similarly affected. The simplest
solution is to add an additional configuration line excluding
these commands from requiring a tty; this is the approach now
documented.
Note these PCP commands are not interactive (require no tty)
and the unprivileged 'pcp' account uses nologin(8) as a shell
anyway, so requiretty offers no advantages here. Note also
there's debate about whether requiretty is a useful security
measure in general as it can be trivially bypassed; further
details: https://bugzilla.redhat.com/show_bug.cgi?id=1020147
Resolves Red Hat BZ #2093751
diff -Naurp pcp-5.3.7.orig/man/man1/pmie_check.1 pcp-5.3.7/man/man1/pmie_check.1
--- pcp-5.3.7.orig/man/man1/pmie_check.1 2021-11-04 08:26:15.000000000 +1100
+++ pcp-5.3.7/man/man1/pmie_check.1 2022-08-31 11:17:52.362276530 +1000
@@ -406,6 +406,42 @@ no
entries are needed as the timer mechanism provided by
.B systemd
is used instead.
+.PP
+The
+.BR pmiectl (1)
+utility may invoke
+.B pmie_check
+using the
+.BR sudo (1)
+command to run it under the $PCP_USER ``pcp'' account.
+If
+.B sudo
+is configured with the non-default
+.I requiretty
+option (see below),
+.B pmie_check
+may fail to run due to not having a tty configured.
+This issue can be resolved by adding a second line
+(expand $PCP_BINADM_DIR according to your platform)
+to the
+.I /etc/sudoers
+configuration file as follows:
+.P
+.ft CW
+.nf
+.in +0.5i
+Defaults requiretty
+Defaults!$PCP_BINADM_DIR/pmie_check !requiretty
+.in
+.fi
+.ft 1
+.P
+Note that the unprivileged PCP account under which these
+commands run uses
+.I /sbin/nologin
+as the shell, so the
+.I requiretty
+option is ineffective here and safe to disable in this way.
.SH FILES
.TP 5
.I $PCP_PMIECONTROL_PATH
diff -Naurp pcp-5.3.7.orig/man/man1/pmlogger_check.1 pcp-5.3.7/man/man1/pmlogger_check.1
--- pcp-5.3.7.orig/man/man1/pmlogger_check.1 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/man/man1/pmlogger_check.1 2022-08-31 11:20:52.470086724 +1000
@@ -830,6 +830,42 @@ no
entries are needed as the timer mechanism provided by
.B systemd
is used instead.
+.PP
+The
+.BR pmlogctl (1)
+utility may invoke
+.B pmlogger_check
+using the
+.BR sudo (1)
+command to run it under the $PCP_USER ``pcp'' account.
+If
+.B sudo
+is configured with the non-default
+.I requiretty
+option (see below),
+.B pmlogger_check
+may fail to run due to not having a tty configured.
+This issue can be resolved by adding a second line
+(expand $PCP_BINADM_DIR according to your platform)
+to the
+.I /etc/sudoers
+configuration file as follows:
+.P
+.ft CW
+.nf
+.in +0.5i
+Defaults requiretty
+Defaults!$PCP_BINADM_DIR/pmlogger_check !requiretty
+.in
+.fi
+.ft 1
+.P
+Note that the unprivileged PCP account under which these
+commands run uses
+.I /sbin/nologin
+as the shell, so the
+.I requiretty
+option is ineffective here and safe to disable in this way.
.SH FILES
.TP 5
.I $PCP_PMLOGGERCONTROL_PATH
@@ -926,7 +962,7 @@ instances for
.I hostname
have been launched in the interim.
Because the cron-driven PCP archive management scripts run under
-the uid of the user ``pcp'',
+the $PCP_USER account ``pcp'',
.BI $PCP_ARCHIVE_DIR/ hostname /SaveLogs
typically needs to be owned by the user ``pcp''.
.TP
@@ -994,6 +1030,7 @@ platforms.
.BR pmlogmv (1),
.BR pmlogrewrite (1),
.BR pmsocks (1),
+.BR sudo (1),
.BR systemd (1),
.BR xz (1)
and

@@ -1,103 +0,0 @@
From 73c024c64f7db68fdcd224c27c1711fa6dd1d254 Mon Sep 17 00:00:00 2001
From: Nathan Scott <nathans@redhat.com>
Date: Tue, 28 Jun 2022 10:06:06 +1000
Subject: [PATCH] pmlogger_farm: add default configuration file for farm
loggers
Provide a mechanism whereby the farm loggers can be configured.
There has been reluctance in the past to sharing configuration
of the local primary logger, so these are now done separately.
Makes sense to me as the primary pmlogger may need to use more
frequent sampling, may not want to allow remote pmlc, etc.
Resolves Red Hat BZ #2101574
---
src/pmlogger/GNUmakefile | 1 +
src/pmlogger/pmlogger.defaults | 2 ++
src/pmlogger/pmlogger_check.sh | 5 +++--
src/pmlogger/pmlogger_farm.defaults | 27 +++++++++++++++++++++++++++
4 files changed, 33 insertions(+), 2 deletions(-)
create mode 100644 src/pmlogger/pmlogger_farm.defaults
diff -Naurp pcp-5.3.7.orig/src/pmlogger/GNUmakefile pcp-5.3.7/src/pmlogger/GNUmakefile
--- pcp-5.3.7.orig/src/pmlogger/GNUmakefile 2022-02-02 11:53:05.000000000 +1100
+++ pcp-5.3.7/src/pmlogger/GNUmakefile 2022-08-31 11:23:08.758672970 +1000
@@ -45,6 +45,7 @@ install:: $(SUBDIRS)
install:: default
$(INSTALL) -m 775 -o $(PCP_USER) -g $(PCP_GROUP) -d $(PCP_VAR_DIR)/config/pmlogger
+ $(INSTALL) -m 644 pmlogger_farm.defaults $(PCP_SYSCONFIG_DIR)/pmlogger_farm
$(INSTALL) -m 644 pmlogger.defaults $(PCP_SYSCONFIG_DIR)/pmlogger
$(INSTALL) -m 755 -d $(PCP_SHARE_DIR)/zeroconf
$(INSTALL) -m 644 pmlogger.zeroconf $(PCP_SHARE_DIR)/zeroconf/pmlogger
diff -Naurp pcp-5.3.7.orig/src/pmlogger/pmlogger_check.sh pcp-5.3.7/src/pmlogger/pmlogger_check.sh
--- pcp-5.3.7.orig/src/pmlogger/pmlogger_check.sh 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/src/pmlogger/pmlogger_check.sh 2022-08-31 11:23:08.758672970 +1000
@@ -1,6 +1,6 @@
#! /bin/sh
#
-# Copyright (c) 2013-2016,2018,2020-2021 Red Hat.
+# Copyright (c) 2013-2016,2018,2020-2022 Red Hat.
# Copyright (c) 1995-2000,2003 Silicon Graphics, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
PMLOGGER="$PCP_BINADM_DIR/pmlogger"
PMLOGCONF="$PCP_BINADM_DIR/pmlogconf"
PMLOGGERENVS="$PCP_SYSCONFIG_DIR/pmlogger"
+PMLOGGERFARMENVS="$PCP_SYSCONFIG_DIR/pmlogger_farm"
PMLOGGERZEROCONFENVS="$PCP_SHARE_DIR/zeroconf/pmlogger"
# error messages should go to stderr, not the GUI notifiers
@@ -972,8 +973,8 @@ END { print m }'`
continue
fi
else
+ envs=`grep -h ^PMLOGGER "$PMLOGGERFARMENVS" 2>/dev/null`
args="-h $host $args"
- envs=""
iam=""
fi
diff -Naurp pcp-5.3.7.orig/src/pmlogger/pmlogger.defaults pcp-5.3.7/src/pmlogger/pmlogger.defaults
--- pcp-5.3.7.orig/src/pmlogger/pmlogger.defaults 2022-02-03 16:11:40.000000000 +1100
+++ pcp-5.3.7/src/pmlogger/pmlogger.defaults 2022-08-31 11:23:08.758672970 +1000
@@ -1,5 +1,7 @@
# Environment variables for the primary pmlogger daemon. See also
# the pmlogger control file and pmlogconf(1) for additional details.
+# Also see separate pmlogger_farm configuration for the non-primary
+# logger configuration settings, separate to this file.
# Settings defined in this file will override any settings in the
# pmlogger zeroconf file (if present).
diff -Naurp pcp-5.3.7.orig/src/pmlogger/pmlogger_farm.defaults pcp-5.3.7/src/pmlogger/pmlogger_farm.defaults
--- pcp-5.3.7.orig/src/pmlogger/pmlogger_farm.defaults 1970-01-01 10:00:00.000000000 +1000
+++ pcp-5.3.7/src/pmlogger/pmlogger_farm.defaults 2022-08-31 11:23:08.758672970 +1000
@@ -0,0 +1,27 @@
+# Environment variables for the pmlogger farm daemons. See also
+# pmlogger control file(s) and pmlogconf(1) for additional details.
+# Also see separate pmlogger configuration for the primary logger
+# configuration settings, separate to this file.
+
+# Behaviour regarding listening on external-facing interfaces;
+# unset PMLOGGER_LOCAL to allow connections from remote hosts.
+# A value of 0 permits remote connections, 1 permits local only.
+# PMLOGGER_LOCAL=1
+
+# Max length to which the queue of pending connections may grow
+# A value of 5 is the default.
+# PMLOGGER_MAXPENDING=5
+
+# Default sampling interval pmlogger uses when no more specific
+# interval is requested. A value of 60 seconds is the default.
+# Both pmlogger command line (via control file) and also pmlogger
+# configuration file directives will override this value.
+# PMLOGGER_INTERVAL=60
+
+# The default behaviour, when pmlogger configuration comes from
+# pmlogconf(1), is to regenerate the configuration file and check for
+# changes whenever pmlogger is started from pmlogger_check(1).
+# If the PMDA configuration is stable, this is not necessary, and
+# setting PMLOGGER_CHECK_SKIP_LOGCONF to yes disables the regeneration
+# and checking.
+# PMLOGGER_CHECK_SKIP_LOGCONF=yes

File diff suppressed because it is too large

@@ -1,534 +0,0 @@
diff -Naurp pcp-5.3.7.orig/qa/1985 pcp-5.3.7/qa/1985
--- pcp-5.3.7.orig/qa/1985 1970-01-01 10:00:00.000000000 +1000
+++ pcp-5.3.7/qa/1985 2022-10-19 21:32:03.971832371 +1100
@@ -0,0 +1,38 @@
+#!/bin/sh
+# PCP QA Test No. 1985
+# Exercise a pmfind fix - valgrind-enabled variant.
+#
+# Copyright (c) 2022 Red Hat. All Rights Reserved.
+#
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+# get standard environment, filters and checks
+. ./common.product
+. ./common.filter
+. ./common.check
+
+_check_valgrind
+
+_cleanup()
+{
+ cd $here
+ $sudo rm -rf $tmp $tmp.*
+}
+
+status=0 # success is the default!
+$sudo rm -rf $tmp $tmp.* $seq.full
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# real QA test starts here
+export seq
+./1986 --valgrind \
+| $PCP_AWK_PROG '
+skip == 1 && $1 == "===" { skip = 0 }
+/^=== std err ===/ { skip = 1 }
+skip == 0 { print }
+skip == 1 { print >>"'$here/$seq.full'" }'
+
+# success, all done
+exit
diff -Naurp pcp-5.3.7.orig/qa/1985.out pcp-5.3.7/qa/1985.out
--- pcp-5.3.7.orig/qa/1985.out 1970-01-01 10:00:00.000000000 +1000
+++ pcp-5.3.7/qa/1985.out 2022-10-19 21:32:03.971832371 +1100
@@ -0,0 +1,11 @@
+QA output created by 1985
+QA output created by 1986 --valgrind
+=== std out ===
+SOURCE HOSTNAME
+=== filtered valgrind report ===
+Memcheck, a memory error detector
+Command: pmfind -S -m probe=127.0.0.1/32
+LEAK SUMMARY:
+definitely lost: 0 bytes in 0 blocks
+indirectly lost: 0 bytes in 0 blocks
+ERROR SUMMARY: 0 errors from 0 contexts ...
diff -Naurp pcp-5.3.7.orig/qa/1986 pcp-5.3.7/qa/1986
--- pcp-5.3.7.orig/qa/1986 1970-01-01 10:00:00.000000000 +1000
+++ pcp-5.3.7/qa/1986 2022-10-19 21:32:03.971832371 +1100
@@ -0,0 +1,62 @@
+#!/bin/sh
+# PCP QA Test No. 1986
+# Exercise libpcp_web timers pmfind regression fix.
+#
+# Copyright (c) 2022 Red Hat. All Rights Reserved.
+#
+
+if [ $# -eq 0 ]
+then
+ seq=`basename $0`
+ echo "QA output created by $seq"
+else
+ # use $seq from caller, unless not set
+ [ -n "$seq" ] || seq=`basename $0`
+ echo "QA output created by `basename $0` $*"
+fi
+
+# get standard environment, filters and checks
+. ./common.product
+. ./common.filter
+. ./common.check
+
+do_valgrind=false
+if [ "$1" = "--valgrind" ]
+then
+ _check_valgrind
+ do_valgrind=true
+fi
+
+test -x $PCP_BIN_DIR/pmfind || _notrun No support for pmfind
+
+_cleanup()
+{
+ cd $here
+ $sudo rm -rf $tmp $tmp.*
+}
+
+status=0 # success is the default!
+hostname=`hostname || echo localhost`
+$sudo rm -rf $tmp $tmp.* $seq.full
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+_filter()
+{
+ sed \
+ -e "s@$tmp@TMP@g" \
+ -e "s/ $hostname/ HOSTNAME/" \
+ -e 's/^[a-f0-9][a-f0-9]* /SOURCE /' \
+ # end
+}
+
+# real QA test starts here
+if $do_valgrind
+then
+ _run_valgrind pmfind -S -m probe=127.0.0.1/32
+else
+ pmfind -S -m probe=127.0.0.1/32
+fi \
+| _filter
+
+# success, all done
+exit
diff -Naurp pcp-5.3.7.orig/qa/1986.out pcp-5.3.7/qa/1986.out
--- pcp-5.3.7.orig/qa/1986.out 1970-01-01 10:00:00.000000000 +1000
+++ pcp-5.3.7/qa/1986.out 2022-10-19 21:32:03.971832371 +1100
@@ -0,0 +1,2 @@
+QA output created by 1986
+SOURCE HOSTNAME
diff -Naurp pcp-5.3.7.orig/qa/group pcp-5.3.7/qa/group
--- pcp-5.3.7.orig/qa/group 2022-10-19 20:49:42.638708707 +1100
+++ pcp-5.3.7/qa/group 2022-10-19 21:32:03.972832359 +1100
@@ -1974,4 +1974,6 @@ x11
1957 libpcp local valgrind
1978 atop local
1984 pmlogconf pmda.redis local
+1985 pmfind local valgrind
+1986 pmfind local
4751 libpcp threads valgrind local pcp helgrind
diff -Naurp pcp-5.3.7.orig/src/libpcp_web/src/webgroup.c pcp-5.3.7/src/libpcp_web/src/webgroup.c
--- pcp-5.3.7.orig/src/libpcp_web/src/webgroup.c 2021-11-01 13:02:26.000000000 +1100
+++ pcp-5.3.7/src/libpcp_web/src/webgroup.c 2022-10-19 21:32:03.973832346 +1100
@@ -287,11 +287,24 @@ webgroup_new_context(pmWebGroupSettings
}
static void
+webgroup_timers_stop(struct webgroups *groups)
+{
+ if (groups->active) {
+ uv_timer_stop(&groups->timer);
+ uv_close((uv_handle_t *)&groups->timer, NULL);
+ pmWebTimerRelease(groups->timerid);
+ groups->timerid = -1;
+ groups->active = 0;
+ }
+}
+
+static void
webgroup_garbage_collect(struct webgroups *groups)
{
dictIterator *iterator;
dictEntry *entry;
context_t *cp;
+ unsigned int count = 0, drops = 0;
if (pmDebugOptions.http || pmDebugOptions.libweb)
fprintf(stderr, "%s: started\n", "webgroup_garbage_collect");
@@ -308,33 +321,40 @@ webgroup_garbage_collect(struct webgroup
uv_mutex_unlock(&groups->mutex);
webgroup_drop_context(cp, groups);
uv_mutex_lock(&groups->mutex);
+ drops++;
}
+ count++;
}
dictReleaseIterator(iterator);
+
+ /* if dropping the last remaining context, do cleanup */
+ if (groups->active && drops == count) {
+ if (pmDebugOptions.http || pmDebugOptions.libweb)
+ fprintf(stderr, "%s: freezing\n", "webgroup_garbage_collect");
+ webgroup_timers_stop(groups);
+ }
uv_mutex_unlock(&groups->mutex);
}
if (pmDebugOptions.http || pmDebugOptions.libweb)
- fprintf(stderr, "%s: finished\n", "webgroup_garbage_collect");
+ fprintf(stderr, "%s: finished [%u drops from %u entries]\n",
+ "webgroup_garbage_collect", drops, count);
}
static void
refresh_maps_metrics(void *data)
{
struct webgroups *groups = (struct webgroups *)data;
+ unsigned int value;
- if (groups->metrics) {
- unsigned int value;
-
- value = dictSize(contextmap);
- mmv_set(groups->map, groups->metrics[CONTEXT_MAP_SIZE], &value);
- value = dictSize(namesmap);
- mmv_set(groups->map, groups->metrics[NAMES_MAP_SIZE], &value);
- value = dictSize(labelsmap);
- mmv_set(groups->map, groups->metrics[LABELS_MAP_SIZE], &value);
- value = dictSize(instmap);
- mmv_set(groups->map, groups->metrics[INST_MAP_SIZE], &value);
- }
+ value = contextmap? dictSize(contextmap) : 0;
+ mmv_set(groups->map, groups->metrics[CONTEXT_MAP_SIZE], &value);
+ value = namesmap? dictSize(namesmap) : 0;
+ mmv_set(groups->map, groups->metrics[NAMES_MAP_SIZE], &value);
+ value = labelsmap? dictSize(labelsmap) : 0;
+ mmv_set(groups->map, groups->metrics[LABELS_MAP_SIZE], &value);
+ value = instmap? dictSize(instmap) : 0;
+ mmv_set(groups->map, groups->metrics[INST_MAP_SIZE], &value);
}
static void
@@ -487,6 +507,7 @@ pmWebGroupDestroy(pmWebGroupSettings *se
if (pmDebugOptions.libweb)
fprintf(stderr, "%s: destroy context %p gp=%p\n", "pmWebGroupDestroy", cp, gp);
+ webgroup_deref_context(cp);
webgroup_drop_context(cp, gp);
}
sdsfree(msg);
@@ -2394,17 +2415,12 @@ pmWebGroupClose(pmWebGroupModule *module
if (groups) {
/* walk the contexts, stop timers and free resources */
- if (groups->active) {
- groups->active = 0;
- uv_timer_stop(&groups->timer);
- pmWebTimerRelease(groups->timerid);
- groups->timerid = -1;
- }
iterator = dictGetIterator(groups->contexts);
while ((entry = dictNext(iterator)) != NULL)
webgroup_drop_context((context_t *)dictGetVal(entry), NULL);
dictReleaseIterator(iterator);
dictRelease(groups->contexts);
+ webgroup_timers_stop(groups);
memset(groups, 0, sizeof(struct webgroups));
free(groups);
}
diff -Naurp pcp-5.3.7.orig/src/pmfind/source.c pcp-5.3.7/src/pmfind/source.c
--- pcp-5.3.7.orig/src/pmfind/source.c 2021-02-17 15:27:41.000000000 +1100
+++ pcp-5.3.7/src/pmfind/source.c 2022-10-19 21:32:03.973832346 +1100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Red Hat.
+ * Copyright (c) 2020,2022 Red Hat.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -25,6 +25,7 @@ static pmWebGroupSettings settings;
typedef struct {
sds source;
sds hostspec;
+ unsigned int refcount;
} context_t;
typedef struct {
@@ -38,22 +39,34 @@ typedef struct {
} sources_t;
static void
+source_release(sources_t *sp, context_t *cp, sds ctx)
+{
+ pmWebGroupDestroy(&settings, ctx, sp);
+ sdsfree(cp->hostspec);
+ sdsfree(cp->source);
+ free(cp);
+}
+
+static void
sources_release(void *arg, const struct dictEntry *entry)
{
sources_t *sp = (sources_t *)arg;
context_t *cp = (context_t *)dictGetVal(entry);
sds ctx = (sds)entry->key;
- pmWebGroupDestroy(&settings, ctx, sp);
- sdsfree(cp->hostspec);
- sdsfree(cp->source);
+ if (pmDebugOptions.discovery)
+ fprintf(stderr, "releasing context %s\n", ctx);
+
+ source_release(sp, cp, ctx);
}
static void
-sources_containers(sources_t *sp, sds id, dictEntry *uniq)
+sources_containers(sources_t *sp, context_t *cp, sds id, dictEntry *uniq)
{
uv_mutex_lock(&sp->mutex);
- sp->count++; /* issuing another PMWEBAPI request */
+ /* issuing another PMWEBAPI request */
+ sp->count++;
+ cp->refcount++;
uv_mutex_unlock(&sp->mutex);
pmWebGroupScrape(&settings, id, sp->params, sp);
@@ -75,6 +88,7 @@ on_source_context(sds id, pmWebSource *s
cp->source = sdsdup(src->source);
cp->hostspec = sdsdup(src->hostspec);
+ cp->refcount = 1;
uv_mutex_lock(&sp->mutex);
dictAdd(sp->contexts, id, cp);
@@ -84,7 +98,7 @@ on_source_context(sds id, pmWebSource *s
if (entry) { /* source just discovered */
printf("%s %s\n", src->source, src->hostspec);
if (containers)
- sources_containers(sp, id, entry);
+ sources_containers(sp, cp, id, entry);
}
}
@@ -116,7 +130,9 @@ static void
on_source_done(sds context, int status, sds message, void *arg)
{
sources_t *sp = (sources_t *)arg;
- int count = 0, release = 0;
+ context_t *cp;
+ dictEntry *he;
+ int remove = 0, count = 0, release = 0;
if (pmDebugOptions.discovery)
fprintf(stderr, "done on context %s (sts=%d)\n", context, status);
@@ -127,19 +143,26 @@ on_source_done(sds context, int status,
uv_mutex_lock(&sp->mutex);
if ((count = --sp->count) <= 0)
release = 1;
+ if ((he = dictFind(sp->contexts, context)) != NULL &&
+ (cp = (context_t *)dictGetVal(he)) != NULL &&
+ (--cp->refcount <= 0))
+ remove = 1;
uv_mutex_unlock(&sp->mutex);
+ if (remove) {
+ if (pmDebugOptions.discovery)
+ fprintf(stderr, "remove context %s\n", context);
+ source_release(sp, cp, context);
+ dictDelete(sp->contexts, context);
+ }
+
if (release) {
unsigned long cursor = 0;
-
- if (pmDebugOptions.discovery)
- fprintf(stderr, "release context %s (sts=%d)\n", context, status);
do {
cursor = dictScan(sp->contexts, cursor, sources_release, NULL, sp);
} while (cursor);
- } else {
- if (pmDebugOptions.discovery)
- fprintf(stderr, "not yet releasing (count=%d)\n", count);
+ } else if (pmDebugOptions.discovery) {
+ fprintf(stderr, "not yet releasing (count=%d)\n", count);
}
}
@@ -190,6 +213,7 @@ sources_discovery_start(uv_timer_t *arg)
}
dictRelease(dp);
+ pmWebTimerClose();
}
/*
@@ -214,8 +238,8 @@ source_discovery(int count, char **urls)
uv_mutex_init(&find.mutex);
find.urls = urls;
find.count = count; /* at least one PMWEBAPI request for each url */
- find.uniq = dictCreate(&sdsDictCallBacks, NULL);
- find.params = dictCreate(&sdsDictCallBacks, NULL);
+ find.uniq = dictCreate(&sdsKeyDictCallBacks, NULL);
+ find.params = dictCreate(&sdsOwnDictCallBacks, NULL);
dictAdd(find.params, sdsnew("name"), sdsnew("containers.state.running"));
find.contexts = dictCreate(&sdsKeyDictCallBacks, NULL);
@@ -230,6 +254,7 @@ source_discovery(int count, char **urls)
pmWebGroupSetup(&settings.module);
pmWebGroupSetEventLoop(&settings.module, loop);
+ pmWebTimerSetEventLoop(loop);
/*
* Start a one-shot timer to add a start function into the loop
@@ -244,7 +269,9 @@ source_discovery(int count, char **urls)
/*
* Finished, release all resources acquired so far
*/
+ pmWebGroupClose(&settings.module);
uv_mutex_destroy(&find.mutex);
+ dictRelease(find.uniq);
dictRelease(find.params);
dictRelease(find.contexts);
return find.status;
diff -Naurp pcp-5.3.7.orig/src/pmproxy/src/server.c pcp-5.3.7/src/pmproxy/src/server.c
--- pcp-5.3.7.orig/src/pmproxy/src/server.c 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/src/pmproxy/src/server.c 2022-10-19 21:31:43.831093354 +1100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019,2021 Red Hat.
+ * Copyright (c) 2018-2019,2021-2022 Red Hat.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
@@ -310,17 +310,21 @@ on_write_callback(uv_callback_t *handle,
struct client *client = (struct client *)request->writer.data;
int sts;
+ (void)handle;
if (pmDebugOptions.af)
fprintf(stderr, "%s: client=%p\n", "on_write_callback", client);
if (client->stream.secure == 0) {
sts = uv_write(&request->writer, (uv_stream_t *)&client->stream,
&request->buffer[0], request->nbuffers, request->callback);
- if (sts != 0)
- fprintf(stderr, "%s: ERROR uv_write failed\n", "on_write_callback");
+ if (sts != 0) {
+ pmNotifyErr(LOG_ERR, "%s: %s - uv_write failed [%s]: %s\n",
+ pmGetProgname(), "on_write_callback",
+ uv_err_name(sts), uv_strerror(sts));
+ client_close(client);
+ }
} else
secure_client_write(client, request);
- (void)handle;
return 0;
}
@@ -455,14 +459,16 @@ on_client_connection(uv_stream_t *stream
uv_handle_t *handle;
if (status != 0) {
- fprintf(stderr, "%s: client connection failed: %s\n",
- pmGetProgname(), uv_strerror(status));
+ pmNotifyErr(LOG_ERR, "%s: %s - %s failed [%s]: %s\n",
+ pmGetProgname(), "on_client_connection", "connection",
+ uv_err_name(status), uv_strerror(status));
return;
}
if ((client = calloc(1, sizeof(*client))) == NULL) {
- fprintf(stderr, "%s: out-of-memory for new client\n",
- pmGetProgname());
+ pmNotifyErr(LOG_ERR, "%s: %s - %s failed [%s]: %s\n",
+ pmGetProgname(), "on_client_connection", "calloc",
+ "ENOMEM", strerror(ENOMEM));
return;
}
if (pmDebugOptions.context | pmDebugOptions.af)
@@ -476,16 +482,18 @@ on_client_connection(uv_stream_t *stream
status = uv_tcp_init(proxy->events, &client->stream.u.tcp);
if (status != 0) {
- fprintf(stderr, "%s: client tcp init failed: %s\n",
- pmGetProgname(), uv_strerror(status));
+ pmNotifyErr(LOG_ERR, "%s: %s - %s failed [%s]: %s\n",
+ pmGetProgname(), "on_client_connection", "uv_tcp_init",
+ uv_err_name(status), uv_strerror(status));
client_put(client);
return;
}
status = uv_accept(stream, (uv_stream_t *)&client->stream.u.tcp);
if (status != 0) {
- fprintf(stderr, "%s: client tcp init failed: %s\n",
- pmGetProgname(), uv_strerror(status));
+ pmNotifyErr(LOG_ERR, "%s: %s - %s failed [%s]: %s\n",
+ pmGetProgname(), "on_client_connection", "uv_accept",
+ uv_err_name(status), uv_strerror(status));
client_put(client);
return;
}
@@ -496,8 +504,9 @@ on_client_connection(uv_stream_t *stream
status = uv_read_start((uv_stream_t *)&client->stream.u.tcp,
on_buffer_alloc, on_client_read);
if (status != 0) {
- fprintf(stderr, "%s: client read start failed: %s\n",
- pmGetProgname(), uv_strerror(status));
+ pmNotifyErr(LOG_ERR, "%s: %s - %s failed [%s]: %s\n",
+ pmGetProgname(), "on_client_connection", "uv_read_start",
+ uv_err_name(status), uv_strerror(status));
client_close(client);
}
}
@@ -530,8 +539,9 @@ open_request_port(struct proxy *proxy, s
sts = uv_listen((uv_stream_t *)&stream->u.tcp, maxpending, on_client_connection);
if (sts != 0) {
- fprintf(stderr, "%s: socket listen error %s\n",
- pmGetProgname(), uv_strerror(sts));
+ pmNotifyErr(LOG_ERR, "%s: %s - uv_listen failed [%s]: %s\n",
+ pmGetProgname(), "open_request_port",
+ uv_err_name(sts), uv_strerror(sts));
uv_close(handle, NULL);
return -ENOTCONN;
}
@@ -554,15 +564,23 @@ open_request_local(struct proxy *proxy,
uv_pipe_init(proxy->events, &stream->u.local, 0);
handle = (uv_handle_t *)&stream->u.local;
handle->data = (void *)proxy;
- uv_pipe_bind(&stream->u.local, name);
+ sts = uv_pipe_bind(&stream->u.local, name);
+ if (sts != 0) {
+ pmNotifyErr(LOG_ERR, "%s: %s - uv_pipe_bind %s failed [%s]: %s\n",
+ pmGetProgname(), "open_request_local", name,
+ uv_err_name(sts), uv_strerror(sts));
+ uv_close(handle, NULL);
+ return -ENOTCONN;
+ }
#ifdef HAVE_UV_PIPE_CHMOD
uv_pipe_chmod(&stream->u.local, UV_READABLE | UV_WRITABLE);
#endif
sts = uv_listen((uv_stream_t *)&stream->u.local, maxpending, on_client_connection);
if (sts != 0) {
- fprintf(stderr, "%s: local listen error %s\n",
- pmGetProgname(), uv_strerror(sts));
+ pmNotifyErr(LOG_ERR, "%s: %s - %s failed [%s]: %s\n",
+ pmGetProgname(), "open_request_local", "uv_listen",
+ uv_err_name(sts), uv_strerror(sts));
uv_close(handle, NULL);
return -ENOTCONN;
}

@@ -1,32 +0,0 @@
commit 4014e295f7b5f541439774bd3c88924d3c061325
Author: Masatake YAMATO <yamato@redhat.com>
Date: Thu Oct 20 13:55:43 2022 +0900
pmdas/snmp: install the agent specific configuration file to PMDATMPDIR
When running ./Install of the agent, the following line is printed in
/var/log/pcp/pmcd/snmp.log.
Log for pmdasnmp on pcp-netsnmp started Tue Oct 18 22:45:23 2022
opening /var/lib/pcp/pmdas/snmp/snmp.conf No such file or directory at /var/lib/pcp/pmdas/snmp/pmdasnmp.pl line 90.
    As a result, pmdasnmp.pl cannot read its configuration file even though
    it has been "./Install"ed.
Signed-off-by: Masatake YAMATO <yamato@redhat.com>
diff --git a/src/pmdas/snmp/GNUmakefile b/src/pmdas/snmp/GNUmakefile
index ce0e3e8036..7bf5471a76 100644
--- a/src/pmdas/snmp/GNUmakefile
+++ b/src/pmdas/snmp/GNUmakefile
@@ -44,7 +44,7 @@ install_pcp install: default
$(INSTALL) -m 755 -t $(PMDATMPDIR) Install Remove $(PMDAADMDIR)
$(INSTALL) -m 644 -t $(PMDATMPDIR)/pmda$(IAM).pl pmda$(IAM).pl $(PMDAADMDIR)/pmda$(IAM).pl
$(INSTALL) -m 755 -d $(PMDACONFIG)
- $(INSTALL) -m 644 snmp.conf $(PMDACONFIG)/snmp.conf
+ $(INSTALL) -m 644 -t $(PMDATMPDIR)/$(IAM).conf $(IAM).conf $(PMDACONFIG)/$(IAM).conf
@$(INSTALL_MAN)
else
build-me:

@@ -1,317 +0,0 @@
commit 1e216d84c6b95b4f9cb7ee6b5adf9591f8af37d5
Author: Nathan Scott <nathans@redhat.com>
Date: Wed Dec 7 11:40:43 2022 +1100
pmdaopenmetrics: validate given names before using them for metrics
    It's possible pmdaopenmetrics is fed garbage accidentally, e.g. in the
    case where a /metrics endpoint is not made visible and an HTTP error
response is returned (misconfiguration).
    Failure to safeguard against this results in the generation of diagnostics in
the openmetrics PMDA log file at an alarming rate until all available
filesystem space is completely consumed. It can also result in bogus
metric names being exposed to PMAPI clients.
This commit both adds new safeguards and scales back diagnostic noise
from the agent. In some places it switches to more appropriate APIs
for the level of logging being performed (esp. debug messages).
Resolves Red Hat BZ #2139325
diff -Naurp pcp-5.3.7.orig/qa/1191.out pcp-5.3.7/qa/1191.out
--- pcp-5.3.7.orig/qa/1191.out 2021-11-01 13:02:26.000000000 +1100
+++ pcp-5.3.7/qa/1191.out 2023-03-09 11:49:42.372413973 +1100
@@ -10884,13 +10884,6 @@ GC Wall Time for b5be5b9f_b0f1_47de_b436
inst [0 or "0 collector_name:Copy"] value 24462465
inst [1 or "1 collector_name:MSC"] value 49519
-openmetrics.vmware_exporter.[root@ci-vm-10-0-138-196 [vmware_host_net_usage_average]
- Data Type: double InDom: PM_INDOM_NULL 0xffffffff
- Semantics: instant Units: none
-Help:
-vmware_host_net_usage_average
-Error: Try again. Information not currently available
-
openmetrics.vmware_exporter.vmware_datastore_accessible [VMWare datastore accessible (true / false)]
Data Type: double InDom: 144.35859 0x24008c13
Semantics: instant Units: none
diff -Naurp pcp-5.3.7.orig/qa/1221.out pcp-5.3.7/qa/1221.out
--- pcp-5.3.7.orig/qa/1221.out 2021-11-01 13:02:26.000000000 +1100
+++ pcp-5.3.7/qa/1221.out 2023-03-09 11:49:42.382414004 +1100
@@ -7105,9 +7105,6 @@ openmetrics.thermostat.tms_jvm_gc_b5be5b
inst [0 or "0 collector_name:Copy"] labels {"agent":"openmetrics","collector_name":"Copy","domainname":DOMAINNAME,"groupid":NUM,"hostname":HOSTNAME,"instname":"0 collector_name:Copy","machineid":MACHINEID,"source":"thermostat","url":FILEURL,"userid":NUM}
inst [1 or "1 collector_name:MSC"] labels {"agent":"openmetrics","collector_name":"MSC","domainname":DOMAINNAME,"groupid":NUM,"hostname":HOSTNAME,"instname":"1 collector_name:MSC","machineid":MACHINEID,"source":"thermostat","url":FILEURL,"userid":NUM}
-openmetrics.vmware_exporter.[root@ci-vm-10-0-138-196
- labels {"agent":"openmetrics","domainname":DOMAINNAME,"groupid":NUM,"hostname":HOSTNAME,"machineid":MACHINEID,"source":"vmware_exporter","url":FILEURL,"userid":NUM}
-
openmetrics.vmware_exporter.vmware_datastore_accessible
labels {"agent":"openmetrics","dc_name":"ha-datacenter","domainname":DOMAINNAME,"ds_cluster":"","ds_name":"name008","groupid":NUM,"hostname":HOSTNAME,"machineid":MACHINEID,"source":"vmware_exporter","url":FILEURL,"userid":NUM}
inst [0 or "0 dc_name:ha-datacenter ds_cluster: ds_name:name026"] labels {"agent":"openmetrics","dc_name":"ha-datacenter","domainname":DOMAINNAME,"ds_cluster":"","ds_name":"name026","groupid":NUM,"hostname":HOSTNAME,"instname":"0 dc_name:ha-datacenter ds_cluster: ds_name:name026","machineid":MACHINEID,"source":"vmware_exporter","url":FILEURL,"userid":NUM}
@@ -15331,9 +15328,6 @@ openmetrics.thermostat.tms_jvm_gc_b5be5b
inst [0 or "0 collector_name:Copy"] labels {"agent":"openmetrics","collector_name":"Copy","domainname":DOMAINNAME,"groupid":NUM,"hostname":HOSTNAME,"instname":"0 collector_name:Copy","machineid":MACHINEID,"source":"thermostat","url":FILEURL,"userid":NUM}
inst [1 or "1 collector_name:MSC"] labels {"agent":"openmetrics","collector_name":"MSC","domainname":DOMAINNAME,"groupid":NUM,"hostname":HOSTNAME,"instname":"1 collector_name:MSC","machineid":MACHINEID,"source":"thermostat","url":FILEURL,"userid":NUM}
-openmetrics.vmware_exporter.[root@ci-vm-10-0-138-196
- labels {"agent":"openmetrics","domainname":DOMAINNAME,"groupid":NUM,"hostname":HOSTNAME,"machineid":MACHINEID,"source":"vmware_exporter","url":FILEURL,"userid":NUM}
-
openmetrics.vmware_exporter.vmware_datastore_accessible
labels {"agent":"openmetrics","dc_name":"ha-datacenter","domainname":DOMAINNAME,"ds_cluster":"","ds_name":"name008","groupid":NUM,"hostname":HOSTNAME,"machineid":MACHINEID,"source":"vmware_exporter","url":FILEURL,"userid":NUM}
inst [0 or "0 dc_name:ha-datacenter ds_cluster: ds_name:name026"] labels {"agent":"openmetrics","dc_name":"ha-datacenter","domainname":DOMAINNAME,"ds_cluster":"","ds_name":"name026","groupid":NUM,"hostname":HOSTNAME,"instname":"0 dc_name:ha-datacenter ds_cluster: ds_name:name026","machineid":MACHINEID,"source":"vmware_exporter","url":FILEURL,"userid":NUM}
diff -Naurp pcp-5.3.7.orig/qa/1342 pcp-5.3.7/qa/1342
--- pcp-5.3.7.orig/qa/1342 2021-02-17 15:27:41.000000000 +1100
+++ pcp-5.3.7/qa/1342 2023-03-09 11:49:42.382414004 +1100
@@ -99,6 +99,8 @@ echo == Note: check $seq.full for log en
echo == pmdaopenmetrics LOG == >>$here/$seq.full
cat $PCP_LOG_DIR/pmcd/openmetrics.log >>$here/$seq.full
+# cleanup preparing for remove (else error messages here)
+$sudo rm $PCP_PMDAS_DIR/openmetrics/config.d/simple_metric.*
_pmdaopenmetrics_remove
# success, all done
diff -Naurp pcp-5.3.7.orig/qa/1727 pcp-5.3.7/qa/1727
--- pcp-5.3.7.orig/qa/1727 2021-09-23 14:19:12.000000000 +1000
+++ pcp-5.3.7/qa/1727 2023-03-09 11:49:42.382414004 +1100
@@ -2,6 +2,7 @@
# PCP QA Test No. 1727
# Test duplicate instname labels in /metrics webapi when a context
# level label such as "hostname" is explicitly specified.
+# Test invalid OpenMetrics metric names also.
#
# Copyright (c) 2021 Red Hat. All Rights Reserved.
#
@@ -54,8 +55,17 @@ echo '# TYPE somemetric gauge'
echo 'somemetric{hostname="$MYHOST"} 1234'
EOF
-chmod 755 $tmp.script
+cat <<EOF >$tmp.badxml
+#! /bin/bash
+
+cat $here/sadist/891688-dash-time.xml
+echo '# TYPE somemetric gauge'
+echo 'somemetric{hostname="$MYHOST"} 1234'
+EOF
+
+chmod 755 $tmp.script $tmp.badxml
$sudo mv $tmp.script $PCP_PMDAS_DIR/openmetrics/config.d/duplicate_instname_label.sh
+$sudo mv $tmp.badxml $PCP_PMDAS_DIR/openmetrics/config.d/invalid_metrics_badinput.sh
_pmdaopenmetrics_install
if ! _pmdaopenmetrics_wait_for_metric openmetrics.control.calls
@@ -68,6 +78,14 @@ echo; echo === /metrics webapi listing.
curl -Gs 'http://localhost:44322/metrics?names=openmetrics.duplicate_instname_label.somemetric' \
| _filter_openmetrics_labels
+echo; echo === verify metric name validity using pminfo
+pminfo -v openmetrics
+
+# squash errors for a clean uninstall
+$sudo rm $PCP_PMDAS_DIR/openmetrics/config.d/invalid_metrics_badinput.sh
+# capture openmetrics log for posterity
+cat $PCP_LOG_DIR/pmcd/openmetrics.log >> $here/$seq.full
+
_pmdaopenmetrics_remove
# success, all done
diff -Naurp pcp-5.3.7.orig/qa/1727.out pcp-5.3.7/qa/1727.out
--- pcp-5.3.7.orig/qa/1727.out 2021-09-13 14:40:08.000000000 +1000
+++ pcp-5.3.7/qa/1727.out 2023-03-09 11:49:42.382414004 +1100
@@ -8,6 +8,8 @@ QA output created by 1727
# TYPE openmetrics_duplicate_instname_label_somemetric gauge
openmetrics_duplicate_instname_label_somemetric{hostname=HOSTNAME,instid="0",instname="0 hostname:HOSTNAME",domainname=DOMAINNAME,machineid=MACHINEID,source="duplicate_instname_label"} 1234
+=== verify metric name validity using pminfo
+
=== remove openmetrics agent ===
Culling the Performance Metrics Name Space ...
openmetrics ... done
diff -Naurp pcp-5.3.7.orig/qa/openmetrics/samples/vmware_exporter.txt pcp-5.3.7/qa/openmetrics/samples/vmware_exporter.txt
--- pcp-5.3.7.orig/qa/openmetrics/samples/vmware_exporter.txt 2021-11-01 13:02:26.000000000 +1100
+++ pcp-5.3.7/qa/openmetrics/samples/vmware_exporter.txt 2023-03-09 11:49:42.382414004 +1100
@@ -1035,25 +1035,6 @@ vmware_host_net_errorsTx_summation{clust
# HELP vmware_host_net_usage_average vmware_host_net_usage_average
# TYPE vmware_host_net_usage_average gauge
vmware_host_net_usage_average{cluster_name="",dc_name="ha-datacenter",host_name="SOMEHOSTNAME"} 3.0
-[root@ci-vm-10-0-138-196 ~]# curl -Gs http://localhost:9272/metrics > curl.txt
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]#
-[root@ci-vm-10-0-138-196 ~]# cat curl.txt
# HELP vmware_vm_power_state VMWare VM Power state (On / Off)
# TYPE vmware_vm_power_state gauge
vmware_vm_power_state{cluster_name="",dc_name="ha-datacenter",ds_name="name026",host_name="SOMEHOSTNAME",vm_name="name023-sat61-client"} 0.0
diff -Naurp pcp-5.3.7.orig/src/pmdas/openmetrics/pmdaopenmetrics.python pcp-5.3.7/src/pmdas/openmetrics/pmdaopenmetrics.python
--- pcp-5.3.7.orig/src/pmdas/openmetrics/pmdaopenmetrics.python 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/src/pmdas/openmetrics/pmdaopenmetrics.python 2023-03-09 11:49:42.382414004 +1100
@@ -122,7 +122,7 @@ class Metric(object):
# c_units = c_api.pmUnits_int(self.munits.dimSpace, self.munits.dimTime, self.munits.dimCount,
# self.munits.scaleSpace, self.munits.scaleTime, self.munits.scaleCount)
- self.source.pmda.log("Metric: adding new metric %s pmid=%s type=%d sem=%d singular=%s mindom=0x%x labels=%s" %
+ self.source.pmda.debug("Metric: adding metric %s pmid=%s type=%d sem=%d singular=%s mindom=0x%x labels=%s" %
(name, pmContext.pmIDStr(self.pmid), self.mtype, self.msem, self.singular, self.mindom, self.labels))
self.obj = pmdaMetric(self.pmid, self.mtype, self.mindom, self.msem, self.munits)
@@ -565,6 +565,7 @@ class Source(object):
self.cluster = cluster # unique/persistent id# for nickname
self.path = path # pathname to .url or executable file
self.url = None
+ self.parse_error = False
self.parse_url_time = 0 # timestamp of config file when it was last parsed
self.is_scripted = is_scripted
self.pmda = thispmda # the shared pmda
@@ -659,6 +660,19 @@ class Source(object):
self.pmda.log("instname_labels returning %s" % naming_labels) if self.pmda.dbg else None
return naming_labels
+ def valid_metric_name(self, name):
+ '''
+ Check validity of given metric name component:
+ - only contains alphanumerics and/or underscores
+ (colon removed later, allowed through here);
+ - only starts with an alphabetic character.
+ '''
+ if not name[0].isalpha():
+ return False
+ if not re.sub('[_.:]', '', name).isalnum():
+ return False
+ return True
+
def parse_metric_line(self, line, pcpline, helpline, typeline):
'''
Parse the sample line, identify/create corresponding metric & instance.
@@ -687,8 +701,10 @@ class Source(object):
# Nb: filter pattern is applied only to the leaf component of the full metric name
if self.check_filter(sp.name, "METRIC") != "INCLUDE":
self.pmda.log("Metric %s excluded by config filters" % fullname)
- return
+ return True
else:
+ if not self.valid_metric_name(sp.name):
+ raise ValueError('invalid metric name: ' + sp.name)
# new metric
metricnum = self.pmids_table.intern_lookup_value(sp.name)
# check if the config specifies metadata for this metric
@@ -708,24 +724,32 @@ class Source(object):
else:
m.store_inst(naming_labels, sp.value)
self.pmda.set_notify_change()
+ except ValueError as e:
+ if not self.parse_error:
+ self.pmda.err("cannot parse name in %s: %s" % (line, e))
+ self.parse_error = True # one-time-only error diagnostic
+ return False
except Exception as e:
+ if self.pmda.dbg:
+ traceback.print_exc() # traceback can be handy here
self.pmda.err("cannot parse/store %s: %s" % (line, e))
- traceback.print_exc() # traceback can be handy here
-
+ return False
+ return True
def parse_lines(self, text):
- '''Refresh all the metric metadata as it is found, including creating
- new metrics. Store away metric values for subsequent
- fetch()es. Parse errors may result in exceptions.
- That's OK, we don't try heroics to parse non-compliant
- data. Return number of metrics extracted.
'''
-
+ Refresh all the metric metadata as it is found, including creating
+ new metrics. Store away metric values for subsequent fetch()es.
+ Input parse errors result in exceptions and early termination.
+ That's OK, we don't try heroics to parse non-compliant data.
+ Return number of metrics extracted.
+ '''
num_metrics = 0
lines = text.splitlines()
pcpline = None
helpline = None
typeline = None
+ badness = False
state = "metadata"
for line in lines:
self.pmda.debug("line: %s state: %s" % (line, state)) if self.pmda.dbg else None
@@ -762,9 +786,15 @@ class Source(object):
# NB: could verify helpline/typeline lp[2] matches,
# but we don't have to go out of our way to support
# non-compliant exporters.
- self.parse_metric_line(l, pcpline, helpline, typeline)
+ if not self.parse_metric_line(l, pcpline, helpline, typeline):
+ badness = True
+ break # bad metric line, skip the remainder of this file
num_metrics += 1
+ # clear one-time-only error diagnostic if the situation is now resolved
+ if not badness:
+ self.parse_error = False
+
# NB: this logic only ever -adds- Metrics to a Source. If a source
# stops supplying some metrics, then a PCP app will see a PM_ERR_INST
# coming back when it tries to fetch them. We could perhaps keep the
@@ -842,7 +872,7 @@ class Source(object):
metadata = line.split(' ')[1:]
self.metadatalist.append(metadata)
else:
- self.pmda.log('Warning: %s ignored unrecognised config entry "%s"' % (self.url, line))
+ self.pmda.err('%s ignored unrecognised config entry "%s"' % (self.url, line))
self.pmda.debug("DEBUG url: %s HEADERS: %s" % (self.url, self.headers)) if self.pmda.dbg else None
self.pmda.debug("DEBUG url: %s FILTERS: %s" % (self.url, self.filterlist)) if self.pmda.dbg else None
@@ -911,8 +941,7 @@ class Source(object):
except Exception as e:
self.pmda.stats_status[self.cluster] = 'failed to fetch URL or execute script %s: %s' % (self.path, e)
self.pmda.stats_status_code[self.cluster] = status_code
- self.pmda.debug('Warning: cannot fetch URL or execute script %s: %s' % (self.path, e)) if self.pmda.dbg else None
- return
+ self.pmda.debug('cannot fetch URL or execute script %s: %s' % (self.path, e))
def refresh2(self, timeout):
'''
@@ -950,7 +979,7 @@ class Source(object):
self.pmda.log("fetch: item=%d inst=%d m.mname=%s" % (item, inst, m.mname)) if self.pmda.dbg else None
return m.fetch_inst(inst)
except Exception as e:
- self.pmda.log("Warning: cannot fetch item %d inst %d: %s" % (item, inst, e))
+ self.pmda.debug("cannot fetch item %d inst %d: %s" % (item, inst, e))
return [c_api.PM_ERR_AGAIN, 0]
class OpenMetricsPMDA(PMDA):
@@ -1468,7 +1497,7 @@ class OpenMetricsPMDA(PMDA):
def debug(self, s):
if self.dbg:
- super(OpenMetricsPMDA, self).log("debug: " + s)
+ super(OpenMetricsPMDA, self).dbg(s)
def log(self, message):
PMDA.log(message)
@@ -1526,8 +1555,9 @@ if __name__ == '__main__':
if args.nosort:
sort_conf_list = False
- pmdadir = os.path.join(os.getenv('PCP_PMDAS_DIR'), args.root)
if not args.config.startswith("/"):
+ pmdadir = os.getenv('PCP_PMDAS_DIR') or '/'
+ pmdadir = os.path.join(pmdadir, args.root)
args.config = os.path.join(pmdadir, args.config)
# This PMDA starts up in the "notready" state, see the Install script where

@@ -1,706 +0,0 @@
commit f4e34cd32bc73b53c2fc94c8deb670044d9d18d6
Author: Nathan Scott <nathans@redhat.com>
Date: Wed Dec 7 12:17:19 2022 +1100
pmdanfsclient: fix srcport handling for rdma and udp mounts
    This bug fix was kindly brought to you by the good folk at Penn State Uni.
    Regression test update by me, but the real fix came from others.
Resolves Red Hat BZ #2150889
diff --git a/qa/798.out.32 b/qa/798.out.32
index 8db1b9896..854d667a2 100644
--- a/qa/798.out.32
+++ b/qa/798.out.32
@@ -165,6 +165,110 @@ pmInDom: 62.0
dbpmda>
Log for pmdanfsclient on HOST ...
Log finished ...
+=== Test case: mountstats-el8.7-rdma.qa
+dbpmda> open pipe $PYTHON pmdanfsclient.python
+Start $PYTHON PMDA: $PYTHON pmdanfsclient.python
+dbpmda> # on some platforms this may take a while ...
+dbpmda> wait 2
+dbpmda> getdesc on
+dbpmda> desc nfsclient.export
+PMID: 62.0.1
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.mountpoint
+PMID: 62.0.2
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.options.string
+PMID: 62.1.1
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.options.proto
+PMID: 62.1.24
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.options.vers
+PMID: 62.1.6
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> fetch nfsclient.export
+PMID(s): 62.0.1
+__pmResult ... numpmid: 1
+ 62.0.1 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "example.com:/group"
+ inst [N or ???] value "example.com:/home"
+ inst [N or ???] value "example.com:/icds"
+ inst [N or ???] value "example.com:/icds-pcp-data"
+ inst [N or ???] value "example.com:/icds-prod"
+ inst [N or ???] value "example.com:/icds-test"
+ inst [N or ???] value "example.com:/work"
+dbpmda> fetch nfsclient.mountpoint
+PMID(s): 62.0.2
+__pmResult ... numpmid: 1
+ 62.0.2 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "/storage/group"
+ inst [N or ???] value "/storage/home"
+ inst [N or ???] value "/storage/icds"
+ inst [N or ???] value "/storage/pcp-data"
+ inst [N or ???] value "/storage/prod"
+ inst [N or ???] value "/storage/test"
+ inst [N or ???] value "/storage/work"
+dbpmda> fetch nfsclient.options.string
+PMID(s): 62.1.1
+__pmResult ... numpmid: 1
+ 62.1.1 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.1,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.18,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.23,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.27,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.29,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.6,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.7,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+dbpmda> fetch nfsclient.options.proto
+PMID(s): 62.1.24
+__pmResult ... numpmid: 1
+ 62.1.24 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+dbpmda> fetch nfsclient.options.vers
+PMID(s): 62.1.6
+__pmResult ... numpmid: 1
+ 62.1.6 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+dbpmda> fetch nfsclient.options.vers
+PMID(s): 62.1.6
+__pmResult ... numpmid: 1
+ 62.1.6 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+dbpmda> instance 62.0
+pmInDom: 62.0
+[ X] inst: X name: "/storage/group"
+[X+1] inst: X+1 name: "/storage/home"
+[X+2] inst: X+2 name: "/storage/icds"
+[X+3] inst: X+3 name: "/storage/pcp-data"
+[X+4] inst: X+4 name: "/storage/prod"
+[X+5] inst: X+5 name: "/storage/test"
+[X+6] inst: X+6 name: "/storage/work"
+dbpmda>
+Log for pmdanfsclient on HOST ...
+Log finished ...
=== Test case: mountstats.qa
dbpmda> open pipe $PYTHON pmdanfsclient.python
Start $PYTHON PMDA: $PYTHON pmdanfsclient.python
diff --git a/qa/798.out.64 b/qa/798.out.64
index 6b6a4b73c..860fa582e 100644
--- a/qa/798.out.64
+++ b/qa/798.out.64
@@ -165,6 +165,110 @@ pmInDom: 62.0
dbpmda>
Log for pmdanfsclient on HOST ...
Log finished ...
+=== Test case: mountstats-el8.7-rdma.qa
+dbpmda> open pipe $PYTHON pmdanfsclient.python
+Start $PYTHON PMDA: $PYTHON pmdanfsclient.python
+dbpmda> # on some platforms this may take a while ...
+dbpmda> wait 2
+dbpmda> getdesc on
+dbpmda> desc nfsclient.export
+PMID: 62.0.1
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.mountpoint
+PMID: 62.0.2
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.options.string
+PMID: 62.1.1
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.options.proto
+PMID: 62.1.24
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> desc nfsclient.options.vers
+PMID: 62.1.6
+ Data Type: string InDom: 62.0 0xf800000
+ Semantics: instant Units: none
+dbpmda> fetch nfsclient.export
+PMID(s): 62.0.1
+__pmResult ... numpmid: 1
+ 62.0.1 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "example.com:/group"
+ inst [N or ???] value "example.com:/home"
+ inst [N or ???] value "example.com:/icds"
+ inst [N or ???] value "example.com:/icds-pcp-data"
+ inst [N or ???] value "example.com:/icds-prod"
+ inst [N or ???] value "example.com:/icds-test"
+ inst [N or ???] value "example.com:/work"
+dbpmda> fetch nfsclient.mountpoint
+PMID(s): 62.0.2
+__pmResult ... numpmid: 1
+ 62.0.2 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "/storage/group"
+ inst [N or ???] value "/storage/home"
+ inst [N or ???] value "/storage/icds"
+ inst [N or ???] value "/storage/pcp-data"
+ inst [N or ???] value "/storage/prod"
+ inst [N or ???] value "/storage/test"
+ inst [N or ???] value "/storage/work"
+dbpmda> fetch nfsclient.options.string
+PMID(s): 62.1.1
+__pmResult ... numpmid: 1
+ 62.1.1 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.1,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.18,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.23,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.27,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.29,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.6,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+ inst [N or ???] value "rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.7,mountvers=3,mountport=0,mountproto=tcp,local_lock=all"
+dbpmda> fetch nfsclient.options.proto
+PMID(s): 62.1.24
+__pmResult ... numpmid: 1
+ 62.1.24 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+ inst [N or ???] value "rdma"
+dbpmda> fetch nfsclient.options.vers
+PMID(s): 62.1.6
+__pmResult ... numpmid: 1
+ 62.1.6 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+dbpmda> fetch nfsclient.options.vers
+PMID(s): 62.1.6
+__pmResult ... numpmid: 1
+ 62.1.6 (<noname>): numval: 7 valfmt: 1 vlist[]:
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+ inst [N or ???] value "3"
+dbpmda> instance 62.0
+pmInDom: 62.0
+[ X] inst: X name: "/storage/group"
+[X+1] inst: X+1 name: "/storage/home"
+[X+2] inst: X+2 name: "/storage/icds"
+[X+3] inst: X+3 name: "/storage/pcp-data"
+[X+4] inst: X+4 name: "/storage/prod"
+[X+5] inst: X+5 name: "/storage/test"
+[X+6] inst: X+6 name: "/storage/work"
+dbpmda>
+Log for pmdanfsclient on HOST ...
+Log finished ...
=== Test case: mountstats.qa
dbpmda> open pipe $PYTHON pmdanfsclient.python
Start $PYTHON PMDA: $PYTHON pmdanfsclient.python
diff --git a/qa/nfsclient/mountstats-el8.7-rdma.qa b/qa/nfsclient/mountstats-el8.7-rdma.qa
new file mode 100644
index 000000000..98dd836a1
--- /dev/null
+++ b/qa/nfsclient/mountstats-el8.7-rdma.qa
@@ -0,0 +1,437 @@
+device proc mounted on /proc with fstype proc
+device devtmpfs mounted on /dev with fstype devtmpfs
+device securityfs mounted on /sys/kernel/security with fstype securityfs
+device tmpfs mounted on /dev/shm with fstype tmpfs
+device devpts mounted on /dev/pts with fstype devpts
+device tmpfs mounted on /run with fstype tmpfs
+device tmpfs mounted on /sys/fs/cgroup with fstype tmpfs
+device cgroup mounted on /sys/fs/cgroup/systemd with fstype cgroup
+device pstore mounted on /sys/fs/pstore with fstype pstore
+device efivarfs mounted on /sys/firmware/efi/efivars with fstype efivarfs
+device bpf mounted on /sys/fs/bpf with fstype bpf
+device cgroup mounted on /sys/fs/cgroup/hugetlb with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/cpu,cpuacct with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/memory with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/freezer with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/devices with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/net_cls,net_prio with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/perf_event with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/rdma with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/cpuset with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/pids with fstype cgroup
+device cgroup mounted on /sys/fs/cgroup/blkio with fstype cgroup
+device configfs mounted on /sys/kernel/config with fstype configfs
+device rootfs mounted on / with fstype tmpfs
+device rw mounted on /.sllocal/log with fstype tmpfs
+device rpc_pipefs mounted on /var/lib/nfs/rpc_pipefs with fstype rpc_pipefs
+device hugetlbfs mounted on /dev/hugepages with fstype hugetlbfs
+device debugfs mounted on /sys/kernel/debug with fstype debugfs
+device mqueue mounted on /dev/mqueue with fstype mqueue
+device systemd-1 mounted on /proc/sys/fs/binfmt_misc with fstype autofs
+device binfmt_misc mounted on /proc/sys/fs/binfmt_misc with fstype binfmt_misc
+device fusectl mounted on /sys/fs/fuse/connections with fstype fusectl
+device /dev/sda3 mounted on /tmp with fstype xfs
+device /dev/sda1 mounted on /var/log with fstype xfs
+device /dev/sda2 mounted on /var/tmp with fstype xfs
+device tracefs mounted on /sys/kernel/debug/tracing with fstype tracefs
+device example.com:/icds-prod mounted on /storage/prod with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.30,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 3 36 0 0 3 8 44 0 0 3 0 0 0 0 3 0 0 3 0 0 0 0 0 0 0 0 0
+ bytes: 5086 0 0 0 5086 0 4 0
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 1 0 0 5 5 0 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 0 0 0
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 0 0 0
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 0 0 0 0 0 268 0 0 0 0 0 0 11 0 0 0
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 0 0 0
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 0 1 4220 0 0 28 0 0 0 0 0 0 11 1 0 0
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 0 0 0
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 1 0 4746 4746 0 72 0 0 0 0 0 0 11 1 0 0
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 0 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 18 0 19 0
+ GETATTR: 3 3 0 276 336 0 0 0 0
+ SETATTR: 0 0 0 0 0 0 0 0 0
+ LOOKUP: 8 8 0 852 1600 16 1 17 1
+ ACCESS: 7 7 0 672 840 10 1 11 0
+ READLINK: 1 1 0 92 148 5 0 5 0
+ READ: 3 3 0 312 5472 0 1 1 0
+ WRITE: 0 0 0 0 0 0 0 0 0
+ CREATE: 0 0 0 0 0 0 0 0 0
+ MKDIR: 0 0 0 0 0 0 0 0 0
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 0 0 0 0 0 0 0 0 0
+ RMDIR: 0 0 0 0 0 0 0 0 0
+ RENAME: 0 0 0 0 0 0 0 0 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 0 0 0 0 0 0 0 0 0
+ READDIRPLUS: 0 0 0 0 0 0 0 0 0
+ FSSTAT: 0 0 0 0 0 0 0 0 0
+ FSINFO: 2 2 0 184 328 6 0 6 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/icds-test mounted on /storage/test with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.24,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ bytes: 0 0 0 0 0 0 0 0
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 1 0 0 3 3 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 0 0 0
+ xprt: rdma 0 0 1 0 0 1 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 0 0 0
+ xprt: rdma 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ xprt: rdma 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ xprt: rdma 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ xprt: rdma 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ xprt: rdma 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ xprt: rdma 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 6 0 6 0
+ GETATTR: 0 0 0 0 0 0 0 0 0
+ SETATTR: 0 0 0 0 0 0 0 0 0
+ LOOKUP: 0 0 0 0 0 0 0 0 0
+ ACCESS: 0 0 0 0 0 0 0 0 0
+ READLINK: 0 0 0 0 0 0 0 0 0
+ READ: 0 0 0 0 0 0 0 0 0
+ WRITE: 0 0 0 0 0 0 0 0 0
+ CREATE: 0 0 0 0 0 0 0 0 0
+ MKDIR: 0 0 0 0 0 0 0 0 0
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 0 0 0 0 0 0 0 0 0
+ RMDIR: 0 0 0 0 0 0 0 0 0
+ RENAME: 0 0 0 0 0 0 0 0 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 0 0 0 0 0 0 0 0 0
+ READDIRPLUS: 0 0 0 0 0 0 0 0 0
+ FSSTAT: 0 0 0 0 0 0 0 0 0
+ FSINFO: 2 2 0 184 328 6 0 6 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/icds mounted on /storage/icds with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.25,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 2 191 0 0 3 13 207 0 0 30 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0
+ bytes: 2452 0 0 0 2555904 0 624 0
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 1 0 0 12 12 0 12 0 0 6 0 352256 352256 0 0 0 0 0 0 0 0 11 6 0 0
+ xprt: rdma 0 0 1 0 0 10 10 0 10 0 0 5 0 184320 184320 0 0 0 0 0 0 0 0 11 5 0 0
+ xprt: rdma 0 0 2 0 0 10 10 0 10 0 0 4 0 225280 225280 0 0 0 0 0 0 0 0 22 4 0 0
+ xprt: rdma 0 0 1 0 0 9 9 0 9 0 0 6 0 294912 294912 0 0 0 0 0 0 0 0 11 6 0 0
+ xprt: rdma 0 0 1 0 0 9 9 0 9 0 0 7 0 339968 339968 0 0 0 0 0 0 0 0 11 7 0 0
+ xprt: rdma 0 0 1 0 0 9 9 0 9 0 0 7 0 352256 352256 0 0 0 0 0 0 0 0 11 7 0 0
+ xprt: rdma 0 0 1 0 0 9 9 0 9 0 0 7 0 405504 405504 0 0 0 0 0 0 0 0 11 7 0 0
+ xprt: rdma 0 0 1 0 0 9 9 0 9 0 0 7 0 401408 401408 0 0 0 0 0 0 0 0 11 7 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 5 0 5 0
+ GETATTR: 2 2 0 188 224 11 0 11 0
+ SETATTR: 0 0 0 0 0 0 0 0 0
+ LOOKUP: 13 13 0 1364 1952 15 3 19 5
+ ACCESS: 9 9 0 864 1080 16 1 18 0
+ READLINK: 0 0 0 0 0 0 0 0 0
+ READ: 49 49 0 5096 2562176 0 15 17 0
+ WRITE: 0 0 0 0 0 0 0 0 0
+ CREATE: 0 0 0 0 0 0 0 0 0
+ MKDIR: 0 0 0 0 0 0 0 0 0
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 0 0 0 0 0 0 0 0 0
+ RMDIR: 0 0 0 0 0 0 0 0 0
+ RENAME: 0 0 0 0 0 0 0 0 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 0 0 0 0 0 0 0 0 0
+ READDIRPLUS: 0 0 0 0 0 0 0 0 0
+ FSSTAT: 0 0 0 0 0 0 0 0 0
+ FSINFO: 2 2 0 184 328 5 0 5 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/group mounted on /storage/group with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.1,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 359470 11669975 26864 99083 237241 104622 12074173 76552450 12372 24061401 0 184482 64402 15675 250513 4475 60207 205420 0 13655 41997395 0 0 0 0 0 0
+ bytes: 5990252963873 271047280191 0 0 1730835995062 212213971093 422603768 51862807
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 111 1 28 5001450 5001450 0 16468960 244630485 411153 4499604 2419 216488884485 216363618297 705084 2119278 0 610 0 0 0 0 4422 4913176 0 0
+ xprt: rdma 0 0 110 1 28 5000369 5000369 0 16473884 244723520 411409 4497949 2419 216448785625 216324215309 744372 2220863 0 584 0 0 0 0 4466 4911777 0 0
+ xprt: rdma 0 0 110 1 12 5001230 5001230 0 16466569 244632808 411122 4499109 2294 216463811427 216346453699 725596 2162884 0 656 0 0 0 0 4642 4912525 0 0
+ xprt: rdma 0 0 114 1 12 5000643 5000643 0 16476687 244518169 411370 4498397 2400 216437529332 216314007816 754588 2166359 0 638 0 0 0 0 4675 4912167 0 0
+ xprt: rdma 0 0 113 0 12 5001675 5001675 0 16476119 244839819 411304 4499163 2420 216552802382 216427322070 674092 2054877 0 666 0 0 0 0 4796 4912887 0 0
+ xprt: rdma 0 0 112 1 62 5001928 5001928 0 16478906 244718584 411345 4499807 2449 216598144747 216470930799 709516 2049733 0 629 0 0 0 0 4664 4913601 0 0
+ xprt: rdma 0 0 108 0 62 5000620 5000620 0 16457063 244260204 411118 4498680 2347 216410122025 216289485037 742848 2157015 0 618 0 0 0 0 4488 4912145 0 0
+ xprt: rdma 0 0 105 0 62 5001170 5001170 0 16465684 244462170 411102 4499474 2363 216499718003 216377867059 774804 2218299 0 664 0 0 0 0 4532 4912939 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 6 0 6 0
+ GETATTR: 359470 359470 0 37679148 40260640 6615 68172 77464 0
+ SETATTR: 21554 21554 0 3038404 775944 123 18549 18914 0
+ LOOKUP: 116511 116511 0 14261900 8782176 956 41688 44033 90189
+ ACCESS: 64375 64375 0 7046576 7725000 886 14014 15459 0
+ READLINK: 736 736 0 79416 104332 2 249 269 0
+ READ: 36019499 36019499 0 4279070464 1735446589952 406570 26116356 27160896 0
+ WRITE: 3304938 3304938 0 212624926960 528790080 1195907908 50317800 1246575394 0
+ CREATE: 77879 77879 0 11891764 19937024 494 299155 303286 0
+ MKDIR: 2201 2201 0 325500 545856 26 7601 7669 80
+ SYMLINK: 4 4 0 768 1024 0 13 13 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 15039 15039 0 1895992 541404 46 25825 26057 0
+ RMDIR: 1859 1859 0 223452 66924 4 3096 3127 3
+ RENAME: 6525 6525 0 1219928 287100 79 13674 13918 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 5842 5842 0 747112 19898184 14 7271 7470 0
+ READDIRPLUS: 9028 9028 0 1164116 74659892 169 17053 17575 0
+ FSSTAT: 113 113 0 11856 18984 34 126 162 0
+ FSINFO: 2 2 0 184 328 6 0 6 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/home mounted on /storage/home with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.7,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 1431711 44943044 116357 137881 1036341 97602 45725153 741968 1060 314347 7 82823 747343 132339 751136 4247 9781 660494 0 1621 659329 139 0 0 0 0 0
+ bytes: 31191656856 2345480650 0 0 13941630626 2386575218 3519989 615297
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 299 2 19 314383 314383 0 360317 2824 6031 38731 7094 2143783391 1741491091 2076748 3911658 0 48 0 0 0 0 3652 51856 0 0
+ xprt: rdma 0 0 306 2 19 314284 314284 0 359761 3339 6178 38680 7086 2138653820 1737586668 2122588 3993750 0 53 0 0 0 0 3729 51944 0 0
+ xprt: rdma 0 0 305 2 19 314534 314534 0 360342 3020 6127 39049 7491 2177604450 1749298514 2178016 4057012 0 51 0 0 0 0 3718 52667 0 0
+ xprt: rdma 0 0 304 2 19 314570 314568 2 687294 2628 6152 38977 6977 2151798321 1756744897 2311096 3945741 0 45 0 0 0 0 3707 52106 0 0
+ xprt: rdma 0 0 301 3 19 314546 314545 1 495960 2588 6045 38911 7196 2164869636 1756151804 1995156 3909706 0 48 0 0 0 0 3674 52152 0 0
+ xprt: rdma 0 0 300 2 19 314597 314597 0 361338 2792 6190 39020 6968 2150734096 1755759176 2129072 3997461 0 47 0 0 0 0 3663 52178 0 0
+ xprt: rdma 0 0 300 2 19 314532 314532 0 360982 2603 6177 38991 7215 2162670305 1752241557 2106240 4097289 0 45 0 0 0 0 3663 52383 0 0
+ xprt: rdma 0 0 299 2 19 314696 314696 0 360978 2938 6117 38938 7297 2164527924 1748297356 2332620 3915498 0 51 0 0 0 0 3652 52352 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 5 0 5 0
+ GETATTR: 1431711 1431711 0 152340376 160351352 23887 262978 318138 5
+ SETATTR: 134526 134526 0 20992092 4842936 592 62889 65101 0
+ LOOKUP: 105669 105669 0 13828636 11957600 1842 36086 41336 61002
+ ACCESS: 228039 228039 0 26382944 27364652 2814 47214 53250 7
+ READLINK: 1627 1627 0 170916 228932 15 337 391 0
+ READ: 420683 420683 0 50800636 13995575392 7785 255480 270435 0
+ WRITE: 87100 87100 0 2397805952 13936000 172750 365004 540345 0
+ CREATE: 29307 29307 0 4889564 7501712 111 49570 50330 4
+ MKDIR: 7156 7156 0 1067212 1800036 15 11511 11613 145
+ SYMLINK: 446 446 0 82020 113516 1 682 688 3
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 5852 5852 0 836620 210672 17 13008 13133 0
+ RMDIR: 2417 2417 0 277660 87012 5 3319 3357 54
+ RENAME: 2882 2882 0 542372 126808 62 26287 26407 0
+ LINK: 212 212 0 32072 26288 0 420 425 0
+ READDIR: 26 26 0 3348 156548 0 81 82 0
+ READDIRPLUS: 46977 46977 0 6063288 86387612 202 55663 58039 0
+ FSSTAT: 2784 2784 0 291028 467712 28 3651 4047 0
+ FSINFO: 2 2 0 184 328 5 0 5 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/icds mounted on /storage/icds with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.23,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 1504197 4360204878 3573 11827 998351 46870 4361262723 0 13 86345 0 0 412964 0 810302 0 0 787954 0 0 0 0 0 0 0 0 0
+ bytes: 2473882007460 0 0 0 3117996888 0 781324 0
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 9 0 15 218040 218040 0 220710 0 0 10661 757 426647800 380864004 0 2882858 0 0 0 0 0 0 99 11418 0 0
+ xprt: rdma 0 0 11 0 20 217911 217911 0 220214 0 0 10742 764 434579499 388755611 0 2968495 0 0 0 0 0 0 121 11506 0 0
+ xprt: rdma 0 0 11 0 20 218423 218423 0 220758 0 0 10858 830 439053205 388939529 0 2997186 0 0 0 0 0 0 121 11688 0 0
+ xprt: rdma 0 0 10 0 20 217656 217656 0 220142 0 0 10798 776 439744412 392148264 0 2965342 0 0 0 0 0 0 110 11574 0 0
+ xprt: rdma 0 0 10 0 20 218280 218280 0 220648 0 0 10783 764 433088272 387536212 0 2916853 0 0 0 0 0 0 110 11547 0 0
+ xprt: rdma 0 0 10 0 15 217897 217897 0 220528 0 0 10800 744 432500217 387918105 0 3035589 0 0 0 0 0 0 110 11544 0 0
+ xprt: rdma 0 0 9 0 15 218233 218233 0 220901 0 0 10876 786 437891918 390297186 0 2843850 0 0 0 0 0 0 99 11662 0 0
+ xprt: rdma 0 0 9 0 15 218287 218287 0 221066 0 0 10817 799 438124667 390593603 0 2900001 0 0 0 0 0 0 99 11616 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 5 0 5 0
+ GETATTR: 1504197 1504197 0 159348668 168470064 5823 250532 346757 0
+ SETATTR: 0 0 0 0 0 0 0 0 0
+ LOOKUP: 47852 47852 0 6159548 5950144 134 18038 23896 24837
+ ACCESS: 79076 79076 0 8750456 9487564 188 14705 18515 389
+ READLINK: 331 331 0 36048 45912 1 109 121 0
+ READ: 106156 106156 0 12595328 3131627640 2442 102180 106716 0
+ WRITE: 0 0 0 0 0 0 0 0 0
+ CREATE: 972 972 0 159400 34992 4 1815 1875 972
+ MKDIR: 130 130 0 18948 4680 0 259 260 130
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 0 0 0 0 0 0 0 0 0
+ RMDIR: 0 0 0 0 0 0 0 0 0
+ RENAME: 0 0 0 0 0 0 0 0 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 0 0 0 0 0 0 0 0 0
+ READDIRPLUS: 5863 5863 0 765784 12555964 21 6882 7339 0
+ FSSTAT: 120 120 0 13172 20160 0 86 88 0
+ FSINFO: 2 2 0 184 328 5 0 5 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/work mounted on /storage/work with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.18,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 963419 23064824 4957 23437 498893 36863 23579290 20398227 3127 191254 348 187305 207697 6261 405125 1210 0 394647 0 3923 18843784 3 0 0 0 0 0
+ bytes: 41568180225 14269941286 0 0 23191576124 14729236454 5668359 3684526
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 111 1 1 231044 231042 2 873131 366580 33899 45806 1273 2967035298 2901558270 7296576 1169614 0 47 0 0 0 0 1529 80978 0 0
+ xprt: rdma 0 0 106 0 1 231329 231328 1 717682 361947 33828 45455 1262 2952240311 2887232063 7392372 1221754 0 52 0 0 0 0 1474 80545 0 0
+ xprt: rdma 0 0 106 1 6 231078 231078 0 485645 363676 33786 45721 1320 2967925402 2899619554 7176800 1189264 0 48 0 0 0 0 1474 80827 0 0
+ xprt: rdma 0 0 109 0 6 231226 231226 0 486538 362707 33929 45482 1274 2956234946 2890484558 7294244 1190630 0 44 0 0 0 0 1507 80685 0 0
+ xprt: rdma 0 0 109 0 1 231464 231464 0 486184 367681 33875 45789 1233 2968915191 2906173167 7325356 1151195 0 42 0 0 0 0 1507 80897 0 0
+ xprt: rdma 0 0 110 0 1 231566 231566 0 485613 362833 33801 45688 1245 2967609016 2903914028 7186764 1166808 0 43 0 0 0 0 1518 80734 0 0
+ xprt: rdma 0 0 107 0 1 231285 231285 0 485205 365643 33905 45719 1299 2968468178 2901567814 7074060 1190420 0 47 0 0 0 0 1485 80923 0 0
+ xprt: rdma 0 0 109 1 1 231926 231926 0 487042 369268 33760 45696 1286 2970922269 2904447457 7343684 1204180 0 45 0 0 0 0 1507 80742 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 6 0 6 0
+ GETATTR: 963419 963419 0 101370948 107902704 15461 186184 213618 3
+ SETATTR: 8104 8104 0 1151604 291744 47 11806 11970 0
+ LOOKUP: 42796 42796 0 5399836 4859232 622 14626 15832 24620
+ ACCESS: 45471 45471 0 4949688 5456520 1051 8791 10504 1
+ READLINK: 14 14 0 1500 1908 0 3 3 0
+ READ: 371033 371033 0 43379708 23239084264 2450 276265 283119 0
+ WRITE: 390214 390214 0 14779478604 62433744 3531147 2810230 6351526 4
+ CREATE: 14579 14579 0 2254256 3731564 70 47115 47475 3
+ MKDIR: 2317 2317 0 378616 591172 17 9179 9302 9
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 903 903 0 121280 32508 7 1340 1364 0
+ RMDIR: 1 1 0 116 36 0 2 2 1
+ RENAME: 1691 1691 0 336668 74404 14 2998 3057 0
+ LINK: 71 71 0 12064 8804 0 91 92 0
+ READDIR: 4 4 0 512 15012 0 27 28 0
+ READDIRPLUS: 6573 6573 0 845980 12381640 70 7615 8194 0
+ FSSTAT: 49 49 0 5256 8232 56 41 99 0
+ FSINFO: 2 2 0 184 328 5 0 6 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/icds-prod mounted on /storage/prod with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.27,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 216327 953839 0 2 94628 41 977594 0 0 32 0 0 7792 0 176250 0 0 90732 0 0 0 0 0 0 0 0 0
+ bytes: 900620338 0 0 0 317803 0 96 0
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 1 0 120 27117 27117 0 27120 0 0 3 0 44329 44329 0 0 0 0 0 0 0 0 11 3 0 0
+ xprt: rdma 0 0 1 0 120 27117 27117 0 27121 0 0 5 1 110959 82199 0 0 0 0 0 0 0 0 11 6 0 0
+ xprt: rdma 0 0 1 0 28 27114 27114 0 27115 0 0 1 0 13215 13215 0 4239 0 0 0 0 0 0 11 1 0 0
+ xprt: rdma 0 0 1 0 28 27113 27113 0 27113 0 0 2 0 61752 61752 0 395 0 0 0 0 0 0 11 2 0 0
+ xprt: rdma 0 0 1 0 120 27115 27115 0 27118 0 0 2 1 24412 20192 0 3380 0 0 0 0 0 0 11 3 0 0
+ xprt: rdma 0 0 1 0 120 27113 27113 0 27114 0 0 4 0 24016 24016 0 1518 0 0 0 0 0 0 11 4 0 0
+ xprt: rdma 0 0 1 0 120 27115 27115 0 27119 0 0 3 1 118321 55597 0 518 0 0 0 0 0 0 11 4 0 0
+ xprt: rdma 0 0 1 0 120 27113 27113 0 27114 0 0 2 1 75257 10593 0 0 0 0 0 0 0 0 11 3 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 5 0 5 0
+ GETATTR: 216327 216327 0 19911140 24228624 790 44005 48809 0
+ SETATTR: 0 0 0 0 0 0 0 0 0
+ LOOKUP: 45 45 0 5016 7200 16 15 32 15
+ ACCESS: 494 494 0 49128 59280 17 148 169 0
+ READLINK: 1 1 0 92 148 0 0 0 0
+ READ: 32 32 0 3392 321932 1 28 30 0
+ WRITE: 0 0 0 0 0 0 0 0 0
+ CREATE: 0 0 0 0 0 0 0 0 0
+ MKDIR: 0 0 0 0 0 0 0 0 0
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 0 0 0 0 0 0 0 0 0
+ RMDIR: 0 0 0 0 0 0 0 0 0
+ RENAME: 0 0 0 0 0 0 0 0 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 0 0 0 0 0 0 0 0 0
+ READDIRPLUS: 2 2 0 236 3940 0 3 3 0
+ FSSTAT: 11 11 0 1136 1848 0 8 8 0
+ FSINFO: 2 2 0 184 328 5 0 6 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/icds-test mounted on /storage/test with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.29,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 11 226 0 0 0 6 260 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ bytes: 0 0 0 0 0 0 0 0
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 5 0 0 7 7 0 7 0 0 0 0 0 0 0 0 0 0 0 0 0 0 55 0 0 0
+ xprt: rdma 0 0 6 0 0 6 6 0 6 0 0 0 1 28932 172 0 0 0 0 0 0 0 0 66 1 0 0
+ xprt: rdma 0 0 5 0 0 5 5 0 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 55 0 0 0
+ xprt: rdma 0 0 5 0 0 5 5 0 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 55 0 0 0
+ xprt: rdma 0 0 5 0 0 5 5 0 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 55 0 0 0
+ xprt: rdma 0 0 4 0 0 4 4 0 4 0 0 0 0 0 0 0 0 0 0 0 0 0 0 44 0 0 0
+ xprt: rdma 0 0 4 0 0 4 4 0 4 0 0 0 0 0 0 0 0 0 0 0 0 0 0 44 0 0 0
+ xprt: rdma 0 0 4 0 0 4 4 0 4 0 0 0 0 0 0 0 0 0 0 0 0 0 0 44 0 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 6 0 6 0
+ GETATTR: 11 11 0 1156 1232 174 4 178 0
+ SETATTR: 0 0 0 0 0 0 0 0 0
+ LOOKUP: 6 6 0 740 192 35 2 37 6
+ ACCESS: 6 6 0 652 720 99 2 102 0
+ READLINK: 0 0 0 0 0 0 0 0 0
+ READ: 0 0 0 0 0 0 0 0 0
+ WRITE: 0 0 0 0 0 0 0 0 0
+ CREATE: 0 0 0 0 0 0 0 0 0
+ MKDIR: 0 0 0 0 0 0 0 0 0
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 0 0 0 0 0 0 0 0 0
+ RMDIR: 0 0 0 0 0 0 0 0 0
+ RENAME: 0 0 0 0 0 0 0 0 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 0 0 0 0 0 0 0 0 0
+ READDIRPLUS: 0 0 0 0 0 0 0 0 0
+ FSSTAT: 12 12 0 1240 2016 109 6 116 0
+ FSINFO: 2 2 0 184 328 5 0 6 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device example.com:/icds-pcp-data mounted on /storage/pcp-data with fstype nfs statvers=1.1
+ opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=rdma,nconnect=8,port=20049,timeo=600,retrans=2,sec=sys,mountaddr=10.6.159.6,mountvers=3,mountport=0,mountproto=tcp,local_lock=all
+ age: 1166720
+ caps: caps=0x10003fcf,wtmult=4096,dtsize=65536,bsize=0,namlen=255
+ sec: flavor=1,pseudoflavor=1
+ events: 6699 70747 708 20599 2762 2253 102622 720956 1 5508 0 37492 1710 163 1296 0 0 1194 0 0 720956 0 0 0 0 0 0
+ bytes: 1920695637 2068044591 0 0 1319950703 2143394964 322276 542160
+ RPC iostats version: 1.1 p/v: 100003/3 (nfs)
+ xprt: rdma 0 0 1 0 163 11491 11491 0 11791 0 5235 2523 200 173699455 165053291 995572 0 0 0 0 0 0 0 11 7958 0 0
+ xprt: rdma 0 0 1 0 163 11490 11490 0 11783 0 5227 2523 206 174106340 165126008 1038128 0 0 0 0 0 0 0 11 7956 0 0
+ xprt: rdma 0 0 1 0 102 11490 11490 0 11795 0 5201 2531 215 174949903 165380715 1042544 0 0 0 0 0 0 0 11 7947 0 0
+ xprt: rdma 0 0 1 0 102 11490 11490 0 11803 0 5249 2530 198 173876087 165402039 1006656 2100 0 0 0 0 0 0 11 7977 0 0
+ xprt: rdma 0 0 1 0 102 11490 11490 0 11809 0 5227 2522 200 173699084 165094476 1049444 1968 0 0 0 0 0 0 11 7949 0 0
+ xprt: rdma 0 0 1 0 43 11490 11490 0 11796 0 5213 2527 208 174387760 165255764 1003696 1736 0 0 0 0 0 0 11 7948 0 0
+ xprt: rdma 0 0 1 0 43 11490 11490 0 11812 0 5229 2520 208 174061854 164930038 1022056 0 0 0 0 0 0 0 11 7957 0 0
+ xprt: rdma 0 0 1 0 43 11490 11490 0 11797 0 5218 2519 216 174643548 164999120 1041756 0 0 0 0 0 0 0 11 7953 0 0
+ per-op statistics
+ NULL: 1 1 0 40 24 5 0 5 0
+ GETATTR: 6699 6699 0 658876 750288 34 1459 1700 0
+ SETATTR: 878 878 0 112776 31608 3 378 409 0
+ LOOKUP: 1631 1631 0 188400 64096 16 372 413 1569
+ ACCESS: 952 952 0 95396 114240 20 197 241 0
+ READLINK: 0 0 0 0 0 0 0 0 0
+ READ: 20198 20198 0 2204072 1322536072 1049 7547 9069 0
+ WRITE: 57666 57666 0 2149857320 9226560 1838 115379 120554 0
+ CREATE: 826 826 0 99672 211456 3 1352 1378 0
+ MKDIR: 15 15 0 2048 3840 0 32 32 0
+ SYMLINK: 0 0 0 0 0 0 0 0 0
+ MKNOD: 0 0 0 0 0 0 0 0 0
+ REMOVE: 715 715 0 74796 25740 5 967 995 0
+ RMDIR: 0 0 0 0 0 0 0 0 0
+ RENAME: 2 2 0 304 88 0 3 3 0
+ LINK: 0 0 0 0 0 0 0 0 0
+ READDIR: 0 0 0 0 0 0 0 0 0
+ READDIRPLUS: 700 700 0 84356 1157640 5 532 602 0
+ FSSTAT: 684 684 0 65712 114912 8 750 794 0
+ FSINFO: 2 2 0 184 328 5 0 5 0
+ PATHCONF: 0 0 0 0 0 0 0 0 0
+ COMMIT: 0 0 0 0 0 0 0 0 0
+
+device scratch mounted on /scratch with fstype gpfs
diff --git a/src/pmdas/nfsclient/pmdanfsclient.python b/src/pmdas/nfsclient/pmdanfsclient.python
index 8e96e9031..786d95845 100644
--- a/src/pmdas/nfsclient/pmdanfsclient.python
+++ b/src/pmdas/nfsclient/pmdanfsclient.python
@@ -585,7 +585,7 @@ class NFSCLIENTPMDA(PMDA):
client.xprt.sending_u = self.longs(values[12])
client.xprt.pending_u = self.longs(values[13])
elif xprt_prot == 'udp':
- client.xprt.srcport = values[1]
+ client.xprt.srcport = self.chars(values[1])
client.xprt.bind_count = self.longs(values[2])
client.xprt.sends = self.longs(values[3])
client.xprt.recvs = self.longs(values[4])
@@ -596,7 +596,7 @@ class NFSCLIENTPMDA(PMDA):
client.xprt.sending_u = self.longs(values[9])
client.xprt.pending_u = self.longs(values[10])
elif xprt_prot == 'rdma':
- client.xprt.srcport = values[1]
+ client.xprt.srcport = self.chars(values[1])
client.xprt.bind_count = self.longs(values[2])
client.xprt.connect_count = self.longs(values[3])
client.xprt.connect_time = self.longs(values[4])

File diff suppressed because it is too large

@@ -1,94 +0,0 @@
diff -Naurp pcp-5.3.7.orig/src/pmdas/bcc/Upgrade pcp-5.3.7/src/pmdas/bcc/Upgrade
--- pcp-5.3.7.orig/src/pmdas/bcc/Upgrade 2021-08-16 14:12:25.000000000 +1000
+++ pcp-5.3.7/src/pmdas/bcc/Upgrade 2023-07-10 16:25:31.904767032 +1000
@@ -26,8 +26,11 @@ then
else
sed -i -e "s,^\(bcc.*binary\),\1 notready,g" $PCP_PMCDCONF_PATH
fi
- sed -i \
- -e "s,python $PCP_PMDAS_DIR/bcc/,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/bcc/,g" \
- $PCP_PMCDCONF_PATH 2>/dev/null
+ if grep -q '^bcc.*python ' "$PCP_PMCDCONF_PATH" 2>/dev/null
+ then
+ sed -i \
+ -e "s,python $PCP_PMDAS_DIR/bcc/,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/bcc/,g" \
+ $PCP_PMCDCONF_PATH 2>/dev/null
+ fi
fi
exit 0
diff -Naurp pcp-5.3.7.orig/src/pmdas/kvm/Upgrade pcp-5.3.7/src/pmdas/kvm/Upgrade
--- pcp-5.3.7.orig/src/pmdas/kvm/Upgrade 2019-02-06 17:16:29.000000000 +1100
+++ pcp-5.3.7/src/pmdas/kvm/Upgrade 2023-07-10 16:25:31.904767032 +1000
@@ -17,7 +17,7 @@
. $PCP_DIR/etc/pcp.env
-if grep -q ^kvm "$PCP_PMCDCONF_PATH" 2>/dev/null
+if grep -q '^kvm.*perl.*' "$PCP_PMCDCONF_PATH" 2>/dev/null
then
sed -i -e "s,perl $PCP_PMDAS_DIR/kvm/pmdakvm.pl,$PCP_PMDAS_DIR/kvm/pmdakvm -d 95,g" $PCP_PMCDCONF_PATH 2>/dev/null
fi
diff -Naurp pcp-5.3.7.orig/src/pmdas/mssql/Upgrade pcp-5.3.7/src/pmdas/mssql/Upgrade
--- pcp-5.3.7.orig/src/pmdas/mssql/Upgrade 2021-08-16 14:12:25.000000000 +1000
+++ pcp-5.3.7/src/pmdas/mssql/Upgrade 2023-07-10 16:25:31.904767032 +1000
@@ -17,14 +17,20 @@
. $PCP_DIR/etc/pcp.env
-if grep -q ^mssql "$PCP_PMCDCONF_PATH" 2>/dev/null
+if grep -q '^mssql.*perl ' "$PCP_PMCDCONF_PATH" 2>/dev/null
then
sed -i \
- -e "s,python $PCP_PMDAS_DIR/mssql/,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/mssql/,g" \
-e "s,perl $PCP_PMDAS_DIR/mssql/pmdamssql.pl,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/mssql/pmdamssql.python,g" \
$PCP_PMCDCONF_PATH 2>/dev/null
fi
+if grep -q '^mssql.*python ' "$PCP_PMCDCONF_PATH" 2>/dev/null
+then
+ sed -i \
+ -e "s,python $PCP_PMDAS_DIR/mssql/,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/mssql/,g" \
+ $PCP_PMCDCONF_PATH 2>/dev/null
+fi
+
perlpath=`which $PCP_PERL_PROG`
original="$PCP_PMDAS_DIR/mssql/mssql.conf"
upgraded="$PCP_PMDAS_DIR/mssql/mssql.conf.tmp"
diff -Naurp pcp-5.3.7.orig/src/pmdas/openmetrics/Upgrade pcp-5.3.7/src/pmdas/openmetrics/Upgrade
--- pcp-5.3.7.orig/src/pmdas/openmetrics/Upgrade 2021-08-16 14:12:25.000000000 +1000
+++ pcp-5.3.7/src/pmdas/openmetrics/Upgrade 2023-07-10 16:25:31.904767032 +1000
@@ -36,7 +36,7 @@ then
rm -f "$PCP_VAR_DIR/pmns/prometheus" 2>/dev/null
fi
-if grep -q ^openmetrics "$PCP_PMCDCONF_PATH" 2>/dev/null
+if grep -q '^openmetrics.*python ' "$PCP_PMCDCONF_PATH" 2>/dev/null
then
sed -i -e "s,python $PCP_PMDAS_DIR/openmetrics/,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/openmetrics/,g" $PCP_PMCDCONF_PATH 2>/dev/null
fi
diff -Naurp pcp-5.3.7.orig/src/pmdas/postgresql/Upgrade pcp-5.3.7/src/pmdas/postgresql/Upgrade
--- pcp-5.3.7.orig/src/pmdas/postgresql/Upgrade 2021-08-16 14:12:25.000000000 +1000
+++ pcp-5.3.7/src/pmdas/postgresql/Upgrade 2023-07-10 16:25:31.914767070 +1000
@@ -17,14 +17,20 @@
. $PCP_DIR/etc/pcp.env
-if grep -q ^postgresql "$PCP_PMCDCONF_PATH" 2>/dev/null
+if grep -q '^postgresql.*perl ' "$PCP_PMCDCONF_PATH" 2>/dev/null
then
sed -i \
- -e "s,python $PCP_PMDAS_DIR/postgresql/,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/postgresql/,g" \
-e "s,perl $PCP_PMDAS_DIR/postgresql/pmdapostgresql.pl,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/postgresql/pmdapostgresql.python,g" \
$PCP_PMCDCONF_PATH 2>/dev/null
fi
+if grep -q '^postgresql.*python ' "$PCP_PMCDCONF_PATH" 2>/dev/null
+then
+ sed -i \
+ -e "s,python $PCP_PMDAS_DIR/postgresql/,$PCP_PYTHON_PROG $PCP_PMDAS_DIR/postgresql/,g" \
+ $PCP_PMCDCONF_PATH 2>/dev/null
+fi
+
perlpath=`which $PCP_PERL_PROG`
original="$PCP_PMDAS_DIR/postgresql/postgresql.conf"
upgraded="$PCP_PMDAS_DIR/postgresql/pmdapostgresql.conf"

@ -1,50 +0,0 @@
diff --git a/src/pmdas/podman/.gitignore b/src/pmdas/podman/.gitignore
index a4f35e0f43..aea2c4052e 100644
--- a/src/pmdas/podman/.gitignore
+++ b/src/pmdas/podman/.gitignore
@@ -2,8 +2,6 @@ deps/
domain.h
pmdapodman
pmda_podman.so
-jsonsl.c
-jsonsl.h
help.dir
help.pag
exports
diff --git a/src/pmdas/podman/GNUmakefile b/src/pmdas/podman/GNUmakefile
index d6e58cfca5..1117a7a4e4 100644
--- a/src/pmdas/podman/GNUmakefile
+++ b/src/pmdas/podman/GNUmakefile
@@ -83,6 +83,7 @@ domain.h: ../../pmns/stdpmid
$(OBJECTS): domain.h
pmda.o: $(VERSION_SCRIPT)
pmda.o: $(TOPDIR)/src/include/pcp/libpcp.h
+podman.o: $(JSONSL_HFILES)
check:: $(CFILES) $(HFILES)
$(CLINT) $^
diff --git a/src/pmdas/root/.gitignore b/src/pmdas/root/.gitignore
index 21f507f0dd..b78b1fd28a 100644
--- a/src/pmdas/root/.gitignore
+++ b/src/pmdas/root/.gitignore
@@ -1,8 +1,6 @@
deps/
domain.h
pmdaroot
-jsonsl.c
-jsonsl.h
help.dir
help.pag
pmns
diff --git a/src/pmdas/root/GNUmakefile b/src/pmdas/root/GNUmakefile
index ed01a18fb8..b02d4ea834 100644
--- a/src/pmdas/root/GNUmakefile
+++ b/src/pmdas/root/GNUmakefile
@@ -83,6 +83,7 @@ pmns :
$(LN_S) -f root_root pmns
lxc.o root.o: $(TOPDIR)/src/include/pcp/libpcp.h
+podman.o: $(JSONSL_HFILES)
check:: $(CFILES) $(HFILES)
$(CLINT) $^

@ -0,0 +1,12 @@
diff -Naurp pcp-6.2.0.orig/src/include/pcp.conf.in pcp-6.2.0/src/include/pcp.conf.in
--- pcp-6.2.0.orig/src/include/pcp.conf.in 2023-12-08 10:24:17.000000000 +1100
+++ pcp-6.2.0/src/include/pcp.conf.in 2024-02-12 04:10:10.649953498 +1100
@@ -140,7 +140,7 @@ PCP_ARCHIVE_DIR=@pcp_archive_dir@
# default version for generating PCP archives
# Possible versions: 2, 3 (3 requires PCP 6+)
-PCP_ARCHIVE_VERSION=3
+PCP_ARCHIVE_VERSION=2
# directory for daily PCP activity reports
# Standard path: /var/log/pcp/sa

@ -1,34 +0,0 @@
commit ce6112399ebf0ff39069a34bc9286242c875555e
Author: adam kaminski <adam@adamkaminski.com>
Date: Fri Feb 9 12:49:34 2024 -0500
pmapi.py : fix for struct_time() and day of year out of range on yyyy-01-01
Fix for `day of year out of range` on 2024-01-01: `self.tm_yday - 1` yields a tm_yday of -1, producing `[2024, 1, 1, 2, 3, 0, 0, -1, 0]`, while the valid range for tm_yday is [1, 366].
# timedatectl set-ntp false
# timedatectl set-time "2024-01-01 00:00:00"
# /usr/libexec/pcp/bin/pcp-mpstat -P ALL -t 1 -s 2
Traceback (most recent call last):
File "/usr/libexec/pcp/bin/pcp-mpstat", line 653, in <module>
sts = manager.run()
File "/usr/lib64/python3.6/site-packages/pcp/pmcc.py", line 687, in run
self._printer.report(self)
File "/usr/libexec/pcp/bin/pcp-mpstat", line 606, in report
self.print_machine_info(group, manager)
File "/usr/libexec/pcp/bin/pcp-mpstat", line 585, in print_machine_info
time_string = time.strftime("%x", timestamp.struct_time())
ValueError: day of year out of range
diff -Naurp pcp-5.3.7.orig/src/python/pcp/pmapi.py pcp-5.3.7/src/python/pcp/pmapi.py
--- pcp-5.3.7.orig/src/python/pcp/pmapi.py 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/src/python/pcp/pmapi.py 2024-08-08 09:56:16.298625548 +1000
@@ -299,7 +299,7 @@ class tm(Structure):
pywday = 6
stlist = [self.tm_year + 1900, self.tm_mon + 1, self.tm_mday,
self.tm_hour, self.tm_min, self.tm_sec,
- pywday, self.tm_yday - 1, self.tm_isdst]
+ pywday, self.tm_yday + 1, self.tm_isdst]
return time.struct_time(stlist)
def __str__(self):
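The one-character change above is easy to sanity-check on its own: C's struct tm counts tm_yday from 0, while Python's time.struct_time expects tm_yday in 1..366, so the conversion has to add one rather than subtract one. A stand-alone sketch with illustrative values (not taken from pmapi.py):

    import time

    # struct tm (C) counts the day of the year from 0; time.struct_time
    # (Python) wants 1..366, so the conversion must add 1.
    c_tm_yday = 0                       # January 1st in struct tm terms
    fields = [2024, 1, 1,               # year, month, day
              0, 0, 0,                  # hour, minute, second
              0,                        # tm_wday (0 = Monday)
              c_tm_yday + 1,            # tm_yday: 0-based -> 1-based
              0]                        # tm_isdst
    print(time.strftime("%x (day %j)", time.struct_time(fields)))
    # subtracting 1 instead would make tm_yday -1 and strftime would raise
    # ValueError: day of year out of range, as in the traceback above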

@ -1,6 +1,6 @@
diff -Naurp pcp-5.3.7.orig/qa/1518 pcp-5.3.7/qa/1518
--- pcp-5.3.7.orig/qa/1518 1970-01-01 10:00:00.000000000 +1000
+++ pcp-5.3.7/qa/1518 2024-09-09 13:13:14.561437414 +1000
diff -Naurp pcp-6.2.0.orig/qa/1518 pcp-6.2.0/qa/1518
--- pcp-6.2.0.orig/qa/1518 1970-01-01 10:00:00.000000000 +1000
+++ pcp-6.2.0/qa/1518 2024-09-17 10:11:45.805874610 +1000
@@ -0,0 +1,75 @@
+#!/bin/sh
+# PCP QA Test No. 1518
@ -77,9 +77,9 @@ diff -Naurp pcp-5.3.7.orig/qa/1518 pcp-5.3.7/qa/1518
+
+# success, all done
+exit
diff -Naurp pcp-5.3.7.orig/qa/1518.out pcp-5.3.7/qa/1518.out
--- pcp-5.3.7.orig/qa/1518.out 1970-01-01 10:00:00.000000000 +1000
+++ pcp-5.3.7/qa/1518.out 2024-09-09 13:13:14.561437414 +1000
diff -Naurp pcp-6.2.0.orig/qa/1518.out pcp-6.2.0/qa/1518.out
--- pcp-6.2.0.orig/qa/1518.out 1970-01-01 10:00:00.000000000 +1000
+++ pcp-6.2.0/qa/1518.out 2024-09-17 10:11:45.806874611 +1000
@@ -0,0 +1,11 @@
+QA output created by 1518
+expect error to be logged ...
@ -92,21 +92,21 @@ diff -Naurp pcp-5.3.7.orig/qa/1518.out pcp-5.3.7/qa/1518.out
+indirectly lost: 0 bytes in 0 blocks
+Rerun with --leak-check=full to see details of leaked memory
+ERROR SUMMARY: 0 errors from 0 contexts ...
diff -Naurp pcp-5.3.7.orig/qa/group pcp-5.3.7/qa/group
--- pcp-5.3.7.orig/qa/group 2024-09-09 13:11:13.796450545 +1000
+++ pcp-5.3.7/qa/group 2024-09-09 13:13:49.419437747 +1000
@@ -1847,6 +1847,7 @@ x11
1490 python local labels
1495 pmlogrewrite labels pmdumplog local
diff -Naurp pcp-6.2.0.orig/qa/group pcp-6.2.0/qa/group
--- pcp-6.2.0.orig/qa/group 2024-02-11 23:48:09.000000000 +1100
+++ pcp-6.2.0/qa/group 2024-09-17 10:11:45.815874621 +1000
@@ -1951,6 +1951,7 @@ x11
1503 pcp pidstat ps python local
1511 pmcd local pmda.sample
1515 pmda.denki local valgrind
+1518 pmcd libpcp local
1530 pmda.zfs local valgrind
1531 pmda.zfs local valgrind
1532 pmda.zfs local
diff -Naurp pcp-5.3.7.orig/src/libpcp/src/endian.c pcp-5.3.7/src/libpcp/src/endian.c
--- pcp-5.3.7.orig/src/libpcp/src/endian.c 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/src/libpcp/src/endian.c 2024-09-09 13:20:56.967560588 +1000
@@ -268,13 +268,17 @@ ntohEventArray(pmValueBlock * const vb,
diff -Naurp pcp-6.2.0.orig/src/libpcp/src/endian.c pcp-6.2.0/src/libpcp/src/endian.c
--- pcp-6.2.0.orig/src/libpcp/src/endian.c 2023-11-16 17:51:39.000000000 +1100
+++ pcp-6.2.0/src/libpcp/src/endian.c 2024-09-17 10:11:45.820874627 +1000
@@ -275,13 +275,17 @@ ntohEventArray(pmValueBlock * const vb,
}
void
@ -125,7 +125,7 @@ diff -Naurp pcp-5.3.7.orig/src/libpcp/src/endian.c pcp-5.3.7/src/libpcp/src/endi
switch (vb->vtype) {
case PM_TYPE_U64:
case PM_TYPE_64:
@@ -298,6 +302,13 @@ __ntohpmValueBlock(pmValueBlock * const
@@ -305,6 +309,13 @@ __ntohpmValueBlock(pmValueBlock * const
break;
}
}
@ -139,31 +139,31 @@ diff -Naurp pcp-5.3.7.orig/src/libpcp/src/endian.c pcp-5.3.7/src/libpcp/src/endi
#endif
#ifndef __htonpmPDUInfo
diff -Naurp pcp-5.3.7.orig/src/libpcp/src/internal.h pcp-5.3.7/src/libpcp/src/internal.h
--- pcp-5.3.7.orig/src/libpcp/src/internal.h 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/src/libpcp/src/internal.h 2024-09-09 13:21:44.336608061 +1000
diff -Naurp pcp-6.2.0.orig/src/libpcp/src/internal.h pcp-6.2.0/src/libpcp/src/internal.h
--- pcp-6.2.0.orig/src/libpcp/src/internal.h 2023-11-16 17:51:39.000000000 +1100
+++ pcp-6.2.0/src/libpcp/src/internal.h 2024-09-17 10:11:45.823874630 +1000
@@ -60,6 +60,8 @@ extern int __pmGetDate(struct timespec *
#define __ntohpmLabel(a) /* noop */
#define __htonpmValueBlock(a) /* noop */
#define __ntohpmValueBlock(a) /* noop */
+#define __ntohpmValueBlock_hdr(a) /* noop */
+#define __ntohpmValueBlock_buf(a) /* noop */
#define __htonpmTimespec(a) /* noop */
#define __ntohpmTimespec(a) /* noop */
#define __htonpmTimestamp(a) /* noop */
@@ -94,6 +96,8 @@ extern void __htonpmLabel(pmLabel * cons
#define __htonf(a) /* noop */
#define __ntohf(a) /* noop */
#define __htond(a) /* noop */
@@ -90,6 +92,8 @@ extern void __htonpmLabel(pmLabel * cons
extern void __ntohpmLabel(pmLabel * const) _PCP_HIDDEN;
extern void __htonpmValueBlock(pmValueBlock * const) _PCP_HIDDEN;
extern void __ntohpmValueBlock(pmValueBlock * const) _PCP_HIDDEN;
+extern void __ntohpmValueBlock_hdr(pmValueBlock * const) _PCP_HIDDEN;
+extern void __ntohpmValueBlock_buf(pmValueBlock * const) _PCP_HIDDEN;
extern void __htonpmTimespec(pmTimespec * const ) _PCP_HIDDEN;
extern void __ntohpmTimespec(pmTimespec * const ) _PCP_HIDDEN;
extern void __htonpmTimestamp(__pmTimestamp * const );
diff -Naurp pcp-5.3.7.orig/src/libpcp/src/p_result.c pcp-5.3.7/src/libpcp/src/p_result.c
--- pcp-5.3.7.orig/src/libpcp/src/p_result.c 2022-04-05 09:05:43.000000000 +1000
+++ pcp-5.3.7/src/libpcp/src/p_result.c 2024-09-09 13:27:16.651969214 +1000
@@ -277,6 +277,135 @@ __pmSendHighResResult(int fd, int from,
extern void __htonf(char *) _PCP_HIDDEN; /* float */
#define __ntohf(v) __htonf(v)
#define __htond(v) __htonll(v) /* double */
diff -Naurp pcp-6.2.0.orig/src/libpcp/src/p_result.c pcp-6.2.0/src/libpcp/src/p_result.c
--- pcp-6.2.0.orig/src/libpcp/src/p_result.c 2023-11-16 17:51:39.000000000 +1100
+++ pcp-6.2.0/src/libpcp/src/p_result.c 2024-09-17 10:18:17.264314112 +1000
@@ -323,6 +323,135 @@ __pmSendHighResResult(int fd, int from,
return __pmSendHighResResult_ctx(NULL, fd, from, result);
}
@ -297,9 +297,9 @@ diff -Naurp pcp-5.3.7.orig/src/libpcp/src/p_result.c pcp-5.3.7/src/libpcp/src/p_
+}
+
#if defined(HAVE_64BIT_PTR)
static int
int
__pmDecodeValueSet(__pmPDU *pdubuf, int pdulen, __pmPDU *data, char *pduend,
@@ -290,7 +419,7 @@ __pmDecodeValueSet(__pmPDU *pdubuf, int
@@ -336,7 +465,7 @@ __pmDecodeValueSet(__pmPDU *pdubuf, int
int i, j;
/*
* Note: all sizes are in units of bytes ... beware that 'data' is in
@ -308,7 +308,7 @@ diff -Naurp pcp-5.3.7.orig/src/libpcp/src/p_result.c pcp-5.3.7/src/libpcp/src/p_
*/
int vsize; /* size of vlist_t's in PDU buffer */
int nvsize; /* size of pmValue's after decode */
@@ -368,11 +497,10 @@ __pmDecodeValueSet(__pmPDU *pdubuf, int
@@ -414,11 +543,10 @@ __pmDecodeValueSet(__pmPDU *pdubuf, int
return PM_ERR_IPC;
}
vindex = ntohl(pduvp->value.lval);
@ -323,29 +323,45 @@ diff -Naurp pcp-5.3.7.orig/src/libpcp/src/p_result.c pcp-5.3.7/src/libpcp/src/p_
return PM_ERR_IPC;
}
pduvbp = (pmValueBlock *)&pdubuf[vindex];
@@ -387,7 +515,7 @@ __pmDecodeValueSet(__pmPDU *pdubuf, int
"__pmDecodeValueSet", i, j);
@@ -427,29 +555,31 @@ __pmDecodeValueSet(__pmPDU *pdubuf, int
check = (size_t)(pduend - (char *)pduvbp);
if (sizeof(unsigned int) > check) {
if (pmDebugOptions.pdu && pmDebugOptions.desperate)
- fprintf(stderr, "%s: Bad: pmid[%d] value[%d] "
- "second pduvp past end of "
- "PDU buffer\n",
- "__pmDecodeValueSet", i, j);
+ fprintf(stderr, "__pmDecodeValueSet: PM_ERR_IPC: pmid[%d] value[%d] second pduvp past end of PDU buffer\n",
+ i, j);
return PM_ERR_IPC;
}
-
- __ntohpmValueBlock(pduvbp);
+ __ntohpmValueBlock_hdr(pduvbp);
if (pduvbp->vlen < PM_VAL_HDR_SIZE ||
pduvbp->vlen > pdulen) {
if (pmDebugOptions.pdu && pmDebugOptions.desperate)
@@ -396,13 +524,20 @@ __pmDecodeValueSet(__pmPDU *pdubuf, int
i, j, pduvbp->vlen);
- if (pmDebugOptions.pdu && pmDebugOptions.desperate)
- fprintf(stderr, "%s: Bad: pmid[%d] value[%d] "
- "vlen=%d\n", "__pmDecodeValueSet",
- i, j, pduvbp->vlen);
+ if (pmDebugOptions.pdu)
+ fprintf(stderr, "__pmDecodeValueSet: PM_ERR_IPC: pmid[%d] value[%d] vlen=%d\n",
+ i, j, pduvbp->vlen);
return PM_ERR_IPC;
}
- if (pduvbp->vlen > (size_t)(pduend - (char *)pduvbp)) {
- if (pmDebugOptions.pdu && pmDebugOptions.desperate)
- fprintf(stderr, "%s: Bad: pmid[%d] value[%d] "
- "pduvp past end of PDU buffer\n",
- "__pmDecodeValueSet", i, j);
+ if (pduvbp->vlen > check) {
if (pmDebugOptions.pdu && pmDebugOptions.desperate)
fprintf(stderr, "%s: Bad: pmid[%d] value[%d] "
"pduvp past end of PDU buffer\n",
"__pmDecodeValueSet", i, j);
+ if (pmDebugOptions.pdu)
+ fprintf(stderr, "__pmDecodeValueSet: PM_ERR_IPC: pmid[%d] value[%d] pduvp past end of PDU buffer\n",
+ i, j);
return PM_ERR_IPC;
}
+ if (pduvbp->vtype == PM_TYPE_HIGHRES_EVENT ||
+ pduvbp->vtype == PM_TYPE_EVENT) {
+ vindex = (pduvbp->vtype == PM_TYPE_HIGHRES_EVENT);
+ if (__pmEventArrayCheck(pduvbp, vindex, i, j, check) < 0)
+ return PM_ERR_IPC;
@ -354,7 +370,7 @@ diff -Naurp pcp-5.3.7.orig/src/libpcp/src/p_result.c pcp-5.3.7/src/libpcp/src/p_
vbsize += PM_PDU_SIZE_BYTES(pduvbp->vlen);
if (pmDebugOptions.pdu && pmDebugOptions.desperate) {
fprintf(stderr, " len: %d type: %d",
@@ -635,11 +770,10 @@ __pmDecodeValueSet(__pmPDU *pdubuf, int
@@ -682,11 +812,10 @@ __pmDecodeValueSet(__pmPDU *pdubuf, int
} else {
/* salvage pmValueBlocks from end of PDU */
vindex = ntohl(pduvp->value.lval);
@ -369,17 +385,16 @@ diff -Naurp pcp-5.3.7.orig/src/libpcp/src/p_result.c pcp-5.3.7/src/libpcp/src/p_
return PM_ERR_IPC;
}
pduvbp = (pmValueBlock *)&pdubuf[vindex];
@@ -654,7 +788,8 @@ __pmDecodeValueSet(__pmPDU *pdubuf, int
@@ -701,7 +830,7 @@ __pmDecodeValueSet(__pmPDU *pdubuf, int
"__pmDecodeValueSet", i, j);
return PM_ERR_IPC;
}
- __ntohpmValueBlock(pduvbp);
+
+ __ntohpmValueBlock_hdr(pduvbp);
if (pduvbp->vlen < PM_VAL_HDR_SIZE ||
pduvbp->vlen > pdulen) {
if (pmDebugOptions.pdu && pmDebugOptions.desperate)
@@ -663,13 +798,20 @@ __pmDecodeValueSet(__pmPDU *pdubuf, int
@@ -710,13 +839,20 @@ __pmDecodeValueSet(__pmPDU *pdubuf, int
i, j, pduvbp->vlen);
return PM_ERR_IPC;
}
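The libpcp changes above all follow one pattern: byte-swap only the fixed-size pmValueBlock header first, validate the advertised vlen (and, for event-typed values, the embedded event array via __pmEventArrayCheck) against what actually remains in the PDU buffer, and only then convert the payload. A rough stand-alone sketch of that header-first validation; the 8/24-bit split matches pmValueBlock's vtype/vlen fields, but the byte packing and names here are assumptions for illustration only:

    import struct

    HDR = struct.Struct("!I")            # one 32-bit header word, network order

    def decode_block(buf, offset):
        if len(buf) - offset < HDR.size:
            raise ValueError("header past end of buffer")
        (word,) = HDR.unpack_from(buf, offset)       # swap the header only
        vtype, vlen = word >> 24, word & 0x00FFFFFF  # assumed packing
        if vlen < HDR.size or vlen > len(buf) - offset:
            raise ValueError("vlen=%d past end of buffer" % vlen)
        # only now is it safe to touch (or byte-swap) the payload
        return vtype, buf[offset + HDR.size:offset + vlen]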

@ -1,74 +0,0 @@
diff -Naurp pcp-5.3.7.orig/src/selinux/pcp.fc pcp-5.3.7/src/selinux/pcp.fc
--- pcp-5.3.7.orig/src/selinux/pcp.fc 2023-11-21 13:25:11.689247531 +1100
+++ pcp-5.3.7/src/selinux/pcp.fc 2023-11-21 14:12:48.080744232 +1100
@@ -1,36 +1,32 @@
-/etc/rc\.d/init\.d/pmcd -- gen_context(system_u:object_r:pcp_pmcd_initrc_exec_t,s0)
-/etc/rc\.d/init\.d/pmlogger -- gen_context(system_u:object_r:pcp_pmlogger_initrc_exec_t,s0)
-/etc/rc\.d/init\.d/pmproxy -- gen_context(system_u:object_r:pcp_pmproxy_initrc_exec_t,s0)
-/etc/rc\.d/init\.d/pmie -- gen_context(system_u:object_r:pcp_pmie_initrc_exec_t,s0)
-
/usr/bin/pmie -- gen_context(system_u:object_r:pcp_pmie_exec_t,s0)
-/usr/bin/pmcd -- gen_context(system_u:object_r:pcp_pmcd_exec_t,s0)
/usr/bin/pmlogger -- gen_context(system_u:object_r:pcp_pmlogger_exec_t,s0)
-/usr/bin/pmproxy -- gen_context(system_u:object_r:pcp_pmproxy_exec_t,s0)
-
/usr/libexec/pcp/bin/pmcd -- gen_context(system_u:object_r:pcp_pmcd_exec_t,s0)
-/usr/libexec/pcp/bin/pmlogger -- gen_context(system_u:object_r:pcp_pmlogger_exec_t,s0)
/usr/libexec/pcp/bin/pmproxy -- gen_context(system_u:object_r:pcp_pmproxy_exec_t,s0)
-/usr/libexec/pcp/bin/pmie -- gen_context(system_u:object_r:pcp_pmie_exec_t,s0)
+
+/usr/libexec/pcp/bin/pmie_check -- gen_context(system_u:object_r:pcp_pmie_exec_t,s0)
+/usr/libexec/pcp/bin/pmie_daily -- gen_context(system_u:object_r:pcp_pmie_exec_t,s0)
+/usr/libexec/pcp/bin/pmie_farm -- gen_context(system_u:object_r:pcp_pmie_exec_t,s0)
+/usr/libexec/pcp/bin/pmlogger_check -- gen_context(system_u:object_r:pcp_pmlogger_exec_t,s0)
+/usr/libexec/pcp/bin/pmlogger_daily -- gen_context(system_u:object_r:pcp_pmlogger_exec_t,s0)
+/usr/libexec/pcp/bin/pmlogger_farm -- gen_context(system_u:object_r:pcp_pmlogger_exec_t,s0)
/usr/libexec/pcp/lib/pmcd -- gen_context(system_u:object_r:pcp_pmcd_initrc_exec_t,s0)
/usr/libexec/pcp/lib/pmlogger -- gen_context(system_u:object_r:pcp_pmlogger_initrc_exec_t,s0)
/usr/libexec/pcp/lib/pmproxy -- gen_context(system_u:object_r:pcp_pmproxy_initrc_exec_t,s0)
/usr/libexec/pcp/lib/pmie -- gen_context(system_u:object_r:pcp_pmie_initrc_exec_t,s0)
-/usr/share/pcp/lib/pmie -- gen_context(system_u:object_r:pcp_pmie_exec_t,s0)
-
-/usr/share/pcp/lib/pmlogger -- gen_context(system_u:object_r:pcp_pmlogger_exec_t,s0)
+/usr/share/pcp/lib/pmcd -- gen_context(system_u:object_r:pcp_pmcd_exec_t,s0)
+/usr/share/pcp/lib/pmproxy -- gen_context(system_u:object_r:pcp_pmproxy_exec_t,s0)
+/usr/share/pcp/lib/pmie -- gen_context(system_u:object_r:pcp_pmie_exec_t,s0)
+/usr/share/pcp/lib/pmlogger -- gen_context(system_u:object_r:pcp_pmlogger_exec_t,s0)
/var/lib/pcp(/.*)? gen_context(system_u:object_r:pcp_var_lib_t,s0)
/var/lib/pcp/pmdas/.*/Install -- gen_context(system_u:object_r:pcp_plugin_exec_t,s0)
-/var/lib/pcp/pmdas/.*/Remove -- gen_context(system_u:object_r:pcp_plugin_exec_t,s0)
+/var/lib/pcp/pmdas/.*/Remove -- gen_context(system_u:object_r:pcp_plugin_exec_t,s0)
/var/lib/pcp/pmdas/.*/Upgrade -- gen_context(system_u:object_r:pcp_plugin_exec_t,s0)
/var/log/pcp(/.*)? gen_context(system_u:object_r:pcp_log_t,s0)
/var/run/pcp(/.*)? gen_context(system_u:object_r:pcp_var_run_t,s0)
-/var/run/pmcd\.socket -- gen_context(system_u:object_r:pcp_var_run_t,s0)
-/var/run/pmlogger\.primary\.socket -l gen_context(system_u:object_r:pcp_var_run_t,s0)
diff -Naurp pcp-5.3.7.orig/src/selinux/pcp.te pcp-5.3.7/src/selinux/pcp.te
--- pcp-5.3.7.orig/src/selinux/pcp.te 2023-11-21 13:25:11.690247528 +1100
+++ pcp-5.3.7/src/selinux/pcp.te 2023-11-21 14:13:03.855770809 +1100
@@ -279,6 +279,7 @@ allow pcp_pmlogger_t pcp_pmcd_t:unix_str
allow pcp_pmlogger_t self:unix_dgram_socket create_socket_perms;
allow pcp_pmlogger_t pcp_pmlogger_exec_t:file execute_no_trans;
+allow pcp_pmlogger_t ldconfig_exec_t:file { execute execute_no_trans };
dontaudit pcp_pmlogger_t self:cap_userns { sys_ptrace };
@@ -313,6 +314,10 @@ optional_policy(`
rpm_script_signal(pcp_pmlogger_t)
')
+optional_policy(`
+ userdom_setattr_user_home_content_files(pcp_pmlogger_t)
+')
+
########################################
#
# pcp_plugin local policy

File diff suppressed because it is too large

File diff suppressed because it is too large