import libhugetlbfs-2.21-17.el8

c8 imports/c8/libhugetlbfs-2.21-17.el8
CentOS Sources 4 years ago committed by MSVSphere Packaging Team
commit f07c959dde

1
.gitignore vendored

@ -0,0 +1 @@
SOURCES/libhugetlbfs-2.21.tar.gz

@ -0,0 +1 @@
8ed79a12d07be1e858ef4e0148ab1f4115094ef6 SOURCES/libhugetlbfs-2.21.tar.gz

@ -0,0 +1,100 @@
From d42f467a923dfc09309acb7a83b42e3285fbd8f4 Mon Sep 17 00:00:00 2001
Message-Id: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:28 +0530
Subject: [RHEL7 PATCH 01/31] tests: Add utility to check for a minimum number
of online cpus
This adds a test utility to check if a minimum number (N)
of online cpus are available. If available, this will also
provide a list of the first N online cpus.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/hugetests.h | 1 +
tests/testutils.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 49 insertions(+)
diff --git a/tests/hugetests.h b/tests/hugetests.h
index 8b1d8d9..e3179e6 100644
--- a/tests/hugetests.h
+++ b/tests/hugetests.h
@@ -43,6 +43,7 @@ extern char *test_name;
void check_free_huge_pages(int nr_pages_needed);
void check_must_be_root(void);
void check_hugetlb_shm_group(void);
+void check_online_cpus(int[], int);
void test_init(int argc, char *argv[]);
int test_addr_huge(void *p);
unsigned long long get_mapping_page_size(void *p);
diff --git a/tests/testutils.c b/tests/testutils.c
index 6298370..2b47547 100644
--- a/tests/testutils.c
+++ b/tests/testutils.c
@@ -33,6 +33,8 @@
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/stat.h>
+#include <sys/sysinfo.h>
+#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
@@ -80,6 +82,52 @@ void check_hugetlb_shm_group(void)
CONFIG("Do not have permission to use SHM_HUGETLB");
}
+#define SYSFS_CPU_ONLINE_FMT "/sys/devices/system/cpu/cpu%d/online"
+
+void check_online_cpus(int online_cpus[], int nr_cpus_needed)
+{
+ char cpu_state, path_buf[64];
+ int total_cpus, cpu_idx, fd, ret, i;
+
+ total_cpus = get_nprocs_conf();
+ cpu_idx = 0;
+
+ if (get_nprocs() < nr_cpus_needed)
+ CONFIG("Atleast online %d cpus are required", nr_cpus_needed);
+
+ for (i = 0; i < total_cpus && cpu_idx < nr_cpus_needed; i++) {
+ errno = 0;
+ sprintf(path_buf, SYSFS_CPU_ONLINE_FMT, i);
+ fd = open(path_buf, O_RDONLY);
+ if (fd < 0) {
+ /* If 'online' is absent, the cpu cannot be offlined */
+ if (errno == ENOENT) {
+ online_cpus[cpu_idx] = i;
+ cpu_idx++;
+ continue;
+ } else {
+ FAIL("Unable to open %s: %s", path_buf,
+ strerror(errno));
+ }
+ }
+
+ ret = read(fd, &cpu_state, 1);
+ if (ret < 1)
+ FAIL("Unable to read %s: %s", path_buf,
+ strerror(errno));
+
+ if (cpu_state == '1') {
+ online_cpus[cpu_idx] = i;
+ cpu_idx++;
+ }
+
+ close(fd);
+ }
+
+ if (cpu_idx < nr_cpus_needed)
+ CONFIG("Atleast %d online cpus were not found", nr_cpus_needed);
+}
+
void __attribute__((weak)) cleanup(void)
{
}
--
1.8.3.1

@ -0,0 +1,68 @@
From 192ac21a3c057c5dedca4cdd1bf700f38992030c Mon Sep 17 00:00:00 2001
Message-Id: <192ac21a3c057c5dedca4cdd1bf700f38992030c.1496667760.git.jstancek@redhat.com>
From: Jan Stancek <jstancek@redhat.com>
Date: Thu, 1 Jun 2017 09:48:41 +0200
Subject: [PATCH v2 1/2] testutils: fix range_is_mapped()
It doesn't return the correct value when the tested region is
completely inside an existing mapping:
+--------------------------------------+
^ start ^ end
+----------------+
^ low ^ high
Rather than testing for all combinations of 2 regions overlapping,
flip the condition and test if they don't overlap.
Signed-off-by: Jan Stancek <jstancek@redhat.com>
---
tests/testutils.c | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
This is a v2 series for:
https://groups.google.com/forum/#!topic/libhugetlbfs/tAsWjuJ7x8k
diff --git a/tests/testutils.c b/tests/testutils.c
index 629837045465..f42852e1938b 100644
--- a/tests/testutils.c
+++ b/tests/testutils.c
@@ -190,19 +190,29 @@ int range_is_mapped(unsigned long low, unsigned long high)
return -1;
}
- if ((start >= low) && (start < high)) {
+ /*
+ * (existing mapping) (tested region)
+ * +----------------+ +.......+
+ * ^start ^end ^ low ^high
+ */
+ if (low >= end) {
fclose(f);
- return 1;
+ return 0;
}
- if ((end >= low) && (end < high)) {
+
+ /*
+ * (tested region) (existing mapping)
+ * +.....+ +----------------+
+ * ^low ^high ^ start ^end
+ */
+ if (high <= start) {
fclose(f);
- return 1;
+ return 0;
}
-
}
fclose(f);
- return 0;
+ return 1;
}
/*
--
1.8.3.1

@ -0,0 +1,52 @@
commit e7b3e6817421763eee37cb35ef8627bdd37a3690
Author: Chunyu Hu <chuhu@redhat.com>
Date: Wed May 6 18:59:43 2020 +0800
Wait child with os.wait()
os.popen() is an asynchronous method: it fork()s a child and exec()s the
given command in the child. If the child is slow enough, the main process
can read back an incomplete result.
During our tests, we found that python3 is faster than python2. After
converting to python3, 'groupadd' usually has not finished by the time the
following step iterates over the groups, so we would get '-1' as the group
id, leading to an error.
To reproduce it with python3:
/root/rpmbuild/BUILD/libhugetlbfs-2.21/huge_page_setup_helper.py <<EOF
128
hugepages
hugepages root
EOF
...
hugeadm:ERROR: Invalid group specification (-1)
...
Signed-off-by: Chunyu Hu <chuhu@redhat.com>
diff --git a/huge_page_setup_helper.py b/huge_page_setup_helper.py
index a9ba2bf..01fc8dc 100755
--- a/huge_page_setup_helper.py
+++ b/huge_page_setup_helper.py
@@ -169,6 +169,10 @@ else:
os.popen("/usr/sbin/groupadd %s" % userGroupReq)
else:
print("/usr/sbin/groupadd %s" % userGroupReq)
+
+ # wait for the groupadd finish
+ os.wait()
+
groupNames = os.popen("/usr/bin/getent group %s" % userGroupReq).readlines()
for line in groupNames:
curGroupName = line.split(":")[0]
@@ -244,6 +248,9 @@ else:
print("/usr/bin/hugeadm --set-recommended-shmmax")
print()
+# wait for the hugepage setups finish
+os.wait()
+
# figure out what that shmmax value we just set was
hugeadmexplain = os.popen("/usr/bin/hugeadm --explain 2>/dev/null").readlines()
for line in hugeadmexplain:

@ -0,0 +1,173 @@
From a329008ea54056f0ed9d85cc3d0d9129474f7cd5 Mon Sep 17 00:00:00 2001
Message-Id: <a329008ea54056f0ed9d85cc3d0d9129474f7cd5.1496667760.git.jstancek@redhat.com>
In-Reply-To: <192ac21a3c057c5dedca4cdd1bf700f38992030c.1496667760.git.jstancek@redhat.com>
References: <192ac21a3c057c5dedca4cdd1bf700f38992030c.1496667760.git.jstancek@redhat.com>
From: Jan Stancek <jstancek@redhat.com>
Date: Thu, 1 Jun 2017 10:00:47 +0200
Subject: [PATCH v2 2/2] stack_grow_into_huge: don't clobber existing mappings
This test allocates hugepages above stack using MAP_FIXED and then
grows stack while it can. If a MAP_FIXED request is successful,
then mapping established by mmap() replaces any previous mappings
for the process' pages. If there's anything important there (libc
mappings), these can get clobbered as described here:
http://marc.info/?l=linux-arm-kernel&m=149036535209519&w=2.
This patch is creating extra stack for new child and maps
one hugepage above it. The search starts at heap until it
hits existing mapping or until it can successfully map
huge page and stack below it.
If suitable place can't be found, test PASSes as inconclusive.
Signed-off-by: Jan Stancek <jstancek@redhat.com>
---
tests/stack_grow_into_huge.c | 101 ++++++++++++++++++++++++++++---------------
1 file changed, 67 insertions(+), 34 deletions(-)
This is a v2 series for:
https://groups.google.com/forum/#!topic/libhugetlbfs/tAsWjuJ7x8k
diff --git a/tests/stack_grow_into_huge.c b/tests/stack_grow_into_huge.c
index a380da063264..9b8ea8d74887 100644
--- a/tests/stack_grow_into_huge.c
+++ b/tests/stack_grow_into_huge.c
@@ -25,6 +25,7 @@
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/wait.h>
+#include <sched.h>
#include <hugetlbfs.h>
#include "hugetests.h"
@@ -54,7 +55,10 @@
#define STACK_ALLOCATION_SIZE (16*1024*1024)
#endif
-void do_child(void *stop_address)
+#define MIN_CHILD_STACK (2*1024*1024)
+#define STEP (STACK_ALLOCATION_SIZE)
+
+int do_child(void *stop_address)
{
struct rlimit r;
volatile int *x;
@@ -71,15 +75,68 @@ void do_child(void *stop_address)
x = alloca(STACK_ALLOCATION_SIZE);
*x = 1;
} while ((void *)x >= stop_address);
+
+ return 0;
+}
+
+void *try_setup_stack_and_huge(int fd, void *hint)
+{
+ void *mmap_address, *stack_start, *tmp;
+ long hpage_size = gethugepagesize();
+ void *stop = alloca(1);
+
+ /*
+ * Find a spot for huge page. We start at "hint" and
+ * keep going down in "STEP" increments until we find
+ * a place where we can mmap huge page.
+ */
+ mmap_address = PALIGN(hint, hpage_size);
+ do {
+ mmap_address += STEP;
+ if (mmap_address >= stop)
+ return NULL;
+ if (range_is_mapped((unsigned long)mmap_address,
+ (unsigned long)mmap_address + hpage_size))
+ continue;
+ tmp = mmap(mmap_address, hpage_size,
+ PROT_READ|PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
+ } while (tmp == MAP_FAILED);
+ verbose_printf("huge page is at: %p-%p\n",
+ mmap_address, mmap_address + hpage_size);
+
+ /*
+ * Find a spot for stack below huge page. We start at end of
+ * huge page we found above and keep trying to mmap stack
+ * below. Because stack needs to grow into hugepage, we
+ * also have to make sure nothing is mapped in gap between
+ * stack and huge page.
+ */
+ stack_start = mmap_address + hpage_size;
+ do {
+ if (range_is_mapped((unsigned long)stack_start,
+ (unsigned long)stack_start + STEP + MIN_CHILD_STACK)) {
+ verbose_printf("range is mapped: %p-%p\n", stack_start,
+ stack_start + STEP + MIN_CHILD_STACK);
+ munmap(mmap_address, hpage_size);
+ return NULL;
+ }
+ stack_start += STEP;
+ if (stack_start >= stop)
+ return NULL;
+ tmp = mmap(stack_start, MIN_CHILD_STACK, PROT_READ|PROT_WRITE,
+ MAP_GROWSDOWN|MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
+ } while (tmp == MAP_FAILED);
+
+ verbose_printf("Child stack is at %p-%p\n",
+ stack_start, stack_start + MIN_CHILD_STACK);
+ return stack_start + MIN_CHILD_STACK;
}
int main(int argc, char *argv[])
{
int fd, pid, s, ret;
struct rlimit r;
- char *b;
- long hpage_size = gethugepagesize();
- void *stack_address, *mmap_address, *heap_address;
+ void *stack_end;
test_init(argc, argv);
@@ -94,37 +151,13 @@ int main(int argc, char *argv[])
if (fd < 0)
CONFIG("Couldn't get hugepage fd");
- stack_address = alloca(0);
- heap_address = sbrk(0);
+ stack_end = try_setup_stack_and_huge(fd, sbrk(0));
+ if (!stack_end)
+ PASS_INCONCLUSIVE();
- /*
- * paranoia: start mapping two hugepages below the start of the stack,
- * in case the alignment would cause us to map over something if we
- * only used a gap of one hugepage.
- */
- mmap_address = PALIGN(stack_address - 2 * hpage_size, hpage_size);
-
- do {
- b = mmap(mmap_address, hpage_size, PROT_READ|PROT_WRITE,
- MAP_FIXED|MAP_SHARED, fd, 0);
- mmap_address -= hpage_size;
- /*
- * if we get all the way down to the heap, stop trying
- */
- if (mmap_address <= heap_address)
- break;
- } while (b == MAP_FAILED);
-
- if (b == MAP_FAILED)
- FAIL("mmap: %s", strerror(errno));
-
- if ((pid = fork()) < 0)
- FAIL("fork: %s", strerror(errno));
-
- if (pid == 0) {
- do_child(mmap_address);
- exit(0);
- }
+ pid = clone(do_child, stack_end, SIGCHLD, 0);
+ if (pid < 0)
+ FAIL("clone: %s", strerror(errno));
ret = waitpid(pid, &s, 0);
if (ret == -1)
--
1.8.3.1

@ -0,0 +1,74 @@
From 865d160eff7e6c69968d0196272030f206dd3430 Mon Sep 17 00:00:00 2001
Message-Id: <865d160eff7e6c69968d0196272030f206dd3430.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:29 +0530
Subject: [RHEL7 PATCH 02/31] tests: slbpacaflush: Use online cpus only
This ensures that the two cpus between which the thread is
migrated are online. For offline cpus, sched_setaffinity()
will always fail.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/slbpacaflush.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/tests/slbpacaflush.c b/tests/slbpacaflush.c
index 8893c4d..765e069 100644
--- a/tests/slbpacaflush.c
+++ b/tests/slbpacaflush.c
@@ -57,29 +57,32 @@ int main(int argc, char *argv[])
int fd;
void *p;
volatile unsigned long *q;
- int err;
+ int online_cpus[2], err;
cpu_set_t cpu0, cpu1;
test_init(argc, argv);
hpage_size = check_hugepagesize();
+ check_online_cpus(online_cpus, 2);
fd = hugetlbfs_unlinked_fd();
if (fd < 0)
FAIL("hugetlbfs_unlinked_fd()");
CPU_ZERO(&cpu0);
- CPU_SET(0, &cpu0);
+ CPU_SET(online_cpus[0], &cpu0);
CPU_ZERO(&cpu1);
- CPU_SET(1, &cpu1);
+ CPU_SET(online_cpus[1], &cpu1);
err = sched_setaffinity(getpid(), CPU_SETSIZE/8, &cpu0);
if (err != 0)
- CONFIG("sched_setaffinity(cpu0): %s", strerror(errno));
+ CONFIG("sched_setaffinity(cpu%d): %s", online_cpus[0],
+ strerror(errno));
err = sched_setaffinity(getpid(), CPU_SETSIZE/8, &cpu1);
if (err != 0)
- CONFIG("sched_setaffinity(): %s", strerror(errno));
+ CONFIG("sched_setaffinity(cpu%d): %s", online_cpus[1],
+ strerror(errno));
p = mmap(NULL, hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
if (p == MAP_FAILED)
@@ -87,7 +90,8 @@ int main(int argc, char *argv[])
err = sched_setaffinity(getpid(), CPU_SETSIZE/8, &cpu0);
if (err != 0)
- CONFIG("sched_setaffinity(cpu0): %s", strerror(errno));
+ CONFIG("sched_setaffinity(cpu%d): %s", online_cpus[0],
+ strerror(errno));
q = (volatile unsigned long *)(p + getpagesize());
*q = 0xdeadbeef;
--
1.8.3.1

@ -0,0 +1,70 @@
From 4ba9722027d9aeec173866b5ca12282268594f35 Mon Sep 17 00:00:00 2001
Message-Id: <4ba9722027d9aeec173866b5ca12282268594f35.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:30 +0530
Subject: [RHEL7 PATCH 03/31] tests: alloc-instantiate-race: Use online cpus
only
This ensures that the two processes or threads between which
the race condition is introduced are always running on online
cpus. For offline cpus, sched_setaffinity() will always fail.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/alloc-instantiate-race.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/tests/alloc-instantiate-race.c b/tests/alloc-instantiate-race.c
index 7f84e8a..f55e2f7 100644
--- a/tests/alloc-instantiate-race.c
+++ b/tests/alloc-instantiate-race.c
@@ -121,7 +121,9 @@ static void run_race(void *syncarea, int race_type)
int fd;
void *p;
int status1, status2;
- int ret;
+ int online_cpus[2], ret;
+
+ check_online_cpus(online_cpus, 2);
memset(syncarea, 0, sizeof(*trigger1) + sizeof(*trigger2));
trigger1 = syncarea;
@@ -143,13 +145,13 @@ static void run_race(void *syncarea, int race_type)
if (child1 < 0)
FAIL("fork(): %s", strerror(errno));
if (child1 == 0)
- proc_racer(p, 0, trigger1, trigger2);
+ proc_racer(p, online_cpus[0], trigger1, trigger2);
child2 = fork();
if (child2 < 0)
FAIL("fork(): %s", strerror(errno));
if (child2 == 0)
- proc_racer(p, 1, trigger2, trigger1);
+ proc_racer(p, online_cpus[1], trigger2, trigger1);
/* wait() calls */
ret = waitpid(child1, &status1, 0);
@@ -175,13 +177,13 @@ static void run_race(void *syncarea, int race_type)
} else {
struct racer_info ri1 = {
.p = p,
- .cpu = 0,
+ .cpu = online_cpus[0],
.mytrigger = trigger1,
.othertrigger = trigger2,
};
struct racer_info ri2 = {
.p = p,
- .cpu = 1,
+ .cpu = online_cpus[1],
.mytrigger = trigger2,
.othertrigger = trigger1,
};
--
1.8.3.1

@ -0,0 +1,52 @@
From 2f38664f81e1877f81b16ed327b540d69d175a5b Mon Sep 17 00:00:00 2001
Message-Id: <2f38664f81e1877f81b16ed327b540d69d175a5b.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:31 +0530
Subject: [RHEL7 PATCH 04/31] tests: task-size-overrun: Make test faster on
powerpc64
As of now, powerpc64 supports 64TB, 128TB, 512TB, 1PB, 2PB and
4PB user address space sizes with 4TB being the default for the
newer kernels. With the relatively conservative increments that
this test uses to find the task size, it takes a very long time
but this can be made faster by also increasing the increment
factor in steps of the different supported task sizes.
Fixes: 02df38e ("Defined task size value to be 512T if it is more that 64Tb.")
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/task-size-overrun.c | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/tests/task-size-overrun.c b/tests/task-size-overrun.c
index dc9ce0e..914ef65 100644
--- a/tests/task-size-overrun.c
+++ b/tests/task-size-overrun.c
@@ -83,8 +83,18 @@ static unsigned long find_task_size(void)
munmap(p, getpagesize());
addr += getpagesize();
#if defined(__powerpc64__)
- if (addr > (1UL << 46) && addr < (1UL << 49))
- addr = 1UL << 49;
+ if (addr > (1UL << 46) && addr < (1UL << 47))
+ addr = 1UL << 47; /* 64TB */
+ else if (addr > (1UL << 47) && addr < (1UL << 48))
+ addr = 1UL << 48; /* 128TB */
+ else if (addr > (1UL << 48) && addr < (1UL << 49))
+ addr = 1UL << 49; /* 512TB */
+ else if (addr > (1UL << 49) && addr < (1UL << 50))
+ addr = 1UL << 50; /* 1PB */
+ else if (addr > (1UL << 50) && addr < (1UL << 51))
+ addr = 1UL << 51; /* 2PB */
+ else if (addr > (1UL << 51) && addr < (1UL << 52))
+ addr = 1UL << 52; /* 4PB */
#endif
#if defined(__s390x__)
if (addr > (1UL << 42) && addr < (1UL << 53))
--
1.8.3.1

@ -0,0 +1,50 @@
From 2a63852ac9358cdddce9944aade1d443f686246a Mon Sep 17 00:00:00 2001
Message-Id: <2a63852ac9358cdddce9944aade1d443f686246a.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:32 +0530
Subject: [RHEL7 PATCH 05/31] tests: truncate-above-4GB: Skip if truncation
point is not aligned
Attempting ftruncate() on a hugetlbfs file descriptor requires
the truncation point to be aligned to the huge page size. So,
this test is not applicable for huge page sizes greater that
are either greater than or not a factor of the truncation point.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/truncate_above_4GB.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/tests/truncate_above_4GB.c b/tests/truncate_above_4GB.c
index 4c427fc..2e29931 100644
--- a/tests/truncate_above_4GB.c
+++ b/tests/truncate_above_4GB.c
@@ -79,6 +79,13 @@ int main(int argc, char *argv[])
page_size = getpagesize();
hpage_size = check_hugepagesize();
+ truncate_point = FOURGIG;
+
+ if (hpage_size > truncate_point)
+ CONFIG("Huge page size is too large");
+
+ if (truncate_point % hpage_size > 0)
+ CONFIG("Truncation point is not aligned to huge page size");
check_free_huge_pages(3);
@@ -86,7 +93,6 @@ int main(int argc, char *argv[])
if (fd < 0)
FAIL("hugetlbfs_unlinked_fd()");
- truncate_point = FOURGIG;
buggy_offset = truncate_point / (hpage_size / page_size);
buggy_offset = ALIGN(buggy_offset, hpage_size);
--
1.8.3.1

@ -0,0 +1,49 @@
From 65c07c0f64ef1c97f9aea80d0c8470417e377a6a Mon Sep 17 00:00:00 2001
Message-Id: <65c07c0f64ef1c97f9aea80d0c8470417e377a6a.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:33 +0530
Subject: [RHEL7 PATCH 06/31] tests: map-high-truncate-2: Skip if truncation
point is not aligned
Attempting ftruncate() on a hugetlbfs file descriptor requires
the truncation point to be aligned to the huge page size. So,
this test is not applicable for huge page sizes that are either
greater than or not a factor of the truncation point.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/map_high_truncate_2.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/tests/map_high_truncate_2.c b/tests/map_high_truncate_2.c
index 2a2560b..fc44a13 100644
--- a/tests/map_high_truncate_2.c
+++ b/tests/map_high_truncate_2.c
@@ -56,6 +56,7 @@
#define TRUNCATE_POINT 0x60000000UL
#endif
#define HIGH_ADDR 0xa0000000UL
+#define FOURGIG ((off64_t)0x100000000ULL)
int main(int argc, char *argv[])
{
@@ -69,6 +70,12 @@ int main(int argc, char *argv[])
hpage_size = check_hugepagesize();
+ if (hpage_size > TRUNCATE_POINT)
+ CONFIG("Huge page size is too large");
+
+ if (TRUNCATE_POINT % hpage_size)
+ CONFIG("Truncation point is not aligned to huge page size");
+
check_free_huge_pages(4);
fd = hugetlbfs_unlinked_fd();
--
1.8.3.1

@ -0,0 +1,130 @@
From e472e326d31a125e21453d75cb46bba9cf387952 Mon Sep 17 00:00:00 2001
Message-Id: <e472e326d31a125e21453d75cb46bba9cf387952.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:34 +0530
Subject: [RHEL7 PATCH 07/31] morecore: tests: Run tests only for default huge
page size
The morecore tests (malloc, malloc-manysmall and heapshrink)
are not linked against libhugetlbfs and cannot invoke library
functions like gethugepagesize(). Hence, run these tests only
for the kernel's default huge page size.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 81 +++++++++++++++++++++++++++++++++---------------------
1 file changed, 49 insertions(+), 32 deletions(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 3c95a03..70c5a6a 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -362,6 +362,16 @@ def do_test_with_rlimit(rtype, limit, cmd, bits=None, **env):
do_test(cmd, bits, **env)
resource.setrlimit(rtype, oldlimit)
+def do_test_with_pagesize(pagesize, cmd, bits=None, **env):
+ """
+ Run a test case, testing with a specified huge page size and
+ each indicated word size.
+ """
+ if bits == None:
+ bits = wordsizes
+ for b in (set(bits) & wordsizes_by_pagesize[pagesize]):
+ run_test(pagesize, b, cmd, **env)
+
def do_elflink_test(cmd, **env):
"""
Run an elflink test case, skipping known-bad configurations.
@@ -563,15 +573,22 @@ def functional_tests():
do_test("private")
do_test("fork-cow")
do_test("direct")
- do_test("malloc")
- do_test("malloc", LD_PRELOAD="libhugetlbfs.so", HUGETLB_MORECORE="yes")
- do_test("malloc", LD_PRELOAD="libhugetlbfs.so", HUGETLB_MORECORE="yes",
- HUGETLB_RESTRICT_EXE="unknown:none")
- do_test("malloc", LD_PRELOAD="libhugetlbfs.so", HUGETLB_MORECORE="yes",
- HUGETLB_RESTRICT_EXE="unknown:malloc")
- do_test("malloc_manysmall")
- do_test("malloc_manysmall", LD_PRELOAD="libhugetlbfs.so",
- HUGETLB_MORECORE="yes")
+ do_test_with_pagesize(system_default_hpage_size, "malloc")
+ do_test_with_pagesize(system_default_hpage_size, "malloc",
+ LD_PRELOAD="libhugetlbfs.so",
+ HUGETLB_MORECORE="yes")
+ do_test_with_pagesize(system_default_hpage_size, "malloc",
+ LD_PRELOAD="libhugetlbfs.so",
+ HUGETLB_MORECORE="yes",
+ HUGETLB_RESTRICT_EXE="unknown:none")
+ do_test_with_pagesize(system_default_hpage_size, "malloc",
+ LD_PRELOAD="libhugetlbfs.so",
+ HUGETLB_MORECORE="yes",
+ HUGETLB_RESTRICT_EXE="unknown:malloc")
+ do_test_with_pagesize(system_default_hpage_size, "malloc_manysmall")
+ do_test_with_pagesize(system_default_hpage_size, "malloc_manysmall",
+ LD_PRELOAD="libhugetlbfs.so",
+ HUGETLB_MORECORE="yes")
# After upstream commit: (glibc-2.25.90-688-gd5c3fafc43) glibc has a
# new per-thread caching mechanism that will NOT allow heapshrink test to
@@ -584,29 +601,29 @@ def functional_tests():
# program context (not even with a constructor function), and the tunable
# is only evaluated during malloc() initialization.
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0")
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
- LD_PRELOAD="libheapshrink.so")
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
- LD_PRELOAD="libhugetlbfs.so",
- HUGETLB_MORECORE="yes")
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
- LD_PRELOAD="libhugetlbfs.so libheapshrink.so",
- HUGETLB_MORECORE="yes")
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
- LD_PRELOAD="libheapshrink.so",
- HUGETLB_MORECORE="yes",
- HUGETLB_MORECORE_SHRINK="yes")
- do_test("heapshrink",
- GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
- LD_PRELOAD="libhugetlbfs.so libheapshrink.so",
- HUGETLB_MORECORE="yes",
- HUGETLB_MORECORE_SHRINK="yes")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
+ LD_PRELOAD="libheapshrink.so")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
+ LD_PRELOAD="libhugetlbfs.so",
+ HUGETLB_MORECORE="yes")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
+ LD_PRELOAD="libhugetlbfs.so libheapshrink.so",
+ HUGETLB_MORECORE="yes")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
+ LD_PRELOAD="libheapshrink.so",
+ HUGETLB_MORECORE="yes",
+ HUGETLB_MORECORE_SHRINK="yes")
+ do_test_with_pagesize(system_default_hpage_size, "heapshrink",
+ GLIBC_TUNABLES="glibc.malloc.tcache_count=0",
+ LD_PRELOAD="libhugetlbfs.so libheapshrink.so",
+ HUGETLB_MORECORE="yes",
+ HUGETLB_MORECORE_SHRINK="yes")
do_test("heap-overflow", HUGETLB_VERBOSE="1", HUGETLB_MORECORE="yes")
--
1.8.3.1

@ -0,0 +1,53 @@
From 4ba60a2f5c3f5405c599caddc5a124c5781c9beb Mon Sep 17 00:00:00 2001
Message-Id: <4ba60a2f5c3f5405c599caddc5a124c5781c9beb.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:35 +0530
Subject: [RHEL7 PATCH 08/31] hugeutils: Make writing a ulong to a file more
reliable
This makes file_write_ulong() more reliable in terms of error
detection for certain cases like writing an invalid value to
a file under procfs or sysfs. Also, using fprintf() does not
guarantee that errno would be set under such circumstances.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
hugeutils.c | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/hugeutils.c b/hugeutils.c
index 60488e8..fc64946 100644
--- a/hugeutils.c
+++ b/hugeutils.c
@@ -219,17 +219,18 @@ long file_read_ulong(char *file, const char *tag)
int file_write_ulong(char *file, unsigned long val)
{
- FILE *f;
- int ret;
+ int fd, ret, buflen;
+ char buf[20];
- f = fopen(file, "w");
- if (!f) {
+ fd = open(file, O_WRONLY);
+ if (fd < 0) {
ERROR("Couldn't open %s: %s\n", file, strerror(errno));
return -1;
}
- ret = fprintf(f, "%lu", val);
- fclose(f);
+ buflen = sprintf(buf, "%lu", val);
+ ret = write(fd, buf, buflen);
+ close(fd);
return ret > 0 ? 0 : -1;
}
--
1.8.3.1

@ -0,0 +1,59 @@
From a4879cc4f88b560958950d9277ba0df487b145f4 Mon Sep 17 00:00:00 2001
Message-Id: <a4879cc4f88b560958950d9277ba0df487b145f4.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:36 +0530
Subject: [RHEL7 PATCH 09/31] tests: Add utility to check if huge pages are
gigantic
This adds a test utility to check if the currently selected
huge page size corresponds to that of a gigantic page.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/hugetests.h | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/tests/hugetests.h b/tests/hugetests.h
index e3179e6..bc4e16a 100644
--- a/tests/hugetests.h
+++ b/tests/hugetests.h
@@ -22,6 +22,7 @@
#include <errno.h>
#include <string.h>
+#include <unistd.h>
#include "libhugetlbfs_privutils.h"
#include "libhugetlbfs_testprobes.h"
@@ -136,6 +137,24 @@ static inline long check_hugepagesize()
return __hpage_size;
}
+static inline void check_if_gigantic_page(void)
+{
+ long page_size, hpage_size, max_order;
+ FILE *fp;
+
+ page_size = sysconf(_SC_PAGESIZE);
+ hpage_size = gethugepagesize();
+ fp = popen("cat /proc/pagetypeinfo | "
+ "awk '/Free pages count per migrate type at order/ "
+ "{print $NF}'", "r");
+ if (!fp || fscanf(fp, "%lu", &max_order) < 0)
+ FAIL("Couldn't determine max page allocation order");
+
+ pclose(fp);
+ if (hpage_size > ((1 << max_order) * page_size))
+ CONFIG("Gigantic pages are not supported");
+}
+
int using_system_hpage_size(const char *mount);
/* WARNING: Racy -- use for test cases only! */
--
1.8.3.1

@ -0,0 +1,49 @@
From 2d41ec367199f9f9d4b7caf00c3be25030a7a873 Mon Sep 17 00:00:00 2001
Message-Id: <2d41ec367199f9f9d4b7caf00c3be25030a7a873.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:37 +0530
Subject: [RHEL7 PATCH 10/31] tests: counters: Skip if using gigantic huge
pages
The kernel does not allow setting an overcommit limit for
gigantic huge pages, i.e. any page size beyond the max page
allocation order. For such cases, nr_overcommit_hugepages
cannot be modified and is always zero. So, skip this test
as mmap() using a hugetlbfs file descriptor will fail when
both nr_hugepages and nr_overcommit_hugepages are zero.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/counters.c | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/tests/counters.c b/tests/counters.c
index 0284809..34b1ef3 100644
--- a/tests/counters.c
+++ b/tests/counters.c
@@ -83,7 +83,17 @@ void verify_dynamic_pool_support(void)
saved_oc_hugepages = get_huge_page_counter(hpage_size, HUGEPAGES_OC);
if (saved_oc_hugepages < 0)
FAIL("Kernel appears to lack dynamic hugetlb pool support");
- set_nr_overcommit_hugepages(hpage_size, 10);
+ if (set_nr_overcommit_hugepages(hpage_size, 10) < 0) {
+ /*
+ * In case writing to nr_overcommit_hugepages failed with the
+ * reason that it was an attempt to write an invalid argument,
+ * it might be because the page size corresponds to gigantic
+ * pages which do not support this feature.
+ */
+ if (errno == EINVAL)
+ check_if_gigantic_page();
+ FAIL("Couldn't set overcommit limit");
+ }
}
void bad_value(int line, const char *name, long expect, long actual)
--
1.8.3.1

@ -0,0 +1,72 @@
From 8cc33a134681892a71a4f67397bb13a541bb463e Mon Sep 17 00:00:00 2001
Message-Id: <8cc33a134681892a71a4f67397bb13a541bb463e.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:38 +0530
Subject: [RHEL7 PATCH 11/31] hugeutils: Add utility to check if slices are
supported
This adds a utility to check if the current processor
architecture supports slices. Slices are used to divide
up a virtual address space and put certain restrictions
like on powerpc64 with Hash MMU where one can have only
one page size per slice.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
hugeutils.c | 21 +++++++++++++++++++++
libhugetlbfs_privutils.h | 3 +++
2 files changed, 24 insertions(+)
diff --git a/hugeutils.c b/hugeutils.c
index fc64946..e573622 100644
--- a/hugeutils.c
+++ b/hugeutils.c
@@ -800,6 +800,27 @@ int hpool_sizes(struct hpage_pool *pools, int pcnt)
return (which < pcnt) ? which : -1;
}
+int arch_has_slice_support(void)
+{
+#ifdef __powerpc64__
+ char mmu_type[16];
+ FILE *fp;
+
+ fp = popen("cat /proc/cpuinfo | grep MMU | awk '{ print $3}'", "r");
+ if (!fp || fscanf(fp, "%s", mmu_type) < 0) {
+ ERROR("Failed to determine MMU type\n");
+ abort();
+ }
+
+ pclose(fp);
+ return strcmp(mmu_type, "Hash") == 0;
+#elif defined(__powerpc__) && !defined(PPC_NO_SEGMENTS)
+ return 1;
+#else
+ return 0;
+#endif
+}
+
/*
* If we have a default page size then we support hugepages.
*/
diff --git a/libhugetlbfs_privutils.h b/libhugetlbfs_privutils.h
index 149e42f..8b12fed 100644
--- a/libhugetlbfs_privutils.h
+++ b/libhugetlbfs_privutils.h
@@ -53,6 +53,9 @@ int set_nr_hugepages(long pagesize, unsigned long val);
#define set_nr_overcommit_hugepages __pu_set_nr_overcommit_hugepages
int set_nr_overcommit_hugepages(long pagesize, unsigned long val);
+#define arch_has_slice_support __pu_arch_has_slice_support
+int arch_has_slice_support(void);
+
#define kernel_has_hugepages __pu_kernel_has_hugepages
int kernel_has_hugepages(void);
--
1.8.3.1

@ -0,0 +1,38 @@
From 1329c4f5f4d201724d379d43dc5d516d1c9356dc Mon Sep 17 00:00:00 2001
Message-Id: <1329c4f5f4d201724d379d43dc5d516d1c9356dc.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Thu, 15 Aug 2019 13:08:39 +0530
Subject: [RHEL7 PATCH 12/31] tests: brk-near-huge: Fix next chunk computation
for powerpc64
For powerpc64, the use of slices applies only to Hash MMU.
Hence, when determining the next chunk size, ensure that
the address is aligned to the slice size for Hash MMU and
the huge page size otherwise.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/brk_near_huge.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/tests/brk_near_huge.c b/tests/brk_near_huge.c
index f6d1e07..c9662f4 100644
--- a/tests/brk_near_huge.c
+++ b/tests/brk_near_huge.c
@@ -40,6 +40,9 @@
#ifdef __powerpc64__
void *next_chunk(void *addr)
{
+ if (!arch_has_slice_support())
+ return PALIGN(addr, gethugepagesize());
+
if ((unsigned long)addr < 0x100000000UL)
/* 256M segments below 4G */
return PALIGN(addr, 0x10000000UL);
--
1.8.3.1

@ -0,0 +1,143 @@
From 9fe6594da91e86280c9d71877a91cee83aaedae6 Mon Sep 17 00:00:00 2001
Message-Id: <9fe6594da91e86280c9d71877a91cee83aaedae6.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Wed, 12 Jun 2019 12:34:30 +0530
Subject: [RHEL7 PATCH 13/31] elflink: Fix program header address calculation
This fixes the virtual address calculation for the ELF program
header. Based on the man page of dl_iterate_phdr(), the location
of a particular program header in virtual memory should be the
sum of the base address of the shared object and the segment's
virtual address.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
elflink.c | 40 ++++++++++++++++++++++++----------------
1 file changed, 24 insertions(+), 16 deletions(-)
diff --git a/elflink.c b/elflink.c
index ffc84dd..1150bde 100644
--- a/elflink.c
+++ b/elflink.c
@@ -374,7 +374,8 @@ static int get_shared_file_name(struct seg_info *htlb_seg_info, char *file_path)
}
/* Find the .dynamic program header */
-static int find_dynamic(Elf_Dyn **dyntab, const Elf_Phdr *phdr, int phnum)
+static int find_dynamic(Elf_Dyn **dyntab, const ElfW(Addr) addr,
+ const Elf_Phdr *phdr, int phnum)
{
int i = 1;
@@ -382,7 +383,7 @@ static int find_dynamic(Elf_Dyn **dyntab, const Elf_Phdr *phdr, int phnum)
++i;
}
if (phdr[i].p_type == PT_DYNAMIC) {
- *dyntab = (Elf_Dyn *)phdr[i].p_vaddr;
+ *dyntab = (Elf_Dyn *)(addr + phdr[i].p_vaddr);
return 0;
} else {
DEBUG("No dynamic segment found\n");
@@ -473,7 +474,8 @@ ElfW(Word) __attribute__ ((weak)) plt_extrasz(ElfW(Dyn) *dyntab)
* include these initialized variables in our copy.
*/
-static void get_extracopy(struct seg_info *seg, const Elf_Phdr *phdr, int phnum)
+static void get_extracopy(struct seg_info *seg, const ElfW(Addr) addr,
+ const Elf_Phdr *phdr, int phnum)
{
Elf_Dyn *dyntab; /* dynamic segment table */
Elf_Sym *symtab = NULL; /* dynamic symbol table */
@@ -492,7 +494,7 @@ static void get_extracopy(struct seg_info *seg, const Elf_Phdr *phdr, int phnum)
goto bail2;
/* Find dynamic program header */
- ret = find_dynamic(&dyntab, phdr, phnum);
+ ret = find_dynamic(&dyntab, addr, phdr, phnum);
if (ret < 0)
goto bail;
@@ -608,7 +610,8 @@ static unsigned long hugetlb_prev_slice_end(unsigned long addr)
/*
* Store a copy of the given program header
*/
-static int save_phdr(int table_idx, int phnum, const ElfW(Phdr) *phdr)
+static int save_phdr(int table_idx, int phnum, const ElfW(Addr) addr,
+ const ElfW(Phdr) *phdr)
{
int prot = 0;
@@ -626,7 +629,7 @@ static int save_phdr(int table_idx, int phnum, const ElfW(Phdr) *phdr)
if (phdr->p_flags & PF_X)
prot |= PROT_EXEC;
- htlb_seg_table[table_idx].vaddr = (void *) phdr->p_vaddr;
+ htlb_seg_table[table_idx].vaddr = (void *)(addr + phdr->p_vaddr);
htlb_seg_table[table_idx].filesz = phdr->p_filesz;
htlb_seg_table[table_idx].memsz = phdr->p_memsz;
htlb_seg_table[table_idx].prot = prot;
@@ -634,8 +637,8 @@ static int save_phdr(int table_idx, int phnum, const ElfW(Phdr) *phdr)
INFO("Segment %d (phdr %d): %#0lx-%#0lx (filesz=%#0lx) "
"(prot = %#0x)\n", table_idx, phnum,
- (unsigned long) phdr->p_vaddr,
- (unsigned long) phdr->p_vaddr + phdr->p_memsz,
+ (unsigned long) addr + phdr->p_vaddr,
+ (unsigned long) addr + phdr->p_vaddr + phdr->p_memsz,
(unsigned long) phdr->p_filesz, (unsigned int) prot);
return 0;
@@ -718,16 +721,19 @@ int parse_elf_normal(struct dl_phdr_info *info, size_t size, void *data)
seg_psize = segment_requested_page_size(&info->dlpi_phdr[i]);
if (seg_psize != page_size) {
- if (save_phdr(htlb_num_segs, i, &info->dlpi_phdr[i]))
+ if (save_phdr(htlb_num_segs, i, info->dlpi_addr,
+ &info->dlpi_phdr[i]))
return 1;
get_extracopy(&htlb_seg_table[htlb_num_segs],
- &info->dlpi_phdr[0], info->dlpi_phnum);
+ info->dlpi_addr, info->dlpi_phdr,
+ info->dlpi_phnum);
htlb_seg_table[htlb_num_segs].page_size = seg_psize;
htlb_num_segs++;
}
- start = ALIGN_DOWN(info->dlpi_phdr[i].p_vaddr, seg_psize);
- end = ALIGN(info->dlpi_phdr[i].p_vaddr +
- info->dlpi_phdr[i].p_memsz, seg_psize);
+ start = ALIGN_DOWN(info->dlpi_addr +
+ info->dlpi_phdr[i].p_vaddr, seg_psize);
+ end = ALIGN(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr +
+ info->dlpi_phdr[i].p_memsz, seg_psize);
segments[num_segs].page_size = seg_psize;
segments[num_segs].start = start;
@@ -771,8 +777,9 @@ int parse_elf_partial(struct dl_phdr_info *info, size_t size, void *data)
* in this forced way won't violate any contiguity
* constraints.
*/
- vaddr = hugetlb_next_slice_start(info->dlpi_phdr[i].p_vaddr);
- gap = vaddr - info->dlpi_phdr[i].p_vaddr;
+ vaddr = hugetlb_next_slice_start(info->dlpi_addr +
+ info->dlpi_phdr[i].p_vaddr);
+ gap = vaddr - (info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
slice_end = hugetlb_slice_end(vaddr);
/*
* we should stop remapping just before the slice
@@ -795,7 +802,8 @@ int parse_elf_partial(struct dl_phdr_info *info, size_t size, void *data)
}
memsz = hugetlb_prev_slice_end(vaddr + memsz) - vaddr;
- if (save_phdr(htlb_num_segs, i, &info->dlpi_phdr[i]))
+ if (save_phdr(htlb_num_segs, i, info->dlpi_addr,
+ &info->dlpi_phdr[i]))
return 1;
/*
--
1.8.3.1

@ -0,0 +1,64 @@
From 5022d5f86d02882a11700825258ecdba8dee683c Mon Sep 17 00:00:00 2001
Message-Id: <5022d5f86d02882a11700825258ecdba8dee683c.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Wed, 12 Jun 2019 12:34:31 +0530
Subject: [RHEL7 PATCH 14/31] elflink: powerpc64: Use slices based on MMU type
For powerpc64, the concept of slices is not applicable to the
recently introduced Radix MMU. So, slice boundaries should be
calculated based on the MMU type.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
elflink.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/elflink.c b/elflink.c
index 1150bde..a6bd44c 100644
--- a/elflink.c
+++ b/elflink.c
@@ -569,6 +569,10 @@ bail2:
*/
static unsigned long hugetlb_slice_start(unsigned long addr)
{
+ if (!arch_has_slice_support()) {
+ return ALIGN_DOWN(addr, gethugepagesize());
+ }
+
#if defined(__powerpc64__)
if (addr < SLICE_LOW_TOP)
return ALIGN_DOWN(addr, SLICE_LOW_SIZE);
@@ -578,13 +582,15 @@ static unsigned long hugetlb_slice_start(unsigned long addr)
return ALIGN_DOWN(addr, SLICE_HIGH_SIZE);
#elif defined(__powerpc__) && !defined(PPC_NO_SEGMENTS)
return ALIGN_DOWN(addr, SLICE_LOW_SIZE);
-#else
- return ALIGN_DOWN(addr, gethugepagesize());
#endif
}
static unsigned long hugetlb_slice_end(unsigned long addr)
{
+ if (!arch_has_slice_support()) {
+ return ALIGN_UP(addr, gethugepagesize()) - 1;
+ }
+
#if defined(__powerpc64__)
if (addr < SLICE_LOW_TOP)
return ALIGN_UP(addr, SLICE_LOW_SIZE) - 1;
@@ -592,8 +598,6 @@ static unsigned long hugetlb_slice_end(unsigned long addr)
return ALIGN_UP(addr, SLICE_HIGH_SIZE) - 1;
#elif defined(__powerpc__) && !defined(PPC_NO_SEGMENTS)
return ALIGN_UP(addr, SLICE_LOW_SIZE) - 1;
-#else
- return ALIGN_UP(addr, gethugepagesize()) - 1;
#endif
}
--
1.8.3.1

@ -0,0 +1,62 @@
From adb3feea5dde087d7bb8017e5b8da2da548473bf Mon Sep 17 00:00:00 2001
Message-Id: <adb3feea5dde087d7bb8017e5b8da2da548473bf.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Wed, 12 Jun 2019 12:34:32 +0530
Subject: [RHEL7 PATCH 15/31] ld.hugetlbfs: powerpc64: Add support for
different huge page sizes
This ensures that the page and slice sizes are determined by
looking at the default huge page size and MMU type rather than
having them hardcoded.
This is important because powerpc64 supports different huge
page sizes based on the MMU type. Hash MMU supports 16MB and
16GB whereas Radix MMU supports 2MB and 1GB.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
ld.hugetlbfs | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/ld.hugetlbfs b/ld.hugetlbfs
index 388f7b4..6ee8238 100755
--- a/ld.hugetlbfs
+++ b/ld.hugetlbfs
@@ -105,8 +105,16 @@ fi
MB=$((1024*1024))
case "$EMU" in
-elf32ppclinux|elf64ppc) HPAGE_SIZE=$((16*$MB)) SLICE_SIZE=$((256*$MB)) ;;
-elf64lppc) HPAGE_SIZE=$((16*$MB)) SLICE_SIZE=$((256*$MB)) ;;
+elf32ppclinux) HPAGE_SIZE=$((16*$MB)) SLICE_SIZE=$((256*$MB)) ;;
+elf64ppc|elf64lppc)
+ hpage_kb=$(cat /proc/meminfo | grep Hugepagesize: | awk '{print $2}')
+ MMU_TYPE=$(cat /proc/cpuinfo | grep MMU | awk '{ print $3}')
+ HPAGE_SIZE=$((hpage_kb * 1024))
+ if [ "$MMU_TYPE" == "Hash" ] ; then
+ SLICE_SIZE=$((256*$MB))
+ else
+ SLICE_SIZE=$HPAGE_SIZE
+ fi ;;
elf_i386|elf_x86_64) HPAGE_SIZE=$((4*$MB)) SLICE_SIZE=$HPAGE_SIZE ;;
elf_s390|elf64_s390) HPAGE_SIZE=$((1*$MB)) SLICE_SIZE=$HPAGE_SIZE ;;
armelf*_linux_eabi|aarch64elf*|aarch64linux*)
@@ -124,6 +132,11 @@ if [ "$HTLB_ALIGN" == "slice" ]; then
case "$EMU" in
armelf*_linux_eabi|aarch64elf*|aarch64linux*) HTLBOPTS="$HTLBOPTS -Ttext-segment=$SLICE_SIZE" ;;
elf_i386) HTLBOPTS="$HTLBOPTS -Ttext-segment=0x08000000" ;;
+ elf64ppc|elf64lppc)
+ if [ "$MMU_TYPE" == "Hash" ] ; then
+ printf -v TEXTADDR "%x" "$SLICE_SIZE"
+ HTLBOPTS="$HTLBOPTS -Ttext-segment=$TEXTADDR"
+ fi ;;
esac
fi
--
1.8.3.1

@ -0,0 +1,102 @@
From 4dfdd96a6b4bd019210c9a44de42369aae772b98 Mon Sep 17 00:00:00 2001
Message-Id: <4dfdd96a6b4bd019210c9a44de42369aae772b98.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Wed, 12 Jun 2019 12:34:33 +0530
Subject: [RHEL7 PATCH 16/31] elflink: tests: Run tests only for default huge
page size
The elflink tests (linkhuge, linkhuge-nofd, linkhuge-rw and
linkshare) are usually linked in a way that ensures that the
ELF segment boundaries are aligned to the kernel's default
huge page size. Hence, run these tests only for the kernel's
default huge page size as the program segments will not be
remapped otherwise.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 41 ++++++++++++++++++++++++++---------------
1 file changed, 26 insertions(+), 15 deletions(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 70c5a6a..94000ea 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -376,10 +376,11 @@ def do_elflink_test(cmd, **env):
"""
Run an elflink test case, skipping known-bad configurations.
"""
- for p in pagesizes:
- for b in wordsizes_by_pagesize[p]:
- if b in linkhuge_wordsizes: run_test(p, b, cmd, **env)
- else: skip_test(p, b, cmd, **env)
+ for b in wordsizes_by_pagesize[system_default_hpage_size]:
+ if b in linkhuge_wordsizes:
+ run_test(system_default_hpage_size, b, cmd, **env)
+ else:
+ skip_test(system_default_hpage_size, b, cmd, **env)
def elflink_test(cmd, **env):
"""
@@ -388,9 +389,10 @@ def elflink_test(cmd, **env):
Test various combinations of: preloading libhugetlbfs, B vs. BDT link
modes, minimal copying on or off, and disabling segment remapping.
"""
- do_test(cmd, **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd, **env)
# Test we don't blow up if not linked for hugepage
- do_test(cmd, LD_PRELOAD="libhugetlbfs.so", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ LD_PRELOAD="libhugetlbfs.so", **env)
# Only run custom ldscript tests when -l option is set
if not custom_ldscripts:
@@ -412,16 +414,23 @@ def elflink_rw_test(cmd, **env):
Test various combinations of: remapping modes and minimal copy on or off.
"""
# Basic tests: None, Read-only, Write-only, Read-Write, exlicit disable
- do_test(cmd, **env)
- do_test(cmd, HUGETLB_ELFMAP="R", **env)
- do_test(cmd, HUGETLB_ELFMAP="W", **env)
- do_test(cmd, HUGETLB_ELFMAP="RW", **env)
- do_test(cmd, HUGETLB_ELFMAP="no", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd, **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_ELFMAP="R", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_ELFMAP="W", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_ELFMAP="RW", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_ELFMAP="no", **env)
# Test we don't blow up if HUGETLB_MINIMAL_COPY is disabled
- do_test(cmd, HUGETLB_MINIMAL_COPY="no", HUGETLB_ELFMAP="R", **env)
- do_test(cmd, HUGETLB_MINIMAL_COPY="no", HUGETLB_ELFMAP="W", **env)
- do_test(cmd, HUGETLB_MINIMAL_COPY="no", HUGETLB_ELFMAP="RW", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_MINIMAL_COPY="no", HUGETLB_ELFMAP="R", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_MINIMAL_COPY="no", HUGETLB_ELFMAP="W", **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_MINIMAL_COPY="no", HUGETLB_ELFMAP="RW", **env)
def elfshare_test(cmd, **env):
"""
@@ -458,7 +467,9 @@ def elflink_rw_and_share_test(cmd, **env):
clear_hpages()
for mode in ("R", "W", "RW"):
for i in range(2):
- do_test(cmd, HUGETLB_ELFMAP=mode, HUGETLB_SHARE=repr(i), **env)
+ do_test_with_pagesize(system_default_hpage_size, cmd,
+ HUGETLB_ELFMAP=mode, HUGETLB_SHARE=repr(i),
+ **env)
clear_hpages()
def setup_shm_sysctl(limit):
--
1.8.3.1

@ -0,0 +1,73 @@
From 421dbc6d9dfc66f249dde787a69327d22979ca74 Mon Sep 17 00:00:00 2001
Message-Id: <421dbc6d9dfc66f249dde787a69327d22979ca74.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Fri, 16 Aug 2019 11:45:07 +0530
Subject: [RHEL7 PATCH 17/31] tests: Update utility to get free and total huge
pages by size
This makes the utilities to get the number of free and total
huge pages multi-size aware. If a page size is specified, they
will return counts corresponding to that. Otherwise, they will
return counts for the kernel's default huge page size.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 30 ++++++++++++++++++------------
1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 94000ea..f19024f 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -172,26 +172,32 @@ def results_summary():
print_per_size("Strange test result", R["strange"])
print "**********"
-def free_hpages():
+def free_hpages(size=None):
"""
- Return the number of free huge pages.
+ Return the number of free huge pages for a given size. If size is not
+ passed, use the default huge page size.
- Parse /proc/meminfo to obtain the number of free huge pages for
- the default page size.
- XXX: This function is not multi-size aware yet.
+ Parse /sys/kernel/mm/hugepages/hugepages-<size-in-kB>/free_hugepages to
+ obtain the number of free huge pages for the given page size.
"""
- (rc, out) = bash("grep 'HugePages_Free:' /proc/meminfo | cut -f2 -d:")
+ if size == None: size = system_default_hpage_size
+ size_kb = size / 1024
+ cmd = "cat /sys/kernel/mm/hugepages/hugepages-%dkB/free_hugepages" % size_kb
+ (rc, out) = bash(cmd)
return (rc, int(out))
-def total_hpages():
+def total_hpages(size=None):
"""
- Return the total number of huge pages in the pool.
+ Return the total number of huge pages in the pool for a given size. If
+ size is not passed, use the default huge page size.
- Parse /proc/meminfo to obtain the number of huge pages for the default
- page size.
- XXX: This function is not multi-size aware yet.
+ Parse /sys/kernel/mm/hugepages/hugepages-<size-in-kB>/nr_hugepages to
+ obtain the number of huge pages for the given page size.
"""
- (rc, out) = bash("grep 'HugePages_Total:' /proc/meminfo | cut -f2 -d:")
+ if size == None: size = system_default_hpage_size
+ size_kb = size / 1024
+ cmd = "cat /sys/kernel/mm/hugepages/hugepages-%dkB/nr_hugepages" % size_kb
+ (rc, out) = bash(cmd)
return (rc, int(out))
def hpage_size():
--
1.8.3.1

@ -0,0 +1,54 @@
From d228c0e688e7a0771d30457d21b38d745cea63bf Mon Sep 17 00:00:00 2001
Message-Id: <d228c0e688e7a0771d30457d21b38d745cea63bf.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Fri, 16 Aug 2019 11:45:08 +0530
Subject: [RHEL7 PATCH 18/31] mmap: tests: Run tests with correct huge page
count
This ensures that the mmap-gettest and mmap-cow tests are run
with the correct count of free huge pages. Previously, it was
always using the free page count for the default huge page size
for all huge page sizes. Since these counts can differ, trying
to get more pages via mmap() than what is available in the pool
can make these tests fail.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index f19024f..b132da2 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -697,14 +697,19 @@ def stress_tests():
# Don't update NRPAGES every time like above because we want to catch the
# failures that happen when the kernel doesn't release all of the huge pages
# after a stress test terminates
- (rc, nr_pages) = free_hpages()
+ nr_pages = {p: free_hpages(p)[1] for p in pagesizes}
- do_test(("mmap-gettest", repr(iterations), repr(nr_pages)))
+ for p in pagesizes:
+ cmd = ("mmap-gettest", repr(iterations), repr(nr_pages[p]))
+ do_test_with_pagesize(p, cmd)
- # mmap-cow needs a hugepages for each thread plus one extra
- do_test(("mmap-cow", repr(nr_pages-1), repr(nr_pages)))
+ for p in pagesizes:
+ # mmap-cow needs a hugepage for each thread plus one extra
+ cmd = ("mmap-cow", repr(nr_pages[p]-1), repr(nr_pages[p]))
+ do_test_with_pagesize(p, cmd)
(rc, tot_pages) = total_hpages()
+ nr_pages = nr_pages[system_default_hpage_size]
limit = system_default_hpage_size * tot_pages
threads = 10 # Number of threads for shm-fork
--
1.8.3.1

@ -0,0 +1,32 @@
From 4326f49e3c3246443b52f319cefbc3d296e09e64 Mon Sep 17 00:00:00 2001
Message-Id: <4326f49e3c3246443b52f319cefbc3d296e09e64.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:43 +1000
Subject: [RHEL7 PATCH 19/31] Be explicit about using Python2 in the test
script
Since Python2 is now end-of-life, distros are increasingly not having bare
"python" refer to the Python2 interpreter.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index b132da2..721c1af 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/python2
import subprocess
import types
--
1.8.3.1

@ -0,0 +1,216 @@
From 85b75e22bf685948f417044676de42f2da66a902 Mon Sep 17 00:00:00 2001
Message-Id: <85b75e22bf685948f417044676de42f2da66a902.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:44 +1000
Subject: [RHEL7 PATCH 20/31] Switch test runner script to print function
This is the more modern Python style, and reduces difference to Python3.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 84 ++++++++++++++++++++++++++++--------------------------
1 file changed, 44 insertions(+), 40 deletions(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 721c1af..47eb183 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -1,5 +1,7 @@
#! /usr/bin/python2
+from __future__ import print_function
+
import subprocess
import types
import os
@@ -60,7 +62,7 @@ def snapshot_pool_state():
def run_test_prog(bits, pagesize, cmd, **env):
if paranoid_pool_check:
beforepool = snapshot_pool_state()
- print "Pool state: %s" % str(beforepool)
+ print("Pool state: %s" % str(beforepool))
local_env = os.environ.copy()
local_env.update(env)
@@ -83,9 +85,9 @@ def run_test_prog(bits, pagesize, cmd, **env):
if paranoid_pool_check:
afterpool = snapshot_pool_state()
if afterpool != beforepool:
- print >>sys.stderr, "Hugepage pool state not preserved!"
- print >>sys.stderr, "BEFORE: %s" % str(beforepool)
- print >>sys.stderr, "AFTER: %s" % str(afterpool)
+ print("Hugepage pool state not preserved!", file=sys.stderr)
+ print("BEFORE: %s" % str(beforepool), file=sys.stderr)
+ print("AFTER: %s" % str(afterpool), file=sys.stderr)
sys.exit(98)
return (rc, out)
@@ -143,22 +145,24 @@ def print_per_size(title, values):
Print the results of a given result type on one line. The results for all
page sizes and word sizes are written in a table format.
"""
- print "*%20s: " % title,
+ print("*%20s: " % title, end=" ")
for sz in pagesizes:
- print "%4s %4s " % (values[sz][32], values[sz][64]),
- print
+ print("%4s %4s " % (values[sz][32], values[sz][64]), end="")
+ print()
def results_summary():
"""
Display a summary of the test results
"""
- print "********** TEST SUMMARY"
- print "*%21s" % "",
- for p in pagesizes: print "%-13s " % pretty_page_size(p),
- print
- print "*%21s" % "",
- for p in pagesizes: print "32-bit 64-bit ",
- print
+ print("********** TEST SUMMARY")
+ print("*%21s" % "", end=" ")
+ for p in pagesizes:
+ print("%-13s " % pretty_page_size(p), end="")
+ print()
+ print("*%21s" % "", end=" ")
+ for p in pagesizes:
+ print("32-bit 64-bit ", end="")
+ print()
print_per_size("Total testcases", R["total"])
print_per_size("Skipped", R["skip"])
@@ -170,7 +174,7 @@ def results_summary():
print_per_size("Unexpected PASS", R["xpass"])
print_per_size("Test not present", R["nofile"])
print_per_size("Strange test result", R["strange"])
- print "**********"
+ print("**********")
def free_hpages(size=None):
"""
@@ -276,13 +280,13 @@ def check_hugetlbfs_path():
okbits.append(b)
mounts.append(out)
if len(okbits) == 0:
- print "run_tests.py: No mountpoints available for page size %s" % \
- pretty_page_size(p)
+ print("run_tests.py: No mountpoints available for page size %s" %
+ pretty_page_size(p))
wordsizes_by_pagesize[p] = set()
continue
for b in wordsizes - set(okbits):
- print "run_tests.py: The %i bit word size is not compatible with " \
- "%s pages" % (b, pretty_page_size(p))
+ print("run_tests.py: The %i bit word size is not compatible with " \
+ "%s pages" % (b, pretty_page_size(p)))
wordsizes_by_pagesize[p] = set(okbits)
def check_linkhuge_tests():
@@ -304,10 +308,10 @@ def check_linkhuge_tests():
def print_cmd(pagesize, bits, cmd, env):
if env:
- print ' '.join(['%s=%s' % (k, v) for k, v in env.items()]),
+ print(' '.join(['%s=%s' % (k, v) for k, v in env.items()]), end=" ")
if type(cmd) != types.StringType:
cmd = ' '.join(cmd)
- print "%s (%s: %i):\t" % (cmd, pretty_page_size(pagesize), bits),
+ print("%s (%s: %i):\t" % (cmd, pretty_page_size(pagesize), bits), end="")
sys.stdout.flush()
def run_test(pagesize, bits, cmd, **env):
@@ -327,7 +331,7 @@ def run_test(pagesize, bits, cmd, **env):
print_cmd(pagesize, bits, cmd, env)
(rc, out) = run_test_prog(bits, pagesize, cmd, **env)
- print out
+ print(out)
R["total"][pagesize][bits] += 1
if rc == 0: R["pass"][pagesize][bits] += 1
@@ -348,7 +352,7 @@ def skip_test(pagesize, bits, cmd, **env):
R["total"][pagesize][bits] += 1
R["skip"][pagesize][bits] += 1
print_cmd(pagesize, bits, cmd, env)
- print "SKIPPED"
+ print("SKIPPED")
def do_test(cmd, bits=None, **env):
"""
@@ -495,7 +499,7 @@ def setup_shm_sysctl(limit):
fh = open(f, "w")
fh.write(`limit`)
fh.close()
- print "set shmmax limit to %s" % limit
+ print("set shmmax limit to %s" % limit)
return sysctls
def restore_shm_sysctl(sysctls):
@@ -725,17 +729,17 @@ def stress_tests():
do_test("fallocate_stress.sh")
def print_help():
- print "Usage: %s [options]" % sys.argv[0]
- print "Options:"
- print " -v \t Verbose output."
- print " -V \t Highly verbose output."
- print " -f \t Force all tests."
- print " -t <set> Run test set, allowed are func and stress."
- print " -b <wordsize> Define wordsizes to be used. "
- print " -p <pagesize> Define the page sizes to be used."
- print " -c \t Do a paranoid pool check."
- print " -l \t Use custom ld scripts."
- print " -h \t This help."
+ print("Usage: %s [options]" % sys.argv[0])
+ print("Options:")
+ print(" -v \t Verbose output.")
+ print(" -V \t Highly verbose output.")
+ print(" -f \t Force all tests.")
+ print(" -t <set> Run test set, allowed are func and stress.")
+ print(" -b <wordsize> Define wordsizes to be used. ")
+ print(" -p <pagesize> Define the page sizes to be used.")
+ print(" -c \t Do a paranoid pool check.")
+ print(" -l \t Use custom ld scripts.")
+ print(" -h \t This help.")
sys.exit(0)
def main():
@@ -752,7 +756,7 @@ def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "vVft:b:p:c:lh")
except getopt.GetoptError, err:
- print str(err)
+ print(str(err))
sys.exit(1)
for opt, arg in opts:
if opt == "-v":
@@ -781,8 +785,8 @@ def main():
if len(pagesizes) == 0: pagesizes = get_pagesizes()
if len(pagesizes) == 0:
- print "Unable to find available page sizes, are you sure hugetlbfs"
- print "is mounted and there are available huge pages?"
+ print("Unable to find available page sizes, are you sure hugetlbfs")
+ print("is mounted and there are available huge pages?")
return 1
setup_env(env_override, env_defaults)
@@ -790,8 +794,8 @@ def main():
(rc, system_default_hpage_size) = hpage_size()
if rc != 0:
- print "Unable to find system default hugepage size."
- print "Is hugepage supported included in this kernel?"
+ print("Unable to find system default hugepage size.")
+ print("Is hugepage supported included in this kernel?")
return 1
check_hugetlbfs_path()
--
1.8.3.1

@ -0,0 +1,43 @@
From 5246d996e621274a2cc22282451bb60c10d59227 Mon Sep 17 00:00:00 2001
Message-Id: <5246d996e621274a2cc22282451bb60c10d59227.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:45 +1000
Subject: [RHEL7 PATCH 21/31] Remove backtick operator from test runner script
The `` operator doesn't exist in Python3, so remove it to avoid future
porting problems.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 47eb183..13a404a 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -226,7 +226,7 @@ def clear_hpages():
cleaned up automatically and must be removed to free up the huge pages.
"""
for mount in mounts:
- dir = mount + "/elflink-uid-" + `os.getuid()`
+ dir = mount + "/elflink-uid-" + repr(os.getuid())
for root, dirs, files in os.walk(dir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
@@ -497,7 +497,7 @@ def setup_shm_sysctl(limit):
sysctls[f] = fh.read()
fh.close()
fh = open(f, "w")
- fh.write(`limit`)
+ fh.write(repr(limit))
fh.close()
print("set shmmax limit to %s" % limit)
return sysctls
--
1.8.3.1

@ -0,0 +1,35 @@
From 2f88d3a2b29f181e744cc59f5e0889588f67588f Mon Sep 17 00:00:00 2001
Message-Id: <2f88d3a2b29f181e744cc59f5e0889588f67588f.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:46 +1000
Subject: [RHEL7 PATCH 22/31] tests: Avoid old-style except syntax in the test
runner script
The "except Foo as bar" syntax is the modern style and will be easier to
port to Python3.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 13a404a..f812923 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -755,7 +755,7 @@ def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "vVft:b:p:c:lh")
- except getopt.GetoptError, err:
+ except getopt.GetoptError as err:
print(str(err))
sys.exit(1)
for opt, arg in opts:
--
1.8.3.1

@ -0,0 +1,35 @@
From e5f91fcc3e6bd0a610e47e51891f4c1669d2f8b1 Mon Sep 17 00:00:00 2001
Message-Id: <e5f91fcc3e6bd0a610e47e51891f4c1669d2f8b1.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:47 +1000
Subject: [RHEL7 PATCH 23/31] tests: Avoid explicit type() comparison in runner
script
Using isinstance() is the more modern idiom, and won't cause complications
in porting to Python3.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index f812923..e2025fe 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -309,7 +309,7 @@ def check_linkhuge_tests():
def print_cmd(pagesize, bits, cmd, env):
if env:
print(' '.join(['%s=%s' % (k, v) for k, v in env.items()]), end=" ")
- if type(cmd) != types.StringType:
+ if not isinstance(cmd, str):
cmd = ' '.join(cmd)
print("%s (%s: %i):\t" % (cmd, pretty_page_size(pagesize), bits), end="")
sys.stdout.flush()
--
1.8.3.1

@ -0,0 +1,62 @@
From 3482dcfe74102da1e2d95d8adbc29940c06b1fef Mon Sep 17 00:00:00 2001
Message-Id: <3482dcfe74102da1e2d95d8adbc29940c06b1fef.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:48 +1000
Subject: [RHEL7 PATCH 24/31] tests: Explicitly decode subprocess output
The output we get from subprocesses is logically a sequence of bytes, but
we want to treat it as Python strings, which means decoding it into Unicode
based on some encoding.
In Python2 we can get away with skipping that step, but in Python3 we won't
be able to. So, to get ready, add an explicit decode() step, using the
system default encoding (probably UTF-8 in most cases).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index e2025fe..79e0385 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -48,7 +48,7 @@ def bash(cmd):
except KeyboardInterrupt:
# Abort and mark this a strange test result
return (127, "")
- out = p.stdout.read().strip()
+ out = p.stdout.read().decode().strip()
return (rc, out)
def snapshot_pool_state():
@@ -80,7 +80,7 @@ def run_test_prog(bits, pagesize, cmd, **env):
return (None, "")
except OSError as e:
return (-e.errno, "")
- out = p.stdout.read().strip()
+ out = p.stdout.read().decode().strip()
if paranoid_pool_check:
afterpool = snapshot_pool_state()
@@ -247,9 +247,11 @@ def get_pagesizes():
sizes = set()
out = ""
(rc, out) = bash("../obj/hugeadm --page-sizes")
- if rc != 0 or out == "": return sizes
+ if rc != 0 or out == "":
+ return sizes
- for size in out.split("\n"): sizes.add(int(size))
+ for size in out.split("\n"):
+ sizes.add(int(size))
return sizes
def get_wordsizes():
--
1.8.3.1

@ -0,0 +1,41 @@
From b9b3e12c7be2c5a9ff67b3cdaad8679dbd1fe938 Mon Sep 17 00:00:00 2001
Message-Id: <b9b3e12c7be2c5a9ff67b3cdaad8679dbd1fe938.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:49 +1000
Subject: [RHEL7 PATCH 25/31] tests: Use modern style division in runner script
This is the current norm and will reduce changes for moving to Python3.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 79e0385..2847417 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -1,6 +1,7 @@
#! /usr/bin/python2
from __future__ import print_function
+from __future__ import division
import subprocess
import types
@@ -723,7 +724,7 @@ def stress_tests():
# This is to catch off-by-ones or races in the kernel allocated that
# can make allocating all hugepages a problem
if nr_pages > 1:
- do_shm_test(("shm-fork", repr(threads), repr(nr_pages / 2)), limit)
+ do_shm_test(("shm-fork", repr(threads), repr(nr_pages // 2)), limit)
do_shm_test(("shm-fork", repr(threads), repr(nr_pages)), limit)
do_shm_test(("shm-getraw", repr(nr_pages), "/dev/full"), limit)
--
1.8.3.1

@ -0,0 +1,34 @@
From 9380eba133bcc941437e2b0d664f550f6854d63b Mon Sep 17 00:00:00 2001
Message-Id: <9380eba133bcc941437e2b0d664f550f6854d63b.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sat, 17 Aug 2019 20:59:50 +1000
Subject: [RHEL7 PATCH 26/31] tests: Switch test runner to Python3
Python2 has been end-of-life for a while now, and some distros are no
longer installing it by default.
Previous cleanups mean the script is now both valid Python2 and Python3,
so we can simply change the interpreter.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/run_tests.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 2847417..018264d 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -1,4 +1,4 @@
-#! /usr/bin/python2
+#! /usr/bin/python3
from __future__ import print_function
from __future__ import division
--
1.8.3.1

@ -0,0 +1,115 @@
From 96efdf51429812ec9b09f5ddb6ff24c80719e628 Mon Sep 17 00:00:00 2001
Message-Id: <96efdf51429812ec9b09f5ddb6ff24c80719e628.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: David Gibson <david@gibson.dropbear.id.au>
Date: Sun, 18 Aug 2019 16:03:21 +1000
Subject: [RHEL7 PATCH 27/31] tests: Improve TASK_SIZE detection in
task-size-overrun
task-size-overrun is designed to test kernel behaviour in some edge cases
involving making a hugepage mapping right near the address space limits.
In order to do that, it needs to know the TASK_SIZE of the kernel it's
running on.
Currently it does that with a linear search from the last extant mapping.
But with kernels supporting a very large address space that can take
prohibitively long. We've had problems with that before, resulting in some
hacks to skip a large chunk of address space.
Those hacks are dependent on platform, though, which is ugly and fragile.
Case in point, recent powerpc kernels now support a 4PiB address space,
so the logic we have there is insufficient to finish the search in
reasonable time.
To handle this in a more robust way, this replaces the linear search with
a binary search between the last extant mapping and (2^wordsize).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/task-size-overrun.c | 57 +++++++++++++++++++++++------------------------
1 file changed, 28 insertions(+), 29 deletions(-)
diff --git a/tests/task-size-overrun.c b/tests/task-size-overrun.c
index 914ef65..29b6045 100644
--- a/tests/task-size-overrun.c
+++ b/tests/task-size-overrun.c
@@ -64,45 +64,44 @@ static unsigned long find_last_mapped(void)
return end;
}
+#define ALIGN_DOWN(x,a) ((x) & ~((a) - 1))
+
static unsigned long find_task_size(void)
{
- unsigned long addr;
+ unsigned long low, high; /* PFNs */
void *p;
- addr = find_last_mapped();
- if (!addr || ((addr % getpagesize()) != 0))
- FAIL("Bogus stack end address, 0x%lx!?", addr);
+ low = find_last_mapped();
+ if (!low || ((low % getpagesize()) != 0))
+ FAIL("Bogus stack end address, 0x%lx!?", low);
+ low = low / getpagesize();
+
+ /* This sum should get us (2^(wordsize) - 2 pages) */
+ high = (unsigned long)(-2 * getpagesize()) / getpagesize();
+
+ verbose_printf("Binary searching for task size PFNs 0x%lx..0x%lx\n",
+ low, high);
+
+ while (high > low + 1) {
+ unsigned long pfn = (low + high) / 2;
+ unsigned long addr = pfn * getpagesize();
+
+ assert((pfn >= low) && (pfn <= high));
- while (addr) {
p = mmap64((void *)addr, getpagesize(), PROT_READ,
MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
if (p == MAP_FAILED) {
- verbose_printf("Searching map failed: %s\n", strerror(errno));
- return addr;
+ verbose_printf("Map failed at 0x%lx (%s)\n",
+ addr, strerror(errno));
+ high = pfn;
+ } else {
+ verbose_printf("Map succeeded at 0x%lx\n", addr);
+ munmap(p, getpagesize());
+ low = pfn;
}
- munmap(p, getpagesize());
- addr += getpagesize();
-#if defined(__powerpc64__)
- if (addr > (1UL << 46) && addr < (1UL << 47))
- addr = 1UL << 47; /* 64TB */
- else if (addr > (1UL << 47) && addr < (1UL << 48))
- addr = 1UL << 48; /* 128TB */
- else if (addr > (1UL << 48) && addr < (1UL << 49))
- addr = 1UL << 49; /* 512TB */
- else if (addr > (1UL << 49) && addr < (1UL << 50))
- addr = 1UL << 50; /* 1PB */
- else if (addr > (1UL << 50) && addr < (1UL << 51))
- addr = 1UL << 51; /* 2PB */
- else if (addr > (1UL << 51) && addr < (1UL << 52))
- addr = 1UL << 52; /* 4PB */
-#endif
-#if defined(__s390x__)
- if (addr > (1UL << 42) && addr < (1UL << 53))
- addr = 1UL << 53;
-#endif
}
- /* addr wrapped around */
- return 0;
+
+ return low * getpagesize();
}
int main(int argc, char *argv[])
--
1.8.3.1

@ -0,0 +1,143 @@
From 0d29e25727e5e112de48ea2d4efbd99d378ba3ed Mon Sep 17 00:00:00 2001
Message-Id: <0d29e25727e5e112de48ea2d4efbd99d378ba3ed.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Eric B Munson <eric@munsonfam.org>
Date: Sat, 17 Aug 2019 13:59:58 -0400
Subject: [RHEL7 PATCH 28/31] Remove man page for cpupcstat
This script was deleted some time ago, remove the man page.
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
man/cpupcstat.8 | 117 --------------------------------------------------------
1 file changed, 117 deletions(-)
delete mode 100644 man/cpupcstat.8
diff --git a/man/cpupcstat.8 b/man/cpupcstat.8
deleted file mode 100644
index d84a726..0000000
--- a/man/cpupcstat.8
+++ /dev/null
@@ -1,117 +0,0 @@
-.\" Hey, EMACS: -*- nroff -*-
-.\" First parameter, NAME, should be all caps
-.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
-.\" other parameters are allowed: see man(7), man(1)
-.TH CPUCPSTAT 8 "9 June, 2009"
-.\" Please adjust this date whenever revising the manpage.
-.\"
-.\" Some roff macros, for reference:
-.\" .nh disable hyphenation
-.\" .hy enable hyphenation
-.\" .ad l left justify
-.\" .ad b justify to both left and right margins
-.\" .nf disable filling
-.\" .fi enable filling
-.\" .br insert line break
-.\" .sp <n> insert n+1 empty lines
-.\" for manpage-specific macros, see man(7)
-.SH NAME
-cpupcstat \- Measure the DTLB miss rate
-.SH SYNOPSIS
-.B cpupcstat [options] [target]
-.SH DESCRIPTION
-\fBcpupcstat\fP uses oprofile to measure the DTLB miss rate of a
-specified application or the kernel. It configures oprofile to count the
-number of DTLB misses, optionally starts the \fBtarget\fP, and reports on the
-miss rate over a specified interval as \fBtarget\fP executes.
-
-The following options can be used to configure how \fBcpupcstat\fP works:
-
-.TP
-.B --vmlinux </path/to/vmlinux>
-
-This allows the user to specify where the appropriate vmlinux file is for their
-kernel. If this is not specified, /boot/vmlinux\-\`uname \-r\` will be used.
-
-.TP
-.B --delay <seconds>
-
-This allows the user to specify the reporting interval. The default is 10
-seconds.
-
-.TP
-.B --target-global
-
-Gather statistics for all processes and the kernel running in the system.
-
-.TP
-.B --target-pid <pid>
-
-This allows the user to specify the pid of a process already that is already
-running. If this option is specified, \fBtarget\fP will be ignored.
-
-.TP
-.B --real-target <real-target>
-
-Use this to specify the real name of the program to monitor if the \fBtarget\fP
-is a launcher script. When this is specified, \fBtarget\fP is executed but the
-report will be for \fBreal-target\fP.
-
-.TP
-.B --time-limit <sec>
-
-This option sets the time limit for monitoring. If this is specified the
-\fBtarget\fP or \fBpid\fP will only be monitored for \fBsec\fP seconds. The
-default continues monitoring while \fBtarget\fP or \fBpid\fP are still alive.
-
-.TP
-.B --kernel
-
-This allows the user to request DTLB miss rate data be collected for the kernel
-as well as the \fBtarget\fP.
-
-.TP
-.B --misses-per-instruction
-
-This option requests that the ratio of instructions retired per TLB miss.
-
-.TP
-.B --misses-per-cycle
-
-This option requests that the ratio of CPU cycles per TLB miss.
-
-.TP
-.B --time-servicing
-
-This option requests that the percentage of CPU cycles spent servicing TLB
-misses is displayed when \fBcpupcstat\fB exits. To use this option the cost
-in CPU cycles for a single TLB miss must be specified using either the
-\fB--cost-config\fB option or the \fBtlbmiss_cost.sh\fB script.
-
-.TP
-.B --cost-config </path/to/config>
-
-This option tells \fBcpupcstat\fB that the cost in CPU cycles of a TLB miss
-can be found in the specified file, it should be specified as:
-
-TLB_MISS_COST=XX
-
-Where XX is the cost in cycles. This option is only used with the
-\fB--time-servicing\fB option.
-
-.TP
-.B --force-oprofile
-
-\fBcpupcstat\fP prefers the perf tool for data collection, only using oprofile
-if perf is not present or supported. This option will force \fBcpupcstat\fP to
-use oprofile for data collection.
-
-.SH SEE ALSO
-.I oprofile(1)
-.I perf(1)
-.I tlbmiss_cost.sh(8)
-.br
-.SH AUTHORS
-Eric B Munson <ebmunson@us.ibm.com> is the primary author. See the documentation
-for other contributors.
-
--
1.8.3.1

@ -0,0 +1,44 @@
From 413573f442f1abbea47e54683758281e2a770a68 Mon Sep 17 00:00:00 2001
Message-Id: <413573f442f1abbea47e54683758281e2a770a68.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Eric B Munson <eric@munsonfam.org>
Date: Sat, 17 Aug 2019 13:59:05 -0400
Subject: [RHEL7 PATCH 29/31] Fix spelling of khugepaged options in hugeadm
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
hugeadm.c | 2 +-
man/hugeadm.8 | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/hugeadm.c b/hugeadm.c
index fe4211d..62e13ec 100644
--- a/hugeadm.c
+++ b/hugeadm.c
@@ -112,7 +112,7 @@ void print_usage()
CONT("should scan on each pass");
OPTION("--thp-khugepaged-scan-sleep <milliseconds>", "Time in ms to sleep between");
CONT("khugepaged passes");
- OPTION("--thp-khugepages-alloc-sleep <milliseconds>", "Time in ms for khugepaged");
+ OPTION("--thp-khugepaged-alloc-sleep <milliseconds>", "Time in ms for khugepaged");
CONT("to wait if there was a huge page allocation failure");
OPTION("--pool-pages-max <size|DEFAULT>:[+|-]<pagecount|memsize<G|M|K>>", "");
CONT("Adjust pool 'size' upper bound");
diff --git a/man/hugeadm.8 b/man/hugeadm.8
index 28de91e..6f17800 100644
--- a/man/hugeadm.8
+++ b/man/hugeadm.8
@@ -266,7 +266,7 @@ Configure the number of pages that khugepaged should scan on each pass
Configure how many milliseconds khugepaged should wait between passes
.TP
-.B --thp-khugepages-alloc-sleep <milliseconds>
+.B --thp-khugepaged-alloc-sleep <milliseconds>
Configure how many milliseconds khugepaged should wait after failing to
allocate a huge page to throttle the next attempt.
--
1.8.3.1

@ -0,0 +1,36 @@
From 1c69af9d9c53361f64c181d7b8ed7936299f9201 Mon Sep 17 00:00:00 2001
Message-Id: <1c69af9d9c53361f64c181d7b8ed7936299f9201.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Mon, 19 Aug 2019 11:46:25 +0530
Subject: [RHEL7 PATCH 30/31] Makefile: Remove cpupcstat from man page target
This fixes the man page installation target by removing
a reference to the man page for the deprecated cpupcstat
script.
Fixes: 0d29e25 ("Remove man page for cpupcstat")
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index 51e41f0..a107a62 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,7 @@ INSTALL_MAN3 = get_huge_pages.3 get_hugepage_region.3 gethugepagesize.3 \
gethugepagesizes.3 getpagesizes.3 hugetlbfs_find_path.3 \
hugetlbfs_test_path.3 hugetlbfs_unlinked_fd.3
INSTALL_MAN7 = libhugetlbfs.7
-INSTALL_MAN8 = hugectl.8 hugeedit.8 hugeadm.8 cpupcstat.8
+INSTALL_MAN8 = hugectl.8 hugeedit.8 hugeadm.8
LDSCRIPT_TYPES = B BDT
LDSCRIPT_DIST_ELF = elf32ppclinux elf64ppc elf_i386 elf_x86_64
INSTALL_OBJSCRIPT = ld.hugetlbfs
--
1.8.3.1

@ -0,0 +1,51 @@
From e9482399d7eee7199a4a31fe943c940f52a245ba Mon Sep 17 00:00:00 2001
Message-Id: <e9482399d7eee7199a4a31fe943c940f52a245ba.1566225007.git.aquini@redhat.com>
In-Reply-To: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
References: <d42f467a923dfc09309acb7a83b42e3285fbd8f4.1566225007.git.aquini@redhat.com>
From: Sandipan Das <sandipan@linux.ibm.com>
Date: Mon, 19 Aug 2019 14:48:38 +0530
Subject: [RHEL7 PATCH 31/31] tests: noresv-preserve-resv-page: Fix failure in
case of overcommit
This adds an additional check to see if the mapping created
with MAP_NORESERVE does not raise a SIGBUS upon being written
to because nr_overcommit_pages is set to a non-zero value and
surplus pages gets provisioned. In this case, the test should
pass.
Signed-off-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Eric B Munson <eric@munsonfam.org>
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/noresv-preserve-resv-page.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/tests/noresv-preserve-resv-page.c b/tests/noresv-preserve-resv-page.c
index b7b8043..b93bf36 100644
--- a/tests/noresv-preserve-resv-page.c
+++ b/tests/noresv-preserve-resv-page.c
@@ -66,6 +66,7 @@ int main(int argc, char *argv[])
{
long hpage_size;
int nr_hugepages;
+ int surp_hugepages;
int fd1, fd2, err;
char *p, *q;
struct sigaction sa = {
@@ -104,6 +105,13 @@ int main(int argc, char *argv[])
verbose_printf("Write to %p to steal reserved page\n", q);
+ surp_hugepages = get_huge_page_counter(hpage_size, HUGEPAGES_SURP);
test_write(q);
+
+ /* Provisioning succeeded because of overcommit */
+ if (get_huge_page_counter(hpage_size, HUGEPAGES_SURP) ==
+ surp_hugepages + 1)
+ PASS();
+
FAIL("Steal reserved page");
}
--
1.8.3.1

@ -0,0 +1,56 @@
diff --git a/Makefile b/Makefile
index 51e41f0..573a799 100644
--- a/Makefile
+++ b/Makefile
@@ -25,9 +25,6 @@ NODEPTARGETS=<version.h> <clean>
INSTALL = install
-LDFLAGS += -ldl
-CFLAGS ?= -O2 -g
-CFLAGS += -Wall -fPIC
CPPFLAGS += -D__LIBHUGETLBFS__
ARCH ?= $(shell uname -m | sed -e s/i.86/i386/)
@@ -279,22 +276,22 @@ snapshot: $(VERSION)
obj32/%.o: %.c
@$(VECHO) CC32 $@
@mkdir -p obj32
- $(CC32) $(CPPFLAGS) $(CFLAGS) -o $@ -c $<
+ $(CC32) $(CPPFLAGS) $(CFLAGS) -fPIC -o $@ -c $<
obj64/%.o: %.c
@$(VECHO) CC64 $@
@mkdir -p obj64
- $(CC64) $(CPPFLAGS) $(CFLAGS) -o $@ -c $<
+ $(CC64) $(CPPFLAGS) $(CFLAGS) -fPIC -o $@ -c $<
obj32/%.o: %.S
@$(VECHO) AS32 $@
@mkdir -p obj32
- $(CC32) $(CPPFLAGS) -o $@ -c $<
+ $(CC32) $(CPPFLAGS) $(CFLAGS) -fPIC -o $@ -c $<
obj64/%.o: %.S
@$(VECHO) AS64 $@
@mkdir -p obj64
- $(CC64) $(CPPFLAGS) -o $@ -c $<
+ $(CC64) $(CPPFLAGS) $(CFLAGS) -fPIC -o $@ -c $<
obj32/libhugetlbfs.a: $(LIBOBJS32)
@$(VECHO) AR32 $@
diff --git a/tests/Makefile b/tests/Makefile
index 073df96..508a6ec 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -33,8 +33,8 @@ HELPERS = get_hugetlbfs_path compare_kvers
HELPER_LIBS = libheapshrink.so
BADTOOLCHAIN = bad-toolchain.sh
-CFLAGS = -O2 -Wall -g
-CPPFLAGS = -I..
+CFLAGS ?= -O2 -Wall -g
+CPPFLAGS += -I..
STATIC_LIBHUGE = -Wl,--whole-archive -lhugetlbfs -Wl,--no-whole-archive
STATIC_LDLIBS = -Wl,--no-as-needed -lpthread
LDLIBS = $(STATIC_LDLIBS) -ldl -lhugetlbfs_privutils

@ -0,0 +1,59 @@
diff --git a/elflink.c b/elflink.c
index a6bd44c..953e843 100644
--- a/elflink.c
+++ b/elflink.c
@@ -569,36 +569,34 @@ bail2:
*/
static unsigned long hugetlb_slice_start(unsigned long addr)
{
- if (!arch_has_slice_support()) {
- return ALIGN_DOWN(addr, gethugepagesize());
- }
-
+ if (arch_has_slice_support()) {
#if defined(__powerpc64__)
- if (addr < SLICE_LOW_TOP)
- return ALIGN_DOWN(addr, SLICE_LOW_SIZE);
- else if (addr < SLICE_HIGH_SIZE)
- return SLICE_LOW_TOP;
- else
- return ALIGN_DOWN(addr, SLICE_HIGH_SIZE);
+ if (addr < SLICE_LOW_TOP)
+ return ALIGN_DOWN(addr, SLICE_LOW_SIZE);
+ else if (addr < SLICE_HIGH_SIZE)
+ return SLICE_LOW_TOP;
+ else
+ return ALIGN_DOWN(addr, SLICE_HIGH_SIZE);
#elif defined(__powerpc__) && !defined(PPC_NO_SEGMENTS)
- return ALIGN_DOWN(addr, SLICE_LOW_SIZE);
+ return ALIGN_DOWN(addr, SLICE_LOW_SIZE);
#endif
+ }
+ return ALIGN_DOWN(addr, gethugepagesize());
}
static unsigned long hugetlb_slice_end(unsigned long addr)
{
- if (!arch_has_slice_support()) {
- return ALIGN_UP(addr, gethugepagesize()) - 1;
- }
-
+ if (arch_has_slice_support()) {
#if defined(__powerpc64__)
- if (addr < SLICE_LOW_TOP)
- return ALIGN_UP(addr, SLICE_LOW_SIZE) - 1;
- else
- return ALIGN_UP(addr, SLICE_HIGH_SIZE) - 1;
+ if (addr < SLICE_LOW_TOP)
+ return ALIGN_UP(addr, SLICE_LOW_SIZE) - 1;
+ else
+ return ALIGN_UP(addr, SLICE_HIGH_SIZE) - 1;
#elif defined(__powerpc__) && !defined(PPC_NO_SEGMENTS)
- return ALIGN_UP(addr, SLICE_LOW_SIZE) - 1;
+ return ALIGN_UP(addr, SLICE_LOW_SIZE) - 1;
#endif
+ }
+ return ALIGN_UP(addr, gethugepagesize()) - 1;
}
static unsigned long hugetlb_next_slice_start(unsigned long addr)

@ -0,0 +1,304 @@
diff --git a/huge_page_setup_helper.py b/huge_page_setup_helper.py
index 43c9916..7ba0c92 100755
--- a/huge_page_setup_helper.py
+++ b/huge_page_setup_helper.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
#
# Tool to set up Linux large page support with minimal effort
@@ -14,13 +14,13 @@ debug = False
# must be executed under the root to operate
if os.geteuid() != 0:
- print "You must be root to setup hugepages!"
+ print("You must be root to setup hugepages!")
os._exit(1)
# config files we need access to
sysctlConf = "/etc/sysctl.conf"
if not os.access(sysctlConf, os.W_OK):
- print "Cannot access %s" % sysctlConf
+ print("Cannot access %s" % sysctlConf)
if debug == False:
os._exit(1)
@@ -41,7 +41,7 @@ for line in hugeadmexplain:
break
if memTotal == 0:
- print "Your version of libhugetlbfs' hugeadm utility is too old!"
+ print("Your version of libhugetlbfs' hugeadm utility is too old!")
os._exit(1)
@@ -54,7 +54,7 @@ for line in poolList:
break
if hugePageSize == 0:
- print "Aborting, cannot determine system huge page size!"
+ print("Aborting, cannot determine system huge page size!")
os._exit(1)
# Get initial sysctl settings
@@ -83,22 +83,22 @@ for line in groupNames:
# dump system config as we see it before we start tweaking it
-print "Current configuration:"
-print " * Total System Memory......: %6d MB" % memTotal
-print " * Shared Mem Max Mapping...: %6d MB" % (shmmax / (1024 * 1024))
-print " * System Huge Page Size....: %6d MB" % (hugePageSize / (1024 * 1024))
-print " * Number of Huge Pages.....: %6d" % hugePages
-print " * Total size of Huge Pages.: %6d MB" % (hugePages * hugePageSize / (1024 * 1024))
-print " * Remaining System Memory..: %6d MB" % (memTotal - (hugePages * hugePageSize / (1024 * 1024)))
-print " * Huge Page User Group.....: %s (%d)" % (hugeGIDName, hugeGID)
-print
+print("Current configuration:")
+print(" * Total System Memory......: %6d MB" % memTotal)
+print(" * Shared Mem Max Mapping...: %6d MB" % (shmmax / (1024 * 1024)))
+print(" * System Huge Page Size....: %6d MB" % (hugePageSize / (1024 * 1024)))
+print(" * Number of Huge Pages.....: %6d" % hugePages)
+print(" * Total size of Huge Pages.: %6d MB" % (hugePages * hugePageSize / (1024 * 1024)))
+print(" * Remaining System Memory..: %6d MB" % (memTotal - (hugePages * hugePageSize / (1024 * 1024))))
+print(" * Huge Page User Group.....: %s (%d)" % (hugeGIDName, hugeGID))
+print()
# ask how memory they want to allocate for huge pages
userIn = None
while not userIn:
try:
- userIn = raw_input("How much memory would you like to allocate for huge pages? "
+ userIn = input("How much memory would you like to allocate for huge pages? "
"(input in MB, unless postfixed with GB): ")
if userIn[-2:] == "GB":
userHugePageReqMB = int(userIn[0:-2]) * 1024
@@ -113,19 +113,19 @@ while not userIn:
# As a sanity safeguard, require at least 128M not be allocated to huge pages
if userHugePageReqMB > (memTotal - 128):
userIn = None
- print "Refusing to allocate %d, you must leave at least 128MB for the system" % userHugePageReqMB
+ print("Refusing to allocate %d, you must leave at least 128MB for the system" % userHugePageReqMB)
elif userHugePageReqMB < (hugePageSize / (1024 * 1024)):
userIn = None
- print "Sorry, allocation must be at least a page's worth!"
+ print("Sorry, allocation must be at least a page's worth!")
else:
break
except ValueError:
userIn = None
- print "Input must be an integer, please try again!"
+ print("Input must be an integer, please try again!")
userHugePageReqKB = userHugePageReqMB * 1024
userHugePagesReq = userHugePageReqKB / (hugePageSize / 1024)
-print "Okay, we'll try to allocate %d MB for huge pages..." % userHugePageReqMB
-print
+print("Okay, we'll try to allocate %d MB for huge pages..." % userHugePageReqMB)
+print()
# some basic user input validation
@@ -134,24 +134,24 @@ inputIsValid = False
# ask for the name of the group allowed access to huge pages
while inputIsValid == False:
foundbad = False
- userGroupReq = raw_input("What group should have access to the huge pages?"
+ userGroupReq = input("What group should have access to the huge pages?"
"(The group will be created, if need be) [hugepages]: ")
if userGroupReq is '':
userGroupReq = 'hugepages'
if userGroupReq[0].isdigit() or userGroupReq[0] == "-":
foundbad = True
- print "Group names cannot start with a number or dash, please try again!"
+ print("Group names cannot start with a number or dash, please try again!")
for char in badchars:
if char in userGroupReq:
foundbad = True
- print "Illegal characters in group name, please try again!"
+ print("Illegal characters in group name, please try again!")
break
if len(userGroupReq) > 16:
foundbad = True
- print "Group names can't be more than 16 characaters, please try again!"
+ print("Group names can't be more than 16 characaters, please try again!")
if foundbad == False:
inputIsValid = True
-print "Okay, we'll give group %s access to the huge pages" % userGroupReq
+print("Okay, we'll give group %s access to the huge pages" % userGroupReq)
# see if group already exists, use it if it does, if not, create it
@@ -163,20 +163,20 @@ for line in groupNames:
break
if userGIDReq > -1:
- print "Group %s (gid %d) already exists, we'll use it" % (userGroupReq, userGIDReq)
+ print("Group %s (gid %d) already exists, we'll use it" % (userGroupReq, userGIDReq))
else:
if debug == False:
os.popen("/usr/sbin/groupadd %s" % userGroupReq)
else:
- print "/usr/sbin/groupadd %s" % userGroupReq
+ print("/usr/sbin/groupadd %s" % userGroupReq)
groupNames = os.popen("/usr/bin/getent group %s" % userGroupReq).readlines()
for line in groupNames:
curGroupName = line.split(":")[0]
if curGroupName == userGroupReq:
userGIDReq = int(line.split(":")[2])
break
- print "Created group %s (gid %d) for huge page use" % (userGroupReq, userGIDReq)
-print
+ print("Created group %s (gid %d) for huge page use" % (userGroupReq, userGIDReq))
+print()
# basic user input validation, take 2
@@ -186,20 +186,20 @@ inputIsValid = False
# ask for user(s) that should be in the huge page access group
while inputIsValid == False:
foundbad = False
- userUsersReq = raw_input("What user(s) should have access to the huge pages (space-delimited list, users created as needed)? ")
+ userUsersReq = input("What user(s) should have access to the huge pages (space-delimited list, users created as needed)? ")
for char in badchars:
if char in userUsersReq:
foundbad = True
- print "Illegal characters in user name(s) or invalid list format, please try again!"
+ print("Illegal characters in user name(s) or invalid list format, please try again!")
break
for n in userUsersReq.split():
if len(n) > 32:
foundbad = True
- print "User names can't be more than 32 characaters, please try again!"
+ print("User names can't be more than 32 characaters, please try again!")
break
if n[0] == "-":
foundbad = True
- print "User names cannot start with a dash, please try again!"
+ print("User names cannot start with a dash, please try again!")
break
if foundbad == False:
inputIsValid = True
@@ -211,24 +211,24 @@ for hugeUser in hugePageUserList:
for line in curUserList:
curUser = line.split(":")[0]
if curUser == hugeUser:
- print "Adding user %s to huge page group" % hugeUser
+ print("Adding user %s to huge page group" % hugeUser)
userExists = True
if debug == False:
os.popen("/usr/sbin/usermod -a -G %s %s" % (userGroupReq, hugeUser))
else:
- print "/usr/sbin/usermod -a -G %s %s" % (userGroupReq, hugeUser)
+ print("/usr/sbin/usermod -a -G %s %s" % (userGroupReq, hugeUser))
if userExists == True:
break
if userExists == False:
- print "Creating user %s with membership in huge page group" % hugeUser
+ print("Creating user %s with membership in huge page group" % hugeUser)
if debug == False:
if hugeUser == userGroupReq:
os.popen("/usr/sbin/useradd %s -g %s" % (hugeUser, userGroupReq))
else:
os.popen("/usr/sbin/useradd %s -G %s" % (hugeUser, userGroupReq))
else:
- print "/usr/sbin/useradd %s -G %s" % (hugeUser, userGroupReq)
-print
+ print("/usr/sbin/useradd %s -G %s" % (hugeUser, userGroupReq))
+print()
# set values for the current running environment
@@ -238,11 +238,11 @@ if debug == False:
os.popen("/usr/bin/hugeadm --set-shm-group %d" % userGIDReq)
os.popen("/usr/bin/hugeadm --set-recommended-shmmax")
else:
- print "/usr/bin/hugeadm --pool-pages-min DEFAULT:%sM" % userHugePageReqMB
- print "/usr/bin/hugeadm --pool-pages-max DEFAULT:%sM" % userHugePageReqMB
- print "/usr/bin/hugeadm --set-shm-group %d" % userGIDReq
- print "/usr/bin/hugeadm --set-recommended-shmmax"
- print
+ print("/usr/bin/hugeadm --pool-pages-min DEFAULT:%sM" % userHugePageReqMB)
+ print("/usr/bin/hugeadm --pool-pages-max DEFAULT:%sM" % userHugePageReqMB)
+ print("/usr/bin/hugeadm --set-shm-group %d" % userGIDReq)
+ print("/usr/bin/hugeadm --set-recommended-shmmax")
+ print()
# figure out what that shmmax value we just set was
hugeadmexplain = os.popen("/usr/bin/hugeadm --explain 2>/dev/null").readlines()
@@ -258,7 +258,7 @@ if debug == False:
try:
sysctlConfLines = open(sysctlConf).readlines()
os.rename(sysctlConf, sysctlConf + ".backup")
- print("Saved original %s as %s.backup" % (sysctlConf, sysctlConf))
+ print(("Saved original %s as %s.backup" % (sysctlConf, sysctlConf)))
except:
pass
@@ -279,11 +279,11 @@ if debug == False:
fd.close()
else:
- print "Add to %s:" % sysctlConf
- print "kernel.shmmax = %d" % shmmax
- print "vm.nr_hugepages = %d" % userHugePagesReq
- print "vm.hugetlb_shm_group = %d" % userGIDReq
- print
+ print("Add to %s:" % sysctlConf)
+ print("kernel.shmmax = %d" % shmmax)
+ print("vm.nr_hugepages = %d" % userHugePagesReq)
+ print("vm.hugetlb_shm_group = %d" % userGIDReq)
+ print()
# write out limits.conf changes to persist across reboot
@@ -293,7 +293,7 @@ if debug == False:
try:
limitsConfLines = open(limitsConf).readlines()
os.rename(limitsConf, limitsConf + ".backup")
- print("Saved original %s as %s.backup" % (limitsConf, limitsConf))
+ print(("Saved original %s as %s.backup" % (limitsConf, limitsConf)))
except:
pass
@@ -319,25 +319,25 @@ if debug == False:
fd.close()
else:
- print "Add to %s:" % limitsConf
+ print("Add to %s:" % limitsConf)
for hugeUser in hugePageUserList:
- print "%s soft memlock %d" % (hugeUser, userHugePageReqKB)
- print "%s hard memlock %d" % (hugeUser, userHugePageReqKB)
+ print("%s soft memlock %d" % (hugeUser, userHugePageReqKB))
+ print("%s hard memlock %d" % (hugeUser, userHugePageReqKB))
# dump the final configuration of things now that we're done tweaking
-print
-print "Final configuration:"
-print " * Total System Memory......: %6d MB" % memTotal
+print()
+print("Final configuration:")
+print(" * Total System Memory......: %6d MB" % memTotal)
if debug == False:
- print " * Shared Mem Max Mapping...: %6d MB" % (shmmax / (1024 * 1024))
+ print(" * Shared Mem Max Mapping...: %6d MB" % (shmmax / (1024 * 1024)))
else:
# This should be what we *would* have set it to, had we actually run hugeadm --set-recommended-shmmax
- print " * Shared Mem Max Mapping...: %6d MB" % (userHugePagesReq * hugePageSize / (1024 * 1024))
-print " * System Huge Page Size....: %6d MB" % (hugePageSize / (1024 * 1024))
-print " * Available Huge Pages.....: %6d" % userHugePagesReq
-print " * Total size of Huge Pages.: %6d MB" % (userHugePagesReq * hugePageSize / (1024 * 1024))
-print " * Remaining System Memory..: %6d MB" % (memTotal - userHugePageReqMB)
-print " * Huge Page User Group.....: %s (%d)" % (userGroupReq, userGIDReq)
-print
+ print(" * Shared Mem Max Mapping...: %6d MB" % (userHugePagesReq * hugePageSize / (1024 * 1024)))
+print(" * System Huge Page Size....: %6d MB" % (hugePageSize / (1024 * 1024)))
+print(" * Available Huge Pages.....: %6d" % userHugePagesReq)
+print(" * Total size of Huge Pages.: %6d MB" % (userHugePagesReq * hugePageSize / (1024 * 1024)))
+print(" * Remaining System Memory..: %6d MB" % (memTotal - userHugePageReqMB))
+print(" * Huge Page User Group.....: %s (%d)" % (userGroupReq, userGIDReq))
+print()

@ -0,0 +1,212 @@
From 815072b9163cae73671baae448f974cc8f8a84be Mon Sep 17 00:00:00 2001
From: Rafael Aquini <aquini@redhat.com>
Date: Sun, 12 Apr 2020 21:08:01 -0400
Subject: [PATCH] tests: fix covscan SHELLCHECK_WARNING complaints
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/counters.sh | 2 +-
tests/fadvise_reserve.sh | 2 +-
tests/fallocate_align.sh | 2 +-
tests/fallocate_basic.sh | 2 +-
tests/fallocate_stress.sh | 2 +-
tests/madvise_reserve.sh | 2 +-
tests/mremap-expand-slice-collision.sh | 2 +-
tests/mremap-fixed-huge-near-normal.sh | 2 +-
tests/mremap-fixed-normal-near-huge.sh | 2 +-
tests/quota.sh | 2 +-
tests/readahead_reserve.sh | 2 +-
tests/wrapper-utils.sh | 18 +++++++++---------
12 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/tests/counters.sh b/tests/counters.sh
index e3ffabe..27bfca3 100755
--- a/tests/counters.sh
+++ b/tests/counters.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# Huge page overcommit was not available until 2.6.24
-compare_kvers `uname -r` "2.6.24"
+compare_kvers "$(uname -r)" "2.6.24"
if [ $? -eq 1 ]; then
EXP_RC=$RC_FAIL
else
diff --git a/tests/fadvise_reserve.sh b/tests/fadvise_reserve.sh
index 74496ec..ff96003 100755
--- a/tests/fadvise_reserve.sh
+++ b/tests/fadvise_reserve.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# fadvise is known broken before 2.6.30
-compare_kvers `uname -r` "2.6.30"
+compare_kvers "$(uname -r)" "2.6.30"
if [ $? -eq 1 ]; then
echo "FAIL (assumed) kernel bug"
exit $RC_FAIL
diff --git a/tests/fallocate_align.sh b/tests/fallocate_align.sh
index 5105151..4397cd3 100755
--- a/tests/fallocate_align.sh
+++ b/tests/fallocate_align.sh
@@ -5,7 +5,7 @@
#
# hugetlbfs fallocate support was not available until 4.3
#
-compare_kvers `uname -r` "4.3.0"
+compare_kvers "$(uname -r)" "4.3.0"
if [ $? -eq 1 ]; then
echo "FAIL no fallocate support in kernels before 4.3.0"
exit $RC_FAIL
diff --git a/tests/fallocate_basic.sh b/tests/fallocate_basic.sh
index 904dfd6..1af6196 100755
--- a/tests/fallocate_basic.sh
+++ b/tests/fallocate_basic.sh
@@ -5,7 +5,7 @@
#
# hugetlbfs fallocate support was not available until 4.3
#
-compare_kvers `uname -r` "4.3.0"
+compare_kvers "$(uname -r)" "4.3.0"
if [ $? -eq 1 ]; then
echo "FAIL no fallocate support in kernels before 4.3.0"
exit $RC_FAIL
diff --git a/tests/fallocate_stress.sh b/tests/fallocate_stress.sh
index 622084f..3b5b70a 100755
--- a/tests/fallocate_stress.sh
+++ b/tests/fallocate_stress.sh
@@ -5,7 +5,7 @@
#
# hugetlbfs fallocate support was not available until 4.3
#
-compare_kvers `uname -r` "4.3.0"
+compare_kvers "$(uname -r)" "4.3.0"
if [ $? -eq 1 ]; then
echo "FAIL no fallocate support in kernels before 4.3.0"
exit $RC_FAIL
diff --git a/tests/madvise_reserve.sh b/tests/madvise_reserve.sh
index cfe582d..eb289d6 100755
--- a/tests/madvise_reserve.sh
+++ b/tests/madvise_reserve.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# madvise is known broken before 2.6.30
-compare_kvers `uname -r` "2.6.30"
+compare_kvers "$(uname -r)" "2.6.30"
if [ $? -eq 1 ]; then
echo "FAIL (assumed) kernel bug"
exit $RC_FAIL
diff --git a/tests/mremap-expand-slice-collision.sh b/tests/mremap-expand-slice-collision.sh
index 8c9d98a..dd4eba3 100755
--- a/tests/mremap-expand-slice-collision.sh
+++ b/tests/mremap-expand-slice-collision.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# mremap-expand-slice-collision is known broken before 2.6.33
-compare_kvers `uname -r` "2.6.33"
+compare_kvers "$(uname -r)" "2.6.33"
if [ $? -eq 1 ]; then
echo "FAIL (assumed) kernel bug"
exit $RC_FAIL
diff --git a/tests/mremap-fixed-huge-near-normal.sh b/tests/mremap-fixed-huge-near-normal.sh
index 4b89c35..22fde79 100755
--- a/tests/mremap-fixed-huge-near-normal.sh
+++ b/tests/mremap-fixed-huge-near-normal.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# mremap-fixed-huge-near-normal is known broken before 2.6.33
-compare_kvers `uname -r` "2.6.33"
+compare_kvers "$(uname -r)" "2.6.33"
if [ $? -eq 1 ]; then
echo "FAIL (assumed) kernel bug"
exit $RC_FAIL
diff --git a/tests/mremap-fixed-normal-near-huge.sh b/tests/mremap-fixed-normal-near-huge.sh
index 9ed058f..45b8f26 100755
--- a/tests/mremap-fixed-normal-near-huge.sh
+++ b/tests/mremap-fixed-normal-near-huge.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# mremap-fixed-normal-near-huge is known broken before 2.6.33
-compare_kvers `uname -r` "2.6.33"
+compare_kvers "$(uname -r)" "2.6.33"
if [ $? -eq 1 ]; then
echo "FAIL (assumed) kernel bug"
exit $RC_FAIL
diff --git a/tests/quota.sh b/tests/quota.sh
index 398d442..55c764a 100755
--- a/tests/quota.sh
+++ b/tests/quota.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# There are known bugs in quota accounting prior to 2.6.24
-compare_kvers `uname -r` "2.6.24"
+compare_kvers "$(uname -r)" "2.6.24"
if [ $? -eq 1 ]; then
EXP_RC=$RC_FAIL
else
diff --git a/tests/readahead_reserve.sh b/tests/readahead_reserve.sh
index 5ab7400..861ef5a 100755
--- a/tests/readahead_reserve.sh
+++ b/tests/readahead_reserve.sh
@@ -3,7 +3,7 @@
. wrapper-utils.sh
# readahead is known broken before 2.6.30
-compare_kvers `uname -r` "2.6.30"
+compare_kvers "$(uname -r)" "2.6.30"
if [ $? -eq 1 ]; then
echo "FAIL (assumed) kernel bug"
exit $RC_FAIL
diff --git a/tests/wrapper-utils.sh b/tests/wrapper-utils.sh
index 2f6451d..79e7ed1 100644
--- a/tests/wrapper-utils.sh
+++ b/tests/wrapper-utils.sh
@@ -1,12 +1,12 @@
#!/bin/bash
# Standard return codes
-RC_PASS=0
-RC_CONFIG=1
-RC_FAIL=2
-RC_XFAIL=3
-RC_XPASS=4
-RC_BUG=99
+export RC_PASS=0
+export RC_CONFIG=1
+export RC_FAIL=2
+export RC_XFAIL=3
+export RC_XPASS=4
+export RC_BUG=99
function unexpected_pass()
{
@@ -28,10 +28,10 @@ function check_rc()
EXP_RC=$1
ACT_RC=$2
- if [ $ACT_RC -eq $RC_PASS -a $EXP_RC -ne $RC_PASS ]; then
+ if [[ ($ACT_RC -eq $RC_PASS) && ($EXP_RC -ne $RC_PASS) ]]; then
unexpected_pass
return $RC_XPASS
- elif [ $EXP_RC -ne $RC_PASS -a $EXP_RC -eq $ACT_RC ]; then
+ elif [[ ($EXP_RC -ne $RC_PASS) && ($EXP_RC -eq $ACT_RC) ]]; then
expected_fail
return $RC_XFAIL
else
@@ -47,7 +47,7 @@ function exec_and_check()
EXP_RC=$1
shift
- OUTPUT=`$@`
+ OUTPUT=$("$@")
check_rc $EXP_RC $?
RC=$?
echo $OUTPUT
--
2.25.2

@ -0,0 +1,56 @@
From 112f4b7266cae313e5a7f3d720360cdb294db496 Mon Sep 17 00:00:00 2001
From: Rafael Aquini <aquini@redhat.com>
Date: Sun, 12 Apr 2020 22:59:32 -0400
Subject: [PATCH] tests: include missing LDFLAGS to make targets
Signed-off-by: Rafael Aquini <aquini@redhat.com>
---
tests/Makefile | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/tests/Makefile b/tests/Makefile
index 9fd15eb..216942e 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -120,32 +120,32 @@ shmoverride_linked.c: shmoverride_unlinked.c
obj32/%.o: %.c
@$(VECHO) CC32 $@
@mkdir -p obj32
- $(CC32) $(CPPFLAGS) $(CFLAGS) -o $@ -c $<
+ $(CC32) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -o $@ -c $<
obj64/%.o: %.c
@$(VECHO) CC64 $@
@mkdir -p obj64
- $(CC64) $(CPPFLAGS) $(CFLAGS) -o $@ -c $<
+ $(CC64) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -o $@ -c $<
obj32/%-pic.o: %.c
@$(VECHO) CC32 $@
@mkdir -p obj32
- $(CC32) $(CPPFLAGS) $(CFLAGS) -fPIC -o $@ -c $<
+ $(CC32) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -fPIC -o $@ -c $<
obj64/%-pic.o: %.c
@$(VECHO) CC64 $@
@mkdir -p obj64
- $(CC64) $(CPPFLAGS) $(CFLAGS) -fPIC -o $@ -c $<
+ $(CC64) $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) -fPIC -o $@ -c $<
obj32/libheapshrink.so: obj32/heapshrink-helper-pic.o
@$(VECHO) LD32 "(shared)" $@
@mkdir -p obj32
- $(CC32) -Wl,-soname,$(notdir $@) -shared -o $@ $^
+ $(CC32) $(LDFLAGS) -Wl,-soname,$(notdir $@) -shared -o $@ $^
obj64/libheapshrink.so: obj64/heapshrink-helper-pic.o
@$(VECHO) LD64 "(shared)" $@
@mkdir -p obj64
- $(CC64) -Wl,-soname,$(notdir $@) -shared -o $@ $^
+ $(CC64) $(LDFLAGS) -Wl,-soname,$(notdir $@) -shared -o $@ $^
$(LIB_TESTS:%=obj32/%): %: %.o obj32/testutils.o obj32/libtestutils.o
@$(VECHO) LD32 "(lib test)" $@
--
2.25.2

@ -0,0 +1,12 @@
diff --git a/tests/Makefile b/tests/Makefile
index 508a6ec..9fd15eb 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -295,7 +295,6 @@ obj64/install:
$(INSTALL) -m 755 wrapper-utils.sh $(DESTDIR)$(INST_TESTSDIR64)/obj64
$(INSTALL) -m 755 $(HELPERS:%=obj64/%) $(DESTDIR)$(INST_TESTSDIR64)/obj64
$(INSTALL) -m 755 $(HELPER_LIBS:%=obj64/%) $(DESTDIR)$(INST_TESTSDIR64)/obj64
- $(INSTALL) -m 755 $(TESTS_64:%=obj64/%) $(DESTDIR)$(INST_TESTSDIR64)/obj64
$(INSTALL) -m 755 run_tests.py $(DESTDIR)$(INST_TESTSDIR64)
install: $(OBJDIRS:%=%/install)

@ -0,0 +1,13 @@
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 5c10f6d..b6f73bb 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -247,7 +247,7 @@ def get_pagesizes():
"""
sizes = set()
out = ""
- (rc, out) = bash("../obj/hugeadm --page-sizes")
+ (rc, out) = bash("hugeadm --page-sizes")
if rc != 0 or out == "":
return sizes

@ -0,0 +1,26 @@
diff --git a/tests/run_tests.py b/tests/run_tests.py
index 3c95a03..f88e1e2 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -557,7 +557,7 @@ def functional_tests():
do_test("mremap-expand-slice-collision.sh")
do_test("mremap-fixed-normal-near-huge.sh")
do_test("mremap-fixed-huge-near-normal.sh")
- do_shm_test("shm-perms", 64*1024*1024)
+ do_shm_test("shm-perms", 1024*1024*1024)
# Tests requiring an active mount and hugepage COW
do_test("private")
diff --git a/tests/shm-perms.c b/tests/shm-perms.c
index 590a101..12d7609 100644
--- a/tests/shm-perms.c
+++ b/tests/shm-perms.c
@@ -32,7 +32,7 @@
"* to a segment with different permissions. A segment is created *\n"\
"* and children attach read-only to check reservation accounting. *"
-#define SEGMENT_SIZE ((size_t)0x4000000)
+#define SEGMENT_SIZE ((size_t)0x40000000)
#define SEGMENT_KEY 0x82ba15ff
#define STRIDE 0x200000

@ -0,0 +1,434 @@
Name: libhugetlbfs
Version: 2.21
Release: 17%{?dist}
Summary: A library which provides easy access to huge pages of memory
Group: System Environment/Libraries
License: LGPLv2+
URL: https://github.com/libhugetlbfs/libhugetlbfs
Source0: https://github.com/%{name}/%{name}/releases/download/%{version}/%{name}-%{version}.tar.gz
BuildRequires: glibc-devel
BuildRequires: glibc-static
BuildRequires: python3-devel
BuildRequires: execstack
%global _hardened_build 1
%define ldscriptdir %{_datadir}/%{name}/ldscripts
# Patch0: build flag adjustments to build in stricter RHEL-8 buildroots
Patch0: build_flags.patch
# Downstream patch testcases to avoid bogus annoying failures
# RHBZ#1611780 && RHBZ#1611782
Patch1: 0001-testutils-fix-range_is_mapped.patch
Patch2: 0002-stack_grow_into_huge-don-t-clobber-existing-mappings.patch
# RHBZ#1628794 undersized SHMMAX when running on aarch64
# https://github.com/libhugetlbfs/libhugetlbfs/issues/39
Patch3: tests_shm-perms_adjust_max_segment_size_for_bigger_hugepages.patch
# Downstream fix for Covscan CI error.
Patch4: elflink-return-type-fix.patch
# Downstream patches to remove an IA-64 target leftover that breaks the
# tests install and fix run_tests.py path for hugeadm tool call
Patch5: tests-makefile-fix.patch
Patch6: tests-run_tests-fix-hugeadm-path.patch
# Patch7: huge_page_setup_helper.py Python3 conversion
# Upstream tickets:
# Fedora: https://bugzilla.redhat.com/show_bug.cgi?id=1598570
# libhugetlbfs: https://github.com/libhugetlbfs/libhugetlbfs/issues/35
Patch7: huge_page_setup_helper-python3-convert.patch
# Fixes for downstream COVSCAN and RPMDiff execshield complaints:
Patch8: tests-fix-covscan-SHELLCHECK_WARNING-complaints.patch
Patch9: tests-include-missing-LDFLAGS-to-make-targets.patch
# Patch10: RHBZ#1832243 "hugeadm: ERROR: Invalid group specification (-1)" fix
# upstream pull request
# https://github.com/libhugetlbfs/libhugetlbfs/pull/48/commits/e7b3e6817421763eee37cb35ef8627bdd37a3690
Patch10: 0001-wait-child-with-os-wait.patch
# Upstream follow-ups for libhugetlbfs-2.21
Patch50: 0001-tests-Add-utility-to-check-for-a-minimum-number-of-o.patch
Patch51: 0002-tests-slbpacaflush-Use-online-cpus-only.patch
Patch52: 0003-tests-alloc-instantiate-race-Use-online-cpus-only.patch
Patch53: 0004-tests-task-size-overrun-Make-test-faster-on-powerpc6.patch
Patch54: 0005-tests-truncate-above-4GB-Skip-if-truncation-point-is.patch
Patch55: 0006-tests-map-high-truncate-2-Skip-if-truncation-point-i.patch
Patch56: 0007-morecore-tests-Run-tests-only-for-default-huge-page-.patch
Patch57: 0008-hugeutils-Make-writing-a-ulong-to-a-file-more-reliab.patch
Patch58: 0009-tests-Add-utility-to-check-if-huge-pages-are-giganti.patch
Patch59: 0010-tests-counters-Skip-if-using-gigantic-huge-pages.patch
Patch60: 0011-hugeutils-Add-utility-to-check-if-slices-are-support.patch
Patch61: 0012-tests-brk-near-huge-Fix-next-chunk-computation-for-p.patch
Patch62: 0013-elflink-Fix-program-header-address-calculation.patch
Patch63: 0014-elflink-powerpc64-Use-slices-based-on-MMU-type.patch
Patch64: 0015-ld.hugetlbfs-powerpc64-Add-support-for-different-hug.patch
Patch65: 0016-elflink-tests-Run-tests-only-for-default-huge-page-s.patch
Patch66: 0017-tests-Update-utility-to-get-free-and-total-huge-page.patch
Patch67: 0018-mmap-tests-Run-tests-with-correct-huge-page-count.patch
Patch68: 0019-Be-explicit-about-using-Python2-in-the-test-script.patch
Patch69: 0020-Switch-test-runner-script-to-print-function.patch
Patch70: 0021-Remove-backtick-operator-from-test-runner-script.patch
Patch71: 0022-tests-Avoid-old-style-except-syntax-in-the-test-runn.patch
Patch72: 0023-tests-Avoid-explicit-type-comparison-in-runner-scrip.patch
Patch73: 0024-tests-Explicitly-decode-subprocess-output.patch
Patch74: 0025-tests-Use-modern-style-division-in-runner-script.patch
Patch75: 0026-tests-Switch-test-runner-to-Python3.patch
Patch76: 0027-tests-Improve-TASK_SIZE-detection-in-task-size-overr.patch
Patch77: 0028-Remove-man-page-for-cpupcstat.patch
Patch78: 0029-Fix-spelling-of-khugepaged-options-in-hugeadm.patch
Patch79: 0030-Makefile-Remove-cpupcstat-from-man-page-target.patch
Patch80: 0031-tests-noresv-preserve-resv-page-Fix-failure-in-case-.patch
%description
libhugetlbfs is a library which provides easy access to huge pages of memory.
It is a wrapper for the hugetlbfs file system. Applications can use huge pages
to fulfill malloc() requests without being recompiled by using LD_PRELOAD.
Alternatively, applications can be linked against libhugetlbfs without source
modifications to load BSS or BSS, data, and text segments into large pages.
%package devel
Summary: Header files for libhugetlbfs
Group: Development/Libraries
Requires: %{name} = %{version}-%{release}
%description devel
Contains header files for building with libhugetlbfs.
%package utils
Summary: Userspace utilities for configuring the hugepage environment
Group: Applications/System
Requires: %{name} = %{version}-%{release}
%description utils
This package contains a number of utilities that will help administer the
use of huge pages on your system. hugeedit modifies binaries to set default
segment remapping behavior. hugectl sets environment variables for using huge
pages and then execs the target program. hugeadm gives easy access to huge page
pool size control. pagesize lists page sizes available on the machine.
%package tests
Summary: Test cases to help on validating the library environment
Group: Development/Libraries
Requires: %{name}-utils = %{version}-%{release}
%description tests
This package contains a number of testcases that will help developers
to verify the libhugetlbfs functionality and validate the library.
%prep
%setup -q -n %{name}-%{version}
# apply upstream patchset first
%patch50 -p1
%patch51 -p1
%patch52 -p1
%patch53 -p1
%patch54 -p1
%patch55 -p1
%patch56 -p1
%patch57 -p1
%patch58 -p1
%patch59 -p1
%patch60 -p1
%patch61 -p1
%patch62 -p1
%patch63 -p1
%patch64 -p1
%patch65 -p1
%patch66 -p1
%patch67 -p1
%patch68 -p1
%patch69 -p1
%patch70 -p1
%patch71 -p1
%patch72 -p1
%patch73 -p1
%patch74 -p1
%patch75 -p1
%patch76 -p1
%patch77 -p1
%patch78 -p1
%patch79 -p1
%patch80 -p1
# downstream patches
%patch0 -p1
%patch1 -p1
%patch2 -p1
%patch3 -p1
%patch4 -p1
%patch5 -p1
%patch6 -p1
%patch7 -p1
%patch8 -p1
%patch9 -p1
%patch10 -p1
pathfix.py -i %{__python3} -pn huge_page_setup_helper.py \
tests/run_tests.py
%build
%set_build_flags
# Parallel builds are not reliable
make all BUILDTYPE=NATIVEONLY V=1
%install
make install PREFIX=%{_prefix} DESTDIR=$RPM_BUILD_ROOT LDSCRIPTDIR=%{ldscriptdir} BUILDTYPE=NATIVEONLY
make install-helper PREFIX=%{_prefix} DESTDIR=$RPM_BUILD_ROOT LDSCRIPTDIR=%{ldscriptdir} BUILDTYPE=NATIVEONLY
make install-tests PREFIX=%{_prefix} DESTDIR=$RPM_BUILD_ROOT LDSCRIPTDIR=%{ldscriptdir} BUILDTYPE=NATIVEONLY
mkdir -p -m755 $RPM_BUILD_ROOT%{_sysconfdir}/security/limits.d
touch $RPM_BUILD_ROOT%{_sysconfdir}/security/limits.d/hugepages.conf
# clear execstack flag
execstack --clear-execstack %{buildroot}/%{_libdir}/libhugetlbfs.so
execstack --clear-execstack %{buildroot}/%{_libdir}/libhugetlbfs_privutils.so
# remove statically built libraries:
rm -f $RPM_BUILD_ROOT/%{_libdir}/*.a
rm -f $RPM_BUILD_ROOT/%{_libdir}/libhugetlbfs/tests/*/*.link*
# remove unused sbin directory
rm -fr $RPM_BUILD_ROOT/%{_sbindir}/
%post -p /sbin/ldconfig
%postun -p /sbin/ldconfig
%files
%{_libdir}/libhugetlbfs.so*
%{_libdir}/libhugetlbfs_privutils.so*
%{_datadir}/%{name}/
%{_mandir}/man7/libhugetlbfs.7.gz
%ghost %config(noreplace) %{_sysconfdir}/security/limits.d/hugepages.conf
%doc README HOWTO LGPL-2.1 NEWS
%files devel
%{_includedir}/hugetlbfs.h
%{_mandir}/man3/getpagesizes.3.gz
%{_mandir}/man3/free_huge_pages.3.gz
%{_mandir}/man3/get_huge_pages.3.gz
%{_mandir}/man3/gethugepagesize.3.gz
%{_mandir}/man3/gethugepagesizes.3.gz
%{_mandir}/man3/free_hugepage_region.3.gz
%{_mandir}/man3/get_hugepage_region.3.gz
%{_mandir}/man3/hugetlbfs_find_path.3.gz
%{_mandir}/man3/hugetlbfs_find_path_for_size.3.gz
%{_mandir}/man3/hugetlbfs_test_path.3.gz
%{_mandir}/man3/hugetlbfs_unlinked_fd.3.gz
%{_mandir}/man3/hugetlbfs_unlinked_fd_for_size.3.gz
%files utils
%{_bindir}/hugeedit
%{_bindir}/hugeadm
%{_bindir}/hugectl
%{_bindir}/pagesize
%{_bindir}/huge_page_setup_helper.py
%exclude %{_bindir}/cpupcstat
%exclude %{_bindir}/oprofile_map_events.pl
%exclude %{_bindir}/oprofile_start.sh
%{_mandir}/man8/hugeedit.8.gz
%{_mandir}/man8/hugectl.8.gz
%{_mandir}/man8/hugeadm.8.gz
%{_mandir}/man1/pagesize.1.gz
%{_mandir}/man1/ld.hugetlbfs.1.gz
%exclude %{_mandir}/man8/cpupcstat.8.gz
%exclude %{_libdir}/perl5/TLBC
%files tests
%{_libdir}/libhugetlbfs
%changelog
* Tue May 26 2020 Rafael Aquini <aquini@redhat.com> - 2.21-17
- hugeadm: "ERROR: Invalid group specification" fix (1832243)
* Mon Apr 13 2020 Rafael Aquini <aquini@redhat.com> - 2.21-16
- libhugetlbfs-tests: harden the testcases to satisfy EXECSHIELD RPMDiff checks (1785296)
* Thu Apr 9 2020 Rafael Aquini <aquini@redhat.com> - 2.21-14
- Follow up fix for harden the testcases (1785296)
* Thu Apr 9 2020 Rafael Aquini <aquini@redhat.com> - 2.21-13
- Fix: huge_page_setup_helper.py: SyntaxError: Missing parentheses in call to 'print' (1821938)
- libhugetlbfs-tests: harden the testcases to satisfy EXECSHIELD RPMDiff checks (1785296)
* Tue Oct 29 2019 Rafael Aquini <aquini@redhat.com> - 2.21-12
- Fix: Introduce libhugetlbfs-tests subpkg for CI tests (1688930)
- trim repetitive changelogs for interim debug builds
* Mon Oct 28 2019 Rafael Aquini <aquini@redhat.com> - 2.21-4
- Fix: task-size-overrun hung over 8 hours on ppc64le (1737370)
- Introduce libhugetlbfs-tests subpkg for CI tests (1688930)
* Tue Apr 2 2019 Rafael Aquini <aquini@redhat.com> - 2.21-3
- Fix: Adding CI gating basic infrastructure (1680621)
* Mon Apr 1 2019 Rafael Aquini <aquini@redhat.com> - 2.21-2
- Adding CI gating basic infrastructure (1680621)
* Wed Oct 3 2018 Rafael Aquini <aquini@redhat.com> - 2.21-1
- Fix small_const/small_data is not hugepage test failures (1628794)
* Tue Sep 11 2018 Rafael Aquini <aquini@redhat.com> - 2.20-12
- Finish up Python3 conversion of tests/run_tests.py (1620250)
* Mon Sep 10 2018 Rafael Aquini <aquini@redhat.com> - 2.20-11
- Fix up rpmdiff execshield flag failures (1627532)
* Tue Sep 04 2018 Rafael Aquini <aquini@redhat.com> - 2.20-10
- Fix up annocheck distro flag failures (1624131)
- Convert libhugetlbfs run_tests.py to Python3 (1620250)
* Thu Aug 02 2018 Rafael Aquini <aquini@redhat.com> - 2.20-9
- Fix up libhugetlbfs testcase problems (1611780 1611782)
* Wed Aug 01 2018 Charalampos Stratakis <cstratak@redhat.com> - 2.20-8
- Fix python shebangs
* Thu Jul 05 2018 Rafael Aquini <aquini@redhat.com> - 2.20-7
- Remove python2 dependency for RHEL8 mass rebuilds (1561516 1580761)
* Wed Feb 07 2018 Fedora Release Engineering <releng@fedoraproject.org> - 2.20-6
- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild
* Thu Aug 03 2017 Fedora Release Engineering <releng@fedoraproject.org> - 2.20-5
- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild
* Wed Jul 26 2017 Fedora Release Engineering <releng@fedoraproject.org> - 2.20-4
- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild
* Fri Feb 10 2017 Fedora Release Engineering <releng@fedoraproject.org> - 2.20-3
- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild
* Thu Feb 04 2016 Fedora Release Engineering <releng@fedoraproject.org> - 2.20-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild
* Thu Dec 03 2015 Eric B Munson <emunson@mgebm.net> - 2.20-1
- Update to 2.20 upstream
* Wed Jul 01 2015 Eric B Munson <emunson@mgebm.net> - 2.19-1
- Update to 2.19 upstream
* Wed Jun 17 2015 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 2.18-5
- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild
* Sun Aug 17 2014 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 2.18-4
- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild
* Sat Jun 07 2014 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 2.18-3
- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild
* Mon Apr 07 2014 Eric B Munson <emunson@mgebm.net> - 2.18-2
- Remove unnecessary ppc makefile patch
* Sun Apr 06 2014 Eric B Munson <emunson@mgebm.net> - 2.18-1
- Update to 2.18 upstream
* Sat Mar 15 2014 Eric B Munson <emunson@mgebm.net> - 2.12-2
- Add Patch to support building on ppc64le
* Wed Jan 29 2014 Kyle McMartin <kyle@fedoraproject.org> - 2.17-1
- Update for upstream 2.17 release (adds AArch64 support)
- update libhugetlbfs-2.16-s390.patch for 2.17 changes to Makefile
- add libhugetlbfs-2.17-ppc.patch to fix powerpc{,64}
* Thu Jul 25 2013 Dan Horák <dan[at]danny.cz> - 2.16-2
- Fix build on s390/s390x (patch by aarapov@rh.c)
- Use Fedora CFLAGS for build
* Mon Apr 29 2013 Peter Robinson <pbrobinson@fedoraproject.org> 2.16-1
- Upstream 2.16 release (adds ARM support)
* Thu Feb 14 2013 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 2.15-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_19_Mass_Rebuild
* Sat Dec 08 2012 Eric B Munson <emunson@mgebm.net> - 2.15
- Update for upstream 2.15 release
* Thu Jul 19 2012 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 2.13-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild
* Sat Mar 24 2012 Eric B Munson <emunson@mgebm.net>
- Update for upstream 2.13 release
* Wed Jul 20 2011 Eric B Munson <emunson@mgebm.net>
- Update for upstream 2.12 release
* Tue Feb 08 2011 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 2.9-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild
* Mon Apr 05 2010 Eric B Munson <ebmunson@us.ibm.com> 2.8-1
- Update for upstream 2.8 release
* Wed Feb 10 2010 Eric B Munson <ebmunson@us.ibm.com> 2.7-2
- Include patch that fixes build on ppc
* Tue Jan 05 2010 Eric B Munson <ebmunson@us.ibm.com> 2.7-1
- Update for upstream 2.7 release
* Fri Oct 02 2009 Jarod Wilson <jarod@redhat.com> 2.6-3
- Add hopefully-about-to-be-merged-upstream hugeadm enhancements
- Add huge pages setup helper script, using new hugeadm enhancements
* Thu Sep 03 2009 Nils Philippsen <nils@redhat.com> 2.6-2
- fix building on s390x
* Mon Aug 31 2009 Eric Munson <ebmunson@us.ibm.com> 2.6-1
- Updating for the libhugetlbfs-2.6 release
* Fri Jul 24 2009 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 2.5-3
- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild
* Mon Jul 20 2009 Eric Munson <ebmunson@us.ibm.com> 2.5-2
- Update Group for -utils package to Applications/System
* Tue Jun 30 2009 Eric Munson <ebmunson@us.ibm.com> 2.5-1
- Updating for the libhugetlbfs-2.5 release
* Tue Jun 02 2009 Eric Munson <ebmunson@us.ibm.com> 2.4-2
- Adding patch to remove S390 32 bit build
* Fri May 29 2009 Eric Munson <ebmunson@us.ibm.com> 2.4-1
- Updating for the libhugetlbfs-2.4 release
* Wed Apr 15 2009 Eric Munson <ebmunson@us.ibm.com> 2.3-1
- Updating for the libhugetlbfs-2.3 release
* Wed Feb 11 2009 Eric Munson <ebmunson@us.ibm.com> 2.2-1
- Updating for the libhugetlbfs-2.2 release
* Fri Dec 19 2008 Eric Munson <ebmunson@us.ibm.com> 2.1.2-1
- Updating for libhugetlbfs-2.1.2 release
* Fri Dec 19 2008 Eric Munson <ebmunson@us.ibm.com> 2.1.1-1
- Updating for libhugetlbfs-2.1.1 release
* Thu Dec 18 2008 Josh Boyer <jwboyer@gmail.com> 2.1-2
- Fix broken dependency caused by just dropping -test
subpackage
* Thu Oct 16 2008 Eric Munson <ebmunson@us.ibm.com> 2.1-1
- Updating for libhuge-2.1 release
- Adding -devel and -utils subpackages for various utilities
and devel files.
* Wed May 14 2008 Eric Munson <ebmunson@us.ibm.com> 1.3-1
- Updating for libhuge-1.3 release
* Tue Mar 25 2008 Eric Munson <ebmunson@us.ibm.com> 1.2-1
- Removing test rpm target, and excluding test files
* Mon Mar 26 2007 Steve Fox <drfickle@k-lug.org> - 1.1-1
- New release (1.1)
- Fix directory ownership
* Wed Aug 30 2006 Steve Fox <drfickle@k-lug.org> - 0.20060825-1
- New release (1.0-preview4)
- patch0 (Makefile-ldscript.diff) merged upstream
* Tue Jul 25 2006 Steve Fox <drfickle@k-lug.org> - 0.20060706-4
- Bump for build system
* Tue Jul 25 2006 Steve Fox <drfickle@k-lug.org> - 0.20060706-3
- Don't use parallel build as it has random failures
* Thu Jul 20 2006 Steve Fox <drfickle@k-lug.org> - 0.20060706-2
- Fix the Makefile so that the ld.hugetlbfs script doesn't store the
DESTDIR in the path to the ldscripts dir
* Fri Jul 7 2006 Steve Fox <drfickle@k-lug.org> - 0.20060706-1
- New release which includes a fix for the syscall macro removal in the
Rawhide kernels
* Thu Jun 29 2006 Steve Fox <drfickle@k-lug.org> - 0.20060628-1
- First Fedora package
Loading…
Cancel
Save