Compare commits

No commits in common. 'c9' and 'i9c-beta' have entirely different histories.
c9 ... i9c-beta

@@ -0,0 +1,26 @@
From 9dcd32dd85f7f45c3fe6c8d7b1de3b4c322c6807 Mon Sep 17 00:00:00 2001
From: Tomas Bzatek <tbzatek@redhat.com>
Date: Mon, 11 Sep 2023 13:50:24 +0200
Subject: [PATCH] nvme: Require additional rpms for dracut
The '95nvmf' dracut module needs a couple more packages
for the NBFT (NVMe over TCP) to work - such as networking.
Local PCIe NVMe devices have no special needs.
---
blivet/devices/disk.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 2b49ef685..5053f7bb8 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -725,7 +725,8 @@ class NVMeFabricsNamespaceDevice(NVMeNamespaceDevice, NetworkStorageDevice):
""" NVMe fabrics namespace """
_type = "nvme-fabrics"
- _packages = ["nvme-cli"]
+ # dracut '95nvmf' module dependencies
+ _packages = ["nvme-cli", "dracut-network"]
def __init__(self, device, **kwargs):
"""

@@ -0,0 +1,107 @@
From 06597099906be55b106c234b3bf0c87ec7d90a07 Mon Sep 17 00:00:00 2001
From: Tomas Bzatek <tbzatek@redhat.com>
Date: Thu, 17 Aug 2023 14:45:18 +0200
Subject: [PATCH] nvme: Align HostNQN and HostID format to TP4126
Also don't overwrite existing files during startup() since they
might have been supplied by early boot stages.
---
blivet/nvme.py | 62 +++++++++++++++++++++++++++++++-------------------
1 file changed, 39 insertions(+), 23 deletions(-)
diff --git a/blivet/nvme.py b/blivet/nvme.py
index 17bead15e..5ac41cffa 100644
--- a/blivet/nvme.py
+++ b/blivet/nvme.py
@@ -18,16 +18,20 @@
#
import os
-import shutil
from . import errors
-from . import util
+
+import gi
+gi.require_version("BlockDev", "2.0")
+
+from gi.repository import BlockDev as blockdev
import logging
log = logging.getLogger("blivet")
-HOSTNQN_FILE = "/etc/nvme/hostnqn"
-HOSTID_FILE = "/etc/nvme/hostid"
+ETC_NVME_PATH = "/etc/nvme/"
+HOSTNQN_FILE = ETC_NVME_PATH + "hostnqn"
+HOSTID_FILE = ETC_NVME_PATH + "hostid"
class NVMe(object):
@@ -40,6 +44,8 @@ class NVMe(object):
def __init__(self):
self.started = False
+ self._hostnqn = None
+ self._hostid = None
# So that users can write nvme() to get the singleton instance
def __call__(self):
@@ -52,28 +58,38 @@ def startup(self):
if self.started:
return
- rc, nqn = util.run_program_and_capture_output(["nvme", "gen-hostnqn"])
- if rc != 0:
- raise errors.NVMeError("Failed to generate hostnqn")
-
- with open(HOSTNQN_FILE, "w") as f:
- f.write(nqn)
-
- rc, hid = util.run_program_and_capture_output(["dmidecode", "-s", "system-uuid"])
- if rc != 0:
- raise errors.NVMeError("Failed to generate host ID")
-
- with open(HOSTID_FILE, "w") as f:
- f.write(hid)
+ self._hostnqn = blockdev.nvme_get_host_nqn()
+ self._hostid = blockdev.nvme_get_host_id()
+ if not self._hostnqn:
+ self._hostnqn = blockdev.nvme_generate_host_nqn()
+ if not self._hostnqn:
+ raise errors.NVMeError("Failed to generate HostNQN")
+ if not self._hostid:
+ if 'uuid:' not in self._hostnqn:
+ raise errors.NVMeError("Missing UUID part in the HostNQN string '%s'" % self._hostnqn)
+ # derive HostID from HostNQN's UUID part
+ self._hostid = self._hostnqn.split('uuid:')[1]
+
+ # do not overwrite existing files, taken e.g. from initramfs
+ self.write("/", overwrite=False)
self.started = True
- def write(self, root): # pylint: disable=unused-argument
- # copy the hostnqn and hostid files
- if not os.path.isdir(root + "/etc/nvme"):
- os.makedirs(root + "/etc/nvme", 0o755)
- shutil.copyfile(HOSTNQN_FILE, root + HOSTNQN_FILE)
- shutil.copyfile(HOSTID_FILE, root + HOSTID_FILE)
+ def write(self, root, overwrite=True): # pylint: disable=unused-argument
+ # write down the hostnqn and hostid files
+ p = root + ETC_NVME_PATH
+ if not os.path.isdir(p):
+ os.makedirs(p, 0o755)
+ p = root + HOSTNQN_FILE
+ if overwrite or not os.path.isfile(p):
+ with open(p, "w") as f:
+ f.write(self._hostnqn)
+ f.write("\n")
+ p = root + HOSTID_FILE
+ if overwrite or not os.path.isfile(p):
+ with open(p, "w") as f:
+ f.write(self._hostid)
+ f.write("\n")
# Create nvme singleton
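
For context on the TP4126 alignment above: when libblockdev reports no HostID, the patch derives it from the UUID part of the HostNQN. A minimal standalone sketch of that derivation in Python (illustrative only, not code from the patch):

# Sketch: mirrors the hostnqn.split('uuid:') logic added to startup() above.
def derive_hostid(hostnqn):
    """Derive a TP4126-style HostID from the uuid: portion of a HostNQN."""
    if 'uuid:' not in hostnqn:
        raise ValueError("missing 'uuid:' part in HostNQN %r" % hostnqn)
    return hostnqn.split('uuid:')[1]

# derive_hostid("nqn.2014-08.org.nvmexpress:uuid:01234567-8900-abcd-efff-abcdabcdabcd")
# -> "01234567-8900-abcd-efff-abcdabcdabcd"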

@@ -0,0 +1,58 @@
From 63da3cb8a40500c889c8faa4326f81d16997a3c8 Mon Sep 17 00:00:00 2001
From: Tomas Bzatek <tbzatek@redhat.com>
Date: Mon, 27 Nov 2023 18:55:55 +0100
Subject: [PATCH] nvme: Retrieve HostNQN from a first active fabrics connection
When no /etc/nvme/hostnqn exists, look for any active NVMe over Fabrics
connections and take the values from the first one, rather than
generating new ones.
---
blivet/nvme.py | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/blivet/nvme.py b/blivet/nvme.py
index 5ac41cffa..2e4686e68 100644
--- a/blivet/nvme.py
+++ b/blivet/nvme.py
@@ -18,6 +18,7 @@
#
import os
+import glob
from . import errors
@@ -54,6 +55,22 @@ def __call__(self):
def __deepcopy__(self, memo_dict): # pylint: disable=unused-argument
return self
+ def _retrieve_fabrics_hostnqn(self):
+ for d in glob.glob('/sys/class/nvme-fabrics/ctl/nvme*/'):
+ try:
+ # invalidate old values
+ self._hostnqn = None
+ self._hostid = None
+ # read from sysfs
+ with open(os.path.join(d, 'hostnqn')) as f:
+ self._hostnqn = f.readline().strip()
+ with open(os.path.join(d, 'hostid')) as f:
+ self._hostid = f.readline().strip()
+ if self._hostnqn:
+ break
+ except Exception: # pylint: disable=broad-except
+ pass
+
def startup(self):
if self.started:
return
@@ -61,6 +78,10 @@ def startup(self):
self._hostnqn = blockdev.nvme_get_host_nqn()
self._hostid = blockdev.nvme_get_host_id()
if not self._hostnqn:
+ # see if there are any active fabrics connections and take their values over
+ self._retrieve_fabrics_hostnqn()
+ if not self._hostnqn:
+ # generate new values
self._hostnqn = blockdev.nvme_generate_host_nqn()
if not self._hostnqn:
raise errors.NVMeError("Failed to generate HostNQN")
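
As a standalone illustration of the sysfs lookup added by _retrieve_fabrics_hostnqn() above (a sketch only, separate from the patch code): active fabrics controllers expose hostnqn and hostid files under /sys/class/nvme-fabrics/ctl/.

import glob
import os

# Sketch: reuse hostnqn/hostid from the first active fabrics controller, if any.
for ctrl in glob.glob('/sys/class/nvme-fabrics/ctl/nvme*/'):
    try:
        with open(os.path.join(ctrl, 'hostnqn')) as f:
            hostnqn = f.readline().strip()
        with open(os.path.join(ctrl, 'hostid')) as f:
            hostid = f.readline().strip()
    except OSError:
        continue
    if hostnqn:
        print("reusing %s / %s from %s" % (hostnqn, hostid, ctrl))
        break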

@@ -0,0 +1,67 @@
From c807e234dfd07f3d0005c71501f0300284cd580b Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 6 Dec 2023 11:47:31 +0100
Subject: [PATCH] tests: Add a simple unit test for the NVMe module
---
tests/unit_tests/__init__.py | 1 +
tests/unit_tests/nvme_test.py | 38 +++++++++++++++++++++++++++++++++++
2 files changed, 39 insertions(+)
create mode 100644 tests/unit_tests/nvme_test.py
diff --git a/tests/unit_tests/__init__.py b/tests/unit_tests/__init__.py
index 589366e0f..62bef67f5 100644
--- a/tests/unit_tests/__init__.py
+++ b/tests/unit_tests/__init__.py
@@ -9,6 +9,7 @@
from .devicetree_test import *
from .events_test import *
from .misc_test import *
+from .nvme_test import *
from .parentlist_test import *
from .populator_test import *
from .size_test import *
diff --git a/tests/unit_tests/nvme_test.py b/tests/unit_tests/nvme_test.py
new file mode 100644
index 000000000..cb948687f
--- /dev/null
+++ b/tests/unit_tests/nvme_test.py
@@ -0,0 +1,38 @@
+import unittest
+
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
+
+from blivet.nvme import nvme
+
+
+class NVMeModuleTestCase(unittest.TestCase):
+
+ host_nqn = "nqn.2014-08.org.nvmexpress:uuid:01234567-8900-abcd-efff-abcdabcdabcd"
+
+ @patch("blivet.nvme.os")
+ @patch("blivet.nvme.blockdev")
+ def test_nvme_module(self, bd, os):
+ self.assertIsNotNone(nvme)
+ bd.nvme_get_host_nqn.return_value = self.host_nqn
+ bd.nvme_get_host_id.return_value = None # None = generate from host_nqn
+ os.path.isdir.return_value = False
+
+ # startup
+ with patch.object(nvme, "write") as write:
+ nvme.startup()
+ write.assert_called_once_with("/", overwrite=False)
+
+ self.assertTrue(nvme.started)
+ self.assertEqual(nvme._hostnqn, self.host_nqn)
+ self.assertEqual(nvme._hostid, "01234567-8900-abcd-efff-abcdabcdabcd")
+
+ # write
+ with patch("blivet.nvme.open") as op:
+ nvme.write("/test")
+
+ os.makedirs.assert_called_with("/test/etc/nvme/", 0o755)
+ op.assert_any_call("/test/etc/nvme/hostnqn", "w")
+ op.assert_any_call("/test/etc/nvme/hostid", "w")

@@ -0,0 +1,206 @@
From c20296b2df89a9edc4ea9cc41f94df89a8fbfd26 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 20 Apr 2023 12:35:30 +0200
Subject: [PATCH] Add support for creating shared LVM setups
This feature is requested by GFS2 for the storage role. This adds
support for creating shared VGs and activating LVs in shared mode.
Resolves: RHEL-324
---
blivet/devices/lvm.py | 44 +++++++++++++++++++----
blivet/tasks/availability.py | 9 +++++
tests/unit_tests/devices_test/lvm_test.py | 25 +++++++++++++
3 files changed, 72 insertions(+), 6 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index ca45c4b5..068c5368 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -97,7 +97,8 @@ class LVMVolumeGroupDevice(ContainerDevice):
def __init__(self, name, parents=None, size=None, free=None,
pe_size=None, pe_count=None, pe_free=None, pv_count=None,
- uuid=None, exists=False, sysfs_path='', exported=False):
+ uuid=None, exists=False, sysfs_path='', exported=False,
+ shared=False):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -124,6 +125,11 @@ class LVMVolumeGroupDevice(ContainerDevice):
:type pv_count: int
:keyword uuid: the VG UUID
:type uuid: str
+
+ For non-existing VGs only:
+
+ :keyword shared: whether to create this VG as shared
+ :type shared: bool
"""
# These attributes are used by _add_parent, so they must be initialized
# prior to instantiating the superclass.
@@ -137,6 +143,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
self.pe_count = util.numeric_type(pe_count)
self.pe_free = util.numeric_type(pe_free)
self.exported = exported
+ self._shared = shared
# TODO: validate pe_size if given
if not self.pe_size:
@@ -254,7 +261,19 @@ class LVMVolumeGroupDevice(ContainerDevice):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
pv_list = [pv.path for pv in self.parents]
- blockdev.lvm.vgcreate(self.name, pv_list, self.pe_size)
+ extra = dict()
+ if self._shared:
+ extra["shared"] = ""
+ blockdev.lvm.vgcreate(self.name, pv_list, self.pe_size, **extra)
+
+ if self._shared:
+ if availability.BLOCKDEV_LVM_PLUGIN_SHARED.available:
+ try:
+ blockdev.lvm.vglock_start(self.name)
+ except blockdev.LVMError as err:
+ raise errors.LVMError(err)
+ else:
+ raise errors.LVMError("Shared LVM is not fully supported: %s" % ",".join(availability.BLOCKDEV_LVM_PLUGIN_SHARED.availability_errors))
def _post_create(self):
self._complete = True
@@ -661,7 +680,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
fmt=None, exists=False, sysfs_path='', grow=None, maxsize=None,
percent=None, cache_request=None, pvs=None, from_lvs=None,
- stripe_size=0):
+ stripe_size=0, shared=False):
if not exists:
if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
@@ -690,6 +709,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
self.seg_type = seg_type or "linear"
self._raid_level = None
self.ignore_skip_activation = 0
+ self._shared = shared
self.req_grow = None
self.req_max_size = Size(0)
@@ -2306,7 +2326,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
parent_lv=None, int_type=None, origin=None, vorigin=False,
metadata_size=None, chunk_size=None, profile=None, from_lvs=None,
compression=False, deduplication=False, index_memory=0,
- write_policy=None, cache_mode=None, attach_to=None, stripe_size=0):
+ write_policy=None, cache_mode=None, attach_to=None, stripe_size=0,
+ shared=False):
"""
:param name: the device name (generally a device node's basename)
:type name: str
@@ -2337,6 +2358,8 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
:type cache_request: :class:`~.devices.lvm.LVMCacheRequest`
:keyword pvs: list of PVs to allocate extents from (size could be specified for each PV)
:type pvs: list of :class:`~.devices.StorageDevice` or :class:`LVPVSpec` objects (tuples)
+ :keyword shared: whether to activate the newly created LV in shared mode
+ :type shared: bool
For internal LVs only:
@@ -2412,7 +2435,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
LVMLogicalVolumeBase.__init__(self, name, parents, size, uuid, seg_type,
fmt, exists, sysfs_path, grow, maxsize,
percent, cache_request, pvs, from_lvs,
- stripe_size)
+ stripe_size, shared)
LVMVDOPoolMixin.__init__(self, compression, deduplication, index_memory,
write_policy)
LVMVDOLogicalVolumeMixin.__init__(self)
@@ -2634,7 +2657,13 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
ignore_skip_activation = self.is_snapshot_lv or self.ignore_skip_activation > 0
- blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation)
+ if self._shared:
+ if availability.BLOCKDEV_LVM_PLUGIN_SHARED.available:
+ blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation, shared=True)
+ else:
+ raise errors.LVMError("Shared LVM is not fully supported: %s" % ",".join(availability.BLOCKDEV_LVM_PLUGIN_SHARED.availability_errors))
+ else:
+ blockdev.lvm.lvactivate(self.vg.name, self._name, ignore_skip=ignore_skip_activation)
@type_specific
def _pre_create(self):
@@ -2672,6 +2701,9 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
if self._stripe_size:
extra["stripesize"] = str(int(self._stripe_size.convert_to("KiB")))
+ if self._shared:
+ extra["activate"] = "sy"
+
blockdev.lvm.lvcreate(self.vg.name, self._name, self.size,
type=self.seg_type, pv_list=pvs, **extra)
else:
diff --git a/blivet/tasks/availability.py b/blivet/tasks/availability.py
index bba1ba84..85945c77 100644
--- a/blivet/tasks/availability.py
+++ b/blivet/tasks/availability.py
@@ -435,6 +435,14 @@ if hasattr(blockdev.LVMTech, "VDO"):
else:
BLOCKDEV_LVM_TECH_VDO = _UnavailableMethod(error_msg="Installed version of libblockdev doesn't support LVM VDO technology")
+if hasattr(blockdev.LVMTech, "SHARED"):
+ BLOCKDEV_LVM_SHARED = BlockDevTechInfo(plugin_name="lvm",
+ check_fn=blockdev.lvm_is_tech_avail,
+ technologies={blockdev.LVMTech.SHARED: blockdev.LVMTechMode.MODIFY}) # pylint: disable=no-member
+ BLOCKDEV_LVM_TECH_SHARED = BlockDevMethod(BLOCKDEV_LVM_SHARED)
+else:
+ BLOCKDEV_LVM_TECH_SHARED = _UnavailableMethod(error_msg="Installed version of libblockdev doesn't support shared LVM technology")
+
# libblockdev mdraid plugin required technologies and modes
BLOCKDEV_MD_ALL_MODES = (blockdev.MDTechMode.CREATE |
blockdev.MDTechMode.DELETE |
@@ -476,6 +484,7 @@ BLOCKDEV_DM_PLUGIN_RAID = blockdev_plugin("libblockdev dm plugin (raid technolog
BLOCKDEV_LOOP_PLUGIN = blockdev_plugin("libblockdev loop plugin", BLOCKDEV_LOOP_TECH)
BLOCKDEV_LVM_PLUGIN = blockdev_plugin("libblockdev lvm plugin", BLOCKDEV_LVM_TECH)
BLOCKDEV_LVM_PLUGIN_VDO = blockdev_plugin("libblockdev lvm plugin (vdo technology)", BLOCKDEV_LVM_TECH_VDO)
+BLOCKDEV_LVM_PLUGIN_SHARED = blockdev_plugin("libblockdev lvm plugin (shared LVM technology)", BLOCKDEV_LVM_TECH_SHARED)
BLOCKDEV_MDRAID_PLUGIN = blockdev_plugin("libblockdev mdraid plugin", BLOCKDEV_MD_TECH)
BLOCKDEV_MPATH_PLUGIN = blockdev_plugin("libblockdev mpath plugin", BLOCKDEV_MPATH_TECH)
BLOCKDEV_SWAP_PLUGIN = blockdev_plugin("libblockdev swap plugin", BLOCKDEV_SWAP_TECH)
diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
index d7b55224..e645309f 100644
--- a/tests/unit_tests/devices_test/lvm_test.py
+++ b/tests/unit_tests/devices_test/lvm_test.py
@@ -476,6 +476,31 @@ class LVMDeviceTest(unittest.TestCase):
lv.setup()
lvm.lvactivate.assert_called_with(vg.name, lv.lvname, ignore_skip=False)
+ @patch("blivet.tasks.availability.BLOCKDEV_LVM_PLUGIN_SHARED",
+ new=blivet.tasks.availability.ExternalResource(blivet.tasks.availability.AvailableMethod, ""))
+ def test_lv_activate_shared(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], exists=True)
+ lv = LVMLogicalVolumeDevice("data_lv", parents=[vg], size=Size("500 MiB"), exists=True, shared=True)
+
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ with patch.object(lv, "_pre_setup"):
+ lv.setup()
+ lvm.lvactivate.assert_called_with(vg.name, lv.lvname, ignore_skip=False, shared=True)
+
+ @patch("blivet.tasks.availability.BLOCKDEV_LVM_PLUGIN_SHARED",
+ new=blivet.tasks.availability.ExternalResource(blivet.tasks.availability.AvailableMethod, ""))
+ def test_vg_create_shared(self):
+ pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
+ size=Size("1 GiB"), exists=True)
+ vg = LVMVolumeGroupDevice("testvg", parents=[pv], shared=True)
+
+ with patch("blivet.devices.lvm.blockdev.lvm") as lvm:
+ vg._create()
+ lvm.vgcreate.assert_called_with(vg.name, [pv.path], Size("4 MiB"), shared="")
+ lvm.vglock_start.assert_called_with(vg.name)
+
def test_vg_is_empty(self):
pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
size=Size("1024 MiB"))
--
2.41.0
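
A hedged usage sketch for the shared LVM support above, closely following the new unit tests (device names and sizes are made up; a libblockdev build with the SHARED LVM technology and a running lock manager are assumed):

import blivet
from blivet.devices import StorageDevice, LVMVolumeGroupDevice, LVMLogicalVolumeDevice
from blivet.size import Size

# hypothetical PV backing device, as in the unit tests
pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                   size=Size("1 GiB"), exists=True)

# shared=True makes _create() pass shared="" to vgcreate and then call vglock_start()
vg = LVMVolumeGroupDevice("testvg", parents=[pv], shared=True)

# shared=True makes setup() activate the LV with lvactivate(..., shared=True)
lv = LVMLogicalVolumeDevice("data_lv", parents=[vg], size=Size("500 MiB"),
                            shared=True)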

@@ -0,0 +1,60 @@
From d7708bca72f4a7d0bfa732912e2087bd6aa8f379 Mon Sep 17 00:00:00 2001
From: Steffen Maier <maier@linux.ibm.com>
Date: Thu, 23 Feb 2023 13:28:50 +0100
Subject: [PATCH] add udev-builtin-path_id property to zfcp-attached SCSI disks
so anaconda can use it to display path_id information for multipath
members
Signed-off-by: Steffen Maier <maier@linux.ibm.com>
---
blivet/devices/disk.py | 2 ++
blivet/populator/helpers/disk.py | 1 +
tests/unit_tests/tags_test.py | 2 +-
3 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 8842b4dc..746f6d58 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -556,10 +556,12 @@ class ZFCPDiskDevice(DiskDevice):
:keyword hba_id: ???
:keyword wwpn: ???
:keyword fcp_lun: ???
+ :keyword id_path: string from udev-builtin-path_id
"""
self.hba_id = kwargs.pop("hba_id")
self.wwpn = kwargs.pop("wwpn")
self.fcp_lun = kwargs.pop("fcp_lun")
+ self.id_path = kwargs.pop("id_path")
DiskDevice.__init__(self, device, **kwargs)
self._clear_local_tags()
self.tags.add(Tags.remote)
diff --git a/blivet/populator/helpers/disk.py b/blivet/populator/helpers/disk.py
index cf20d302..92e85688 100644
--- a/blivet/populator/helpers/disk.py
+++ b/blivet/populator/helpers/disk.py
@@ -223,6 +223,7 @@ class ZFCPDevicePopulator(DiskDevicePopulator):
def _get_kwargs(self):
kwargs = super(ZFCPDevicePopulator, self)._get_kwargs()
+ kwargs["id_path"] = udev.device_get_path(self.data)
for attr in ['hba_id', 'wwpn', 'fcp_lun']:
kwargs[attr] = udev.device_get_zfcp_attribute(self.data, attr=attr)
diff --git a/tests/unit_tests/tags_test.py b/tests/unit_tests/tags_test.py
index 49a2d72e..15fa2a40 100644
--- a/tests/unit_tests/tags_test.py
+++ b/tests/unit_tests/tags_test.py
@@ -72,7 +72,7 @@ class DeviceTagsTest(unittest.TestCase):
fcoe_device = FcoeDiskDevice('test6', nic=None, identifier=None, id_path=None)
self.assertIn(Tags.remote, fcoe_device.tags)
self.assertNotIn(Tags.local, fcoe_device.tags)
- zfcp_device = ZFCPDiskDevice('test7', hba_id=None, wwpn=None, fcp_lun=None)
+ zfcp_device = ZFCPDiskDevice('test7', hba_id=None, wwpn=None, fcp_lun=None, id_path=None)
self.assertIn(Tags.remote, zfcp_device.tags)
self.assertNotIn(Tags.local, zfcp_device.tags)
--
2.43.0

@@ -0,0 +1,172 @@
From 517f17481685afbabea6750b57d71a736f9a157e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 25 May 2023 17:02:39 +0200
Subject: [PATCH] Do not add new PVs to the LVM devices file if it doesn't
exist and VGs are present
If there is a preexisting VG on the system when we create a new PV
and the LVM devices file doesn't exist we will create it and add
only the new PV to it which means the preexisting VG will now be
ignored by LVM tools. This change skips adding newly created PVs
to the devices file in the same way 'pvcreate' and 'vgcreate' do.
---
blivet/devicelibs/lvm.py | 3 +
blivet/formats/lvmpv.py | 17 ++++-
tests/unit_tests/formats_tests/__init__.py | 1 +
tests/unit_tests/formats_tests/lvmpv_test.py | 73 ++++++++++++++++++++
4 files changed, 91 insertions(+), 3 deletions(-)
create mode 100644 tests/unit_tests/formats_tests/lvmpv_test.py
diff --git a/blivet/devicelibs/lvm.py b/blivet/devicelibs/lvm.py
index 16a8e8f8..dc7d0cbe 100644
--- a/blivet/devicelibs/lvm.py
+++ b/blivet/devicelibs/lvm.py
@@ -84,6 +84,9 @@ if hasattr(blockdev.LVMTech, "DEVICES"):
else:
HAVE_LVMDEVICES = False
+
+LVM_DEVICES_FILE = "/etc/lvm/devices/system.devices"
+
# list of devices that LVM is allowed to use
# with LVM >= 2.0.13 we'll use this for the --devices option and when creating
# the /etc/lvm/devices/system.devices file
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index cb01b2f3..65acedbe 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -36,7 +36,7 @@ from ..size import Size
from ..errors import PhysicalVolumeError
from . import DeviceFormat, register_device_format
from .. import udev
-from ..static_data.lvm_info import pvs_info
+from ..static_data.lvm_info import pvs_info, vgs_info
import logging
log = logging.getLogger("blivet")
@@ -121,10 +121,21 @@ class LVMPhysicalVolume(DeviceFormat):
def supported(self):
return super(LVMPhysicalVolume, self).supported and self._plugin.available
- def lvmdevices_add(self):
+ def lvmdevices_add(self, force=True):
+ """ Add this PV to the LVM system devices file
+ :keyword force: whether to add the PV even if the system devices file doesn't exist and
+ VGs are present in the system
+ :type force: bool
+ """
+
if not lvm.HAVE_LVMDEVICES:
raise PhysicalVolumeError("LVM devices file feature is not supported")
+ if not os.path.exists(lvm.LVM_DEVICES_FILE) and vgs_info.cache and not force:
+ log.debug("Not adding %s to devices file: %s doesn't exist and there are VGs present in the system",
+ self.device, lvm.LVM_DEVICES_FILE)
+ return
+
try:
blockdev.lvm.devices_add(self.device)
except blockdev.LVMError as e:
@@ -151,7 +162,7 @@ class LVMPhysicalVolume(DeviceFormat):
# with lvmdbusd we need to call the pvcreate without --devices otherwise lvmdbusd
# wouldn't be able to find the newly created pv and the call would fail
blockdev.lvm.pvcreate(self.device, data_alignment=self.data_alignment, extra=[ea_yes])
- self.lvmdevices_add()
+ self.lvmdevices_add(force=False)
else:
blockdev.lvm.pvcreate(self.device, data_alignment=self.data_alignment, extra=[ea_yes])
diff --git a/tests/unit_tests/formats_tests/__init__.py b/tests/unit_tests/formats_tests/__init__.py
index d678900b..95c7a25b 100644
--- a/tests/unit_tests/formats_tests/__init__.py
+++ b/tests/unit_tests/formats_tests/__init__.py
@@ -2,6 +2,7 @@ from .device_test import *
from .disklabel_test import *
from .init_test import *
from .luks_test import *
+from .lvmpv_test import *
from .methods_test import *
from .misc_test import *
from .selinux_test import *
diff --git a/tests/unit_tests/formats_tests/lvmpv_test.py b/tests/unit_tests/formats_tests/lvmpv_test.py
new file mode 100644
index 00000000..6490c7d4
--- /dev/null
+++ b/tests/unit_tests/formats_tests/lvmpv_test.py
@@ -0,0 +1,73 @@
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch
+
+from contextlib import contextmanager
+
+import unittest
+
+from blivet.formats.lvmpv import LVMPhysicalVolume
+
+
+class LVMPVNodevTestCase(unittest.TestCase):
+
+ @contextmanager
+ def patches(self):
+ patchers = dict()
+ mocks = dict()
+
+ patchers["blockdev"] = patch("blivet.formats.lvmpv.blockdev")
+ patchers["lvm"] = patch("blivet.formats.lvmpv.lvm")
+ patchers["vgs_info"] = patch("blivet.formats.lvmpv.vgs_info")
+ patchers["os"] = patch("blivet.formats.lvmpv.os")
+
+ for name, patcher in patchers.items():
+ mocks[name] = patcher.start()
+
+ yield mocks
+
+ for patcher in patchers.values():
+ patcher.stop()
+
+ def test_lvm_devices(self):
+ fmt = LVMPhysicalVolume(device="/dev/test")
+
+ with self.patches() as mock:
+ # LVM devices file not enabled/supported -> devices_add should not be called
+ mock["lvm"].HAVE_LVMDEVICES = False
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_not_called()
+
+ with self.patches() as mock:
+ # LVM devices file enabled and devices file exists -> devices_add should be called
+ mock["lvm"].HAVE_LVMDEVICES = True
+ mock["os"].path.exists.return_value = True
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test")
+
+ with self.patches() as mock:
+ # LVM devices file enabled and devices file doesn't exist
+ # and no existing VGs present -> devices_add should be called
+ mock["lvm"].HAVE_LVMDEVICES = True
+ mock["os"].path.exists.return_value = False
+ mock["vgs_info"].cache = {}
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_called_with("/dev/test")
+
+ with self.patches() as mock:
+ # LVM devices file enabled and devices file doesn't exist
+ # and existing VGs present -> devices_add should not be called
+ mock["lvm"].HAVE_LVMDEVICES = True
+ mock["os"].path.exists.return_value = False
+ mock["vgs_info"].cache = {"fake_vg_uuid": "fake_vg_data"}
+
+ fmt._create()
+
+ mock["blockdev"].lvm.devices_add.assert_not_called()
--
2.43.0
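
A condensed restatement of the guard that lvmdevices_add(force=False) now applies (a sketch, not the actual blivet code):

# Only register a newly created PV in /etc/lvm/devices/system.devices when doing
# so cannot hide preexisting VGs from the LVM tools.
def should_add_to_devices_file(devices_file_exists, existing_vgs, force=True):
    if force:
        return True            # explicit/default behaviour of lvmdevices_add()
    if devices_file_exists:
        return True            # the file is already in use, keep it up to date
    return not existing_vgs    # no file yet: only safe when no VGs are present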

@@ -0,0 +1,129 @@
From 0777b9d519421f3c46f6dcd51e39ecdc2956e2e0 Mon Sep 17 00:00:00 2001
From: Jan Pokorny <japokorn@redhat.com>
Date: Thu, 25 Apr 2024 14:06:13 +0200
Subject: [PATCH] Added support for PV grow
The storage role requires support for the case when a PV has to be resized
to fill all available space after its device's size changes (usually on a VM).
A new flag 'grow_to_fill' was added, which marks the device for size
expansion (all available space is taken).
The proper size is determined by LVM, avoiding inaccurate size
calculations in blivet.
---
blivet/formats/__init__.py | 4 +++-
blivet/formats/lvmpv.py | 23 ++++++++++++++++++-
blivet/tasks/pvtask.py | 7 +++++-
.../storage_tests/formats_test/lvmpv_test.py | 10 ++++++++
4 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/blivet/formats/__init__.py b/blivet/formats/__init__.py
index b1ad740e..eb8b6ab3 100644
--- a/blivet/formats/__init__.py
+++ b/blivet/formats/__init__.py
@@ -424,7 +424,9 @@ class DeviceFormat(ObjectID):
if not self.resizable:
raise FormatResizeError("format not resizable", self.device)
- if self.target_size == self.current_size:
+ # skip if sizes are equal unless grow to fill on lvmpv is requested
+ if (self.target_size == self.current_size and
+ (self.type != "lvmpv" or not self.grow_to_fill)): # pylint: disable=no-member
return
if not self._resize.available:
diff --git a/blivet/formats/lvmpv.py b/blivet/formats/lvmpv.py
index 65acedbe..51fa4a3c 100644
--- a/blivet/formats/lvmpv.py
+++ b/blivet/formats/lvmpv.py
@@ -33,7 +33,7 @@ from ..devicelibs import lvm
from ..tasks import availability, pvtask
from ..i18n import N_
from ..size import Size
-from ..errors import PhysicalVolumeError
+from ..errors import DeviceFormatError, PhysicalVolumeError
from . import DeviceFormat, register_device_format
from .. import udev
from ..static_data.lvm_info import pvs_info, vgs_info
@@ -98,6 +98,9 @@ class LVMPhysicalVolume(DeviceFormat):
self.inconsistent_vg = False
+ # when set to True, blivet will try to resize the PV to fill all available space
+ self._grow_to_fill = False
+
def __repr__(self):
s = DeviceFormat.__repr__(self)
s += (" vg_name = %(vg_name)s vg_uuid = %(vg_uuid)s"
@@ -106,6 +109,24 @@ class LVMPhysicalVolume(DeviceFormat):
"pe_start": self.pe_start, "data_alignment": self.data_alignment})
return s
+ @property
+ def grow_to_fill(self):
+ """
+ Can be set to True to mark format for resize so it matches size of its device.
+ (Main use case is disk size increase on a VM)
+ Uses blockdev/lvm for exact new size calculation.
+ ActionResizeFormat has to be executed to apply the change.
+ Format has to be resizable (i.e. run format.update_size_info() first) to allow this.
+ """
+ return self._grow_to_fill
+
+ @grow_to_fill.setter
+ def grow_to_fill(self, fill: bool):
+ if fill is True:
+ if not self.resizable:
+ raise DeviceFormatError("format is not resizable")
+ self._grow_to_fill = fill
+
@property
def dict(self):
d = super(LVMPhysicalVolume, self).dict
diff --git a/blivet/tasks/pvtask.py b/blivet/tasks/pvtask.py
index 04c8a4d1..b5bd72e0 100644
--- a/blivet/tasks/pvtask.py
+++ b/blivet/tasks/pvtask.py
@@ -82,6 +82,11 @@ class PVResize(task.BasicApplication, dfresize.DFResizeTask):
def do_task(self): # pylint: disable=arguments-differ
""" Resizes the LVMPV format. """
try:
- blockdev.lvm.pvresize(self.pv.device, self.pv.target_size.convert_to(self.unit))
+ if self.pv.grow_to_fill:
+ # resize PV to fill all available space on device by omitting
+ # the size parameter
+ blockdev.lvm.pvresize(self.pv.device, 0)
+ else:
+ blockdev.lvm.pvresize(self.pv.device, self.pv.target_size.convert_to(self.unit))
except blockdev.LVMError as e:
raise PhysicalVolumeError(e)
diff --git a/tests/storage_tests/formats_test/lvmpv_test.py b/tests/storage_tests/formats_test/lvmpv_test.py
index cdc33ec4..d2811f3e 100644
--- a/tests/storage_tests/formats_test/lvmpv_test.py
+++ b/tests/storage_tests/formats_test/lvmpv_test.py
@@ -37,6 +37,9 @@ class LVMPVTestCase(loopbackedtestcase.LoopBackedTestCase):
self.fmt.update_size_info()
self.assertTrue(self.fmt.resizable)
+ # save the pv maximum size
+ maxpvsize = self.fmt.current_size
+
# resize the format
new_size = Size("50 MiB")
self.fmt.target_size = new_size
@@ -46,5 +49,12 @@ class LVMPVTestCase(loopbackedtestcase.LoopBackedTestCase):
self.fmt.update_size_info()
self.assertEqual(self.fmt.current_size, new_size)
+ # Test growing PV to fill all available space on the device
+ self.fmt.grow_to_fill = True
+ self.fmt.do_resize()
+
+ self.fmt.update_size_info()
+ self.assertEqual(self.fmt.current_size, maxpvsize)
+
def _pvremove(self):
self.fmt._destroy()
--
2.45.0
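
A hedged usage sketch for grow_to_fill, based on the property docstring and the storage test above (pv_fmt stands for an existing LVM PV format object and is an assumption of this sketch):

# Grow an existing PV to fill its (enlarged) backing device.
pv_fmt.update_size_info()      # grow_to_fill requires the format to be resizable
pv_fmt.grow_to_fill = True     # raises DeviceFormatError if the format is not resizable
pv_fmt.do_resize()             # pvresize is called with size 0, so LVM picks the full device size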

File diff suppressed because it is too large

@@ -0,0 +1,76 @@
From c2e247fe953568a65c73f5408a6da7af12c4d6a1 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 18 Jun 2024 14:47:39 +0200
Subject: [PATCH 1/2] tests: Try waiting after partition creation for XFS
resize test
The test randomly fails to find the newly created partition, so
let's try waiting a bit with udev settle.
---
tests/skip.yml | 6 ------
tests/storage_tests/formats_test/fs_test.py | 2 ++
2 files changed, 2 insertions(+), 6 deletions(-)
diff --git a/tests/skip.yml b/tests/skip.yml
index c0ca0eaf..8d353b1b 100644
--- a/tests/skip.yml
+++ b/tests/skip.yml
@@ -23,9 +23,3 @@
# - all "skips" can specified as a list, for example 'version: [10, 11]'
---
-
-- test: storage_tests.formats_test.fs_test.XFSTestCase.test_resize
- skip_on:
- - distro: ["centos", "enterprise_linux"]
- version: "9"
- reason: "Creating partitions on loop devices is broken on CentOS/RHEL 9 latest kernel"
diff --git a/tests/storage_tests/formats_test/fs_test.py b/tests/storage_tests/formats_test/fs_test.py
index 1d42dc21..59c0f998 100644
--- a/tests/storage_tests/formats_test/fs_test.py
+++ b/tests/storage_tests/formats_test/fs_test.py
@@ -10,6 +10,7 @@ from blivet.errors import DeviceFormatError, FSError
from blivet.formats import get_format
from blivet.devices import PartitionDevice, DiskDevice
from blivet.flags import flags
+from blivet import udev
from .loopbackedtestcase import LoopBackedTestCase
@@ -107,6 +108,7 @@ class XFSTestCase(fstesting.FSAsRoot):
pend = pstart + int(Size(size) / disk.format.parted_device.sectorSize)
disk.format.add_partition(pstart, pend, parted.PARTITION_NORMAL)
disk.format.parted_disk.commit()
+ udev.settle()
part = disk.format.parted_disk.getPartitionBySector(pstart)
device = PartitionDevice(os.path.basename(part.path))
--
2.45.2
From 511d64c69618de0e7bb567353e5e0c92b61da10e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 7 Mar 2024 09:45:28 +0100
Subject: [PATCH 2/2] Fix util.detect_virt on Amazon
---
blivet/util.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/util.py b/blivet/util.py
index 3040ee5a..15d41b4f 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -1137,7 +1137,7 @@ def detect_virt():
except (safe_dbus.DBusCallError, safe_dbus.DBusPropertyError):
return False
else:
- return vm[0] in ('qemu', 'kvm', 'xen')
+ return vm[0] in ('qemu', 'kvm', 'xen', 'microsoft', 'amazon')
def natural_sort_key(device):
--
2.45.2

@@ -23,7 +23,7 @@ Version: 3.6.0
#%%global prerelease .b2
# prerelease, if defined, should be something like .a1, .b1, .b2.dev1, or .c2
Release: 9%{?prerelease}%{?dist}
Release: 17%{?prerelease}%{?dist}
Epoch: 1
License: LGPLv2+
%global realname blivet
@@ -46,6 +46,16 @@ Patch12: 0013-Fix-setting-kickstart-data.patch
Patch13: 0014-Do-not-set-memory-limit-for-LUKS2-when-running-in-FI.patch
Patch14: 0015-Add-support-for-filesystem-online-resize.patch
Patch15: 0016-Backport-iSCSI-initiator-name-related-fixes.patch
Patch16: 0017-nvme-additional-rpms-for-dracut.patch
Patch17: 0018-nvme-TP4126-fixes-1.patch
Patch18: 0019-nvme-hostnqn_from_active_fabrics_connection.patch
Patch19: 0020-nvme-add_unit_tests.patch
Patch20: 0021-Add-support-for-creating-shared-LVM-setups.patch
Patch21: 0022-add-udev-builtin-path_id-property-to-zfcp-attached-S.patch
Patch22: 0023-Do-not-add-new-PVs-to-the-LVM-devices-file-if-it-doe.patch
Patch23: 0024-Added-support-for-PV-grow.patch
Patch24: 0025-Stratis-fixes-backport.patch
Patch25: 0026-XFS-resize-test-fix.patch
# Versions of required components (done so we make sure the buildrequires
# match the requires versions of things).
@@ -209,6 +219,42 @@ configuration.
%endif
%changelog
* Mon Jul 22 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-17
- Fix 'Try waiting after partition creation for XFS resize test'
Resolves: RHEL-8009
* Thu Jun 27 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-16
- tests: Try waiting after partition creation for XFS resize test
Resolves: RHEL-8009
* Thu May 16 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-15
- Backport fixes for Stratis support needed for storage role
Resolves: RHEL-35382
- Add support for resizing PVs to the size of the underlying block device
Resolves: RHEL-35386
* Fri Feb 09 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-14
- Do not add new PVs to the LVM devices file if it doesn't exist and VGs are present
Resolves: RHEL-473
* Thu Jan 18 2024 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-13
- add udev-builtin-path_id property to zfcp-attached SCSI disks
Resolves: RHEL-22007
* Wed Dec 13 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-12
- Add support for creating shared LVM setups
Resolves: RHEL-324
* Mon Dec 11 2023 Tomas Bzatek <tbzatek@redhat.com> - 3.6.0-11
- nvme: Retrieve HostNQN from a first active fabrics connection
- tests: Add a simple unit test for the NVMe module
Resolves: RHEL-11541
* Tue Sep 26 2023 Tomas Bzatek <tbzatek@redhat.com> - 3.6.0-10
- nvme: Require additional rpms for dracut
Resolves: RHEL-2855
- nvme: Align HostNQN and HostID format to TP-4126
Resolves: RHEL-1254
* Mon Jul 24 2023 Jan Pokorny <japokorn@redhat.com> - 3.6.0-9
- Backport iSCSI initiator name related fixes:
@@ -237,6 +283,9 @@ configuration.
- Add support for specifying stripe size for RAID LVs
Resolves: RHEL-327
* Fri Apr 14 2023 MSVSphere Packaging Team <packager@msvsphere.ru> - 3.6.0-5
- Rebuilt for MSVSphere 9.2 beta
* Thu Jan 19 2023 Vojtech Trefny <vtrefny@redhat.com> - 3.6.0-5
- Default to encryption sector size 512 for LUKS devices
Resolves: rhbz#2103800
