import python-blivet-3.6.0-5.el9

Branch: c9-beta, tag: imports/c9-beta/python-blivet-3.6.0-5.el9
Authored by CentOS Sources, committed by MSVSphere Packaging Team
commit 4d75113c31

.gitignore

@@ -0,0 +1,2 @@
SOURCES/blivet-3.6.0-tests.tar.gz
SOURCES/blivet-3.6.0.tar.gz

.python-blivet.metadata

@@ -0,0 +1,2 @@
8393baa22cb433d1012e3923ad0bc232401116c6 SOURCES/blivet-3.6.0-tests.tar.gz
e9d95c1165703fed3da1f35a9199197bfff68f98 SOURCES/blivet-3.6.0.tar.gz

@@ -0,0 +1,35 @@
From 2759aaa9cbee38f80819bc136bb893184429380c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 11 Jul 2018 15:36:24 +0200
Subject: [PATCH] Force command line based libblockdev LVM plugin
---
blivet/__init__.py | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/blivet/__init__.py b/blivet/__init__.py
index dd8d0f54..62cc539a 100644
--- a/blivet/__init__.py
+++ b/blivet/__init__.py
@@ -63,11 +63,16 @@ gi.require_version("BlockDev", "2.0")
from gi.repository import GLib
from gi.repository import BlockDev as blockdev
if arch.is_s390():
- _REQUESTED_PLUGIN_NAMES = set(("lvm", "btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "s390", "nvdimm"))
+ _REQUESTED_PLUGIN_NAMES = set(("btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "s390", "nvdimm"))
else:
- _REQUESTED_PLUGIN_NAMES = set(("lvm", "btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvdimm"))
+ _REQUESTED_PLUGIN_NAMES = set(("btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvdimm"))
_requested_plugins = blockdev.plugin_specs_from_names(_REQUESTED_PLUGIN_NAMES)
+# XXX force non-dbus LVM plugin
+lvm_plugin = blockdev.PluginSpec()
+lvm_plugin.name = blockdev.Plugin.LVM
+lvm_plugin.so_name = "libbd_lvm.so.2"
+_requested_plugins.append(lvm_plugin)
try:
# do not check for dependencies during libblockdev initializtion, do runtime
# checks instead
--
2.37.3
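
For context: with "lvm" dropped from the name-based plugin specs, the explicit PluginSpec above pins libblockdev to the command-line LVM plugin (libbd_lvm.so.2) rather than the DBus one (libbd_lvm-dbus.so.2). A minimal sketch, assuming the libblockdev 2.x GObject introspection bindings, of verifying which shared object actually backs the plugin after initialization:

```python
import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev

# pin the LVM plugin to the command-line backend, as the patch above does
lvm_plugin = blockdev.PluginSpec()
lvm_plugin.name = blockdev.Plugin.LVM
lvm_plugin.so_name = "libbd_lvm.so.2"

blockdev.switch_init_checks(False)  # do runtime checks instead, as blivet does
blockdev.try_reinit(require_plugins=[lvm_plugin], reload=False, log_func=None)

# should report "libbd_lvm.so.2", not the DBus backend
print(blockdev.get_plugin_soname(blockdev.Plugin.LVM))
```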

@@ -0,0 +1,28 @@
From f27bdff18e98548f4c094b8cce23ca2d6270e30d Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Mon, 16 Jul 2018 14:26:11 +0200
Subject: [PATCH] Remove btrfs from requested libblockdev plugins
---
blivet/__init__.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/blivet/__init__.py b/blivet/__init__.py
index 62cc539a..bbc7ea3a 100644
--- a/blivet/__init__.py
+++ b/blivet/__init__.py
@@ -63,9 +63,9 @@ gi.require_version("BlockDev", "2.0")
from gi.repository import GLib
from gi.repository import BlockDev as blockdev
if arch.is_s390():
- _REQUESTED_PLUGIN_NAMES = set(("btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "s390", "nvdimm"))
+ _REQUESTED_PLUGIN_NAMES = set(("swap", "crypto", "loop", "mdraid", "mpath", "dm", "s390", "nvdimm"))
else:
- _REQUESTED_PLUGIN_NAMES = set(("btrfs", "swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvdimm"))
+ _REQUESTED_PLUGIN_NAMES = set(("swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvdimm"))
_requested_plugins = blockdev.plugin_specs_from_names(_REQUESTED_PLUGIN_NAMES)
# XXX force non-dbus LVM plugin
--
2.37.3

@@ -0,0 +1,330 @@
From b9021fde8ccdd14cbe192b6597f7ca350b4bb585 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 26 May 2021 12:15:54 +0200
Subject: [PATCH] Revert "More consistent lvm errors (API break)"
This reverts commit 49ec071c6d0673224a0774d613904387c52c7381.
---
blivet/devices/lvm.py | 72 +++++++++++------------
tests/unit_tests/devices_test/lvm_test.py | 14 ++---
2 files changed, 43 insertions(+), 43 deletions(-)
diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py
index 38e49e18..b8595d63 100644
--- a/blivet/devices/lvm.py
+++ b/blivet/devices/lvm.py
@@ -304,7 +304,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
def _add_log_vol(self, lv):
""" Add an LV to this VG. """
if lv in self._lvs:
- raise errors.DeviceError("lv is already part of this vg")
+ raise ValueError("lv is already part of this vg")
# verify we have the space, then add it
# do not verify for growing vg (because of ks)
@@ -337,7 +337,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
def _remove_log_vol(self, lv):
""" Remove an LV from this VG. """
if lv not in self.lvs:
- raise errors.DeviceError("specified lv is not part of this vg")
+ raise ValueError("specified lv is not part of this vg")
self._lvs.remove(lv)
@@ -430,7 +430,7 @@ class LVMVolumeGroupDevice(ContainerDevice):
@thpool_reserve.setter
def thpool_reserve(self, value):
if value is not None and not isinstance(value, ThPoolReserveSpec):
- raise AttributeError("Invalid thpool_reserve given, must be of type ThPoolReserveSpec")
+ raise ValueError("Invalid thpool_reserve given, must be of type ThPoolReserveSpec")
self._thpool_reserve = value
@property
@@ -665,14 +665,14 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
if seg_type not in [None, "linear", "thin", "thin-pool", "cache", "vdo-pool", "vdo", "cache-pool"] + lvm.raid_seg_types:
raise ValueError("Invalid or unsupported segment type: %s" % seg_type)
if seg_type and seg_type in lvm.raid_seg_types and not pvs:
- raise errors.DeviceError("List of PVs has to be given for every non-linear LV")
+ raise ValueError("List of PVs has to be given for every non-linear LV")
elif (not seg_type or seg_type == "linear") and pvs:
if not all(isinstance(pv, LVPVSpec) for pv in pvs):
- raise errors.DeviceError("Invalid specification of PVs for a linear LV: either no or complete "
- "specification (with all space split into PVs has to be given")
+ raise ValueError("Invalid specification of PVs for a linear LV: either no or complete "
+ "specification (with all space split into PVs has to be given")
elif sum(spec.size for spec in pvs) != size:
- raise errors.DeviceError("Invalid specification of PVs for a linear LV: the sum of space "
- "assigned to PVs is not equal to the size of the LV")
+ raise ValueError("Invalid specification of PVs for a linear LV: the sum of space "
+ "assigned to PVs is not equal to the size of the LV")
# When this device's format is set in the superclass constructor it will
# try to access self.snapshots.
@@ -721,13 +721,13 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
self._from_lvs = from_lvs
if self._from_lvs:
if exists:
- raise errors.DeviceError("Only new LVs can be created from other LVs")
+ raise ValueError("Only new LVs can be created from other LVs")
if size or maxsize or percent:
- raise errors.DeviceError("Cannot specify size for a converted LV")
+ raise ValueError("Cannot specify size for a converted LV")
if fmt:
- raise errors.DeviceError("Cannot specify format for a converted LV")
+ raise ValueError("Cannot specify format for a converted LV")
if any(lv.vg != self.vg for lv in self._from_lvs):
- raise errors.DeviceError("Conversion of LVs only possible inside a VG")
+ raise ValueError("Conversion of LVs only possible inside a VG")
self._cache = None
if cache_request and not self.exists:
@@ -746,13 +746,13 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
elif isinstance(pv_spec, StorageDevice):
self._pv_specs.append(LVPVSpec(pv_spec, Size(0)))
else:
- raise AttributeError("Invalid PV spec '%s' for the '%s' LV" % (pv_spec, self.name))
+ raise ValueError("Invalid PV spec '%s' for the '%s' LV" % (pv_spec, self.name))
# Make sure any destination PVs are actually PVs in this VG
if not set(spec.pv for spec in self._pv_specs).issubset(set(self.vg.parents)):
missing = [r.name for r in
set(spec.pv for spec in self._pv_specs).difference(set(self.vg.parents))]
msg = "invalid destination PV(s) %s for LV %s" % (missing, self.name)
- raise errors.DeviceError(msg)
+ raise ValueError(msg)
if self._pv_specs:
self._assign_pv_space()
@@ -1130,7 +1130,7 @@ class LVMLogicalVolumeBase(DMDevice, RaidDevice):
else:
msg = "the specified internal LV '%s' doesn't belong to this LV ('%s')" % (int_lv.lv_name,
self.name)
- raise errors.DeviceError(msg)
+ raise ValueError(msg)
def populate_ksdata(self, data):
super(LVMLogicalVolumeBase, self).populate_ksdata(data)
@@ -1229,7 +1229,7 @@ class LVMInternalLogicalVolumeMixin(object):
def _init_check(self):
# an internal LV should have no parents
if self._parent_lv and self._parents:
- raise errors.DeviceError("an internal LV should have no parents")
+ raise ValueError("an internal LV should have no parents")
@property
def is_internal_lv(self):
@@ -1289,7 +1289,7 @@ class LVMInternalLogicalVolumeMixin(object):
@readonly.setter
def readonly(self, value): # pylint: disable=unused-argument
- raise errors.DeviceError("Cannot make an internal LV read-write")
+ raise ValueError("Cannot make an internal LV read-write")
@property
def type(self):
@@ -1325,7 +1325,7 @@ class LVMInternalLogicalVolumeMixin(object):
def _check_parents(self):
# an internal LV should have no parents
if self._parents:
- raise errors.DeviceError("an internal LV should have no parents")
+ raise ValueError("an internal LV should have no parents")
def _add_to_parents(self):
# nothing to do here, an internal LV has no parents (in the DeviceTree's
@@ -1335,13 +1335,13 @@ class LVMInternalLogicalVolumeMixin(object):
# internal LVs follow different rules limitting size
def _set_size(self, newsize):
if not isinstance(newsize, Size):
- raise AttributeError("new size must of type Size")
+ raise ValueError("new size must of type Size")
if not self.takes_extra_space:
if newsize <= self.parent_lv.size: # pylint: disable=no-member
self._size = newsize # pylint: disable=attribute-defined-outside-init
else:
- raise errors.DeviceError("Internal LV cannot be bigger than its parent LV")
+ raise ValueError("Internal LV cannot be bigger than its parent LV")
else:
# same rules apply as for any other LV
raise NotTypeSpecific()
@@ -1419,18 +1419,18 @@ class LVMSnapshotMixin(object):
return
if self.origin and not isinstance(self.origin, LVMLogicalVolumeDevice):
- raise errors.DeviceError("lvm snapshot origin must be a logical volume")
+ raise ValueError("lvm snapshot origin must be a logical volume")
if self.vorigin and not self.exists:
- raise errors.DeviceError("only existing vorigin snapshots are supported")
+ raise ValueError("only existing vorigin snapshots are supported")
if isinstance(self.origin, LVMLogicalVolumeDevice) and \
isinstance(self.parents[0], LVMVolumeGroupDevice) and \
self.origin.vg != self.parents[0]:
- raise errors.DeviceError("lvm snapshot and origin must be in the same vg")
+ raise ValueError("lvm snapshot and origin must be in the same vg")
if self.is_thin_lv:
if self.origin and self.size and not self.exists:
- raise errors.DeviceError("thin snapshot size is determined automatically")
+ raise ValueError("thin snapshot size is determined automatically")
@property
def is_snapshot_lv(self):
@@ -1606,7 +1606,7 @@ class LVMThinPoolMixin(object):
def _check_from_lvs(self):
if self._from_lvs:
if len(self._from_lvs) != 2:
- raise errors.DeviceError("two LVs required to create a thin pool")
+ raise ValueError("two LVs required to create a thin pool")
def _convert_from_lvs(self):
data_lv, metadata_lv = self._from_lvs
@@ -1652,7 +1652,7 @@ class LVMThinPoolMixin(object):
def _add_log_vol(self, lv):
""" Add an LV to this pool. """
if lv in self._lvs:
- raise errors.DeviceError("lv is already part of this vg")
+ raise ValueError("lv is already part of this vg")
# TODO: add some checking to prevent overcommit for preexisting
self.vg._add_log_vol(lv)
@@ -1663,7 +1663,7 @@ class LVMThinPoolMixin(object):
def _remove_log_vol(self, lv):
""" Remove an LV from this pool. """
if lv not in self._lvs:
- raise errors.DeviceError("specified lv is not part of this vg")
+ raise ValueError("specified lv is not part of this vg")
self._lvs.remove(lv)
self.vg._remove_log_vol(lv)
@@ -1772,14 +1772,14 @@ class LVMThinLogicalVolumeMixin(object):
"""Check that this device has parents as expected"""
if isinstance(self.parents, (list, ParentList)):
if len(self.parents) != 1:
- raise errors.DeviceError("constructor requires a single thin-pool LV")
+ raise ValueError("constructor requires a single thin-pool LV")
container = self.parents[0]
else:
container = self.parents
if not container or not isinstance(container, LVMLogicalVolumeDevice) or not container.is_thin_pool:
- raise errors.DeviceError("constructor requires a thin-pool LV")
+ raise ValueError("constructor requires a thin-pool LV")
@property
def is_thin_lv(self):
@@ -1816,7 +1816,7 @@ class LVMThinLogicalVolumeMixin(object):
def _set_size(self, newsize):
if not isinstance(newsize, Size):
- raise AttributeError("new size must of type Size")
+ raise ValueError("new size must of type Size")
newsize = self.vg.align(newsize)
newsize = self.vg.align(util.numeric_type(newsize))
@@ -2499,7 +2499,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
container = self.parents
if not isinstance(container, LVMVolumeGroupDevice):
- raise AttributeError("constructor requires a LVMVolumeGroupDevice")
+ raise ValueError("constructor requires a LVMVolumeGroupDevice")
@type_specific
def _add_to_parents(self):
@@ -2510,12 +2510,12 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
@type_specific
def _check_from_lvs(self):
"""Check the LVs to create this LV from"""
- raise errors.DeviceError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)
+ raise ValueError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)
@type_specific
def _convert_from_lvs(self):
"""Convert the LVs to create this LV from into its internal LVs"""
- raise errors.DeviceError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)
+ raise ValueError("Cannot create a new LV of type '%s' from other LVs" % self.seg_type)
@property
def external_dependencies(self):
@@ -2535,7 +2535,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
@type_specific
def _set_size(self, newsize):
if not isinstance(newsize, Size):
- raise AttributeError("new size must be of type Size")
+ raise ValueError("new size must be of type Size")
newsize = self.vg.align(newsize)
log.debug("trying to set lv %s size to %s", self.name, newsize)
@@ -2544,7 +2544,7 @@ class LVMLogicalVolumeDevice(LVMLogicalVolumeBase, LVMInternalLogicalVolumeMixin
# space for it. A similar reasoning applies to shrinking the LV.
if not self.exists and newsize > self.size and newsize > self.vg.free_space + self.vg_space_used:
log.error("failed to set size: %s short", newsize - (self.vg.free_space + self.vg_space_used))
- raise errors.DeviceError("not enough free space in volume group")
+ raise ValueError("not enough free space in volume group")
LVMLogicalVolumeBase._set_size(self, newsize)
@@ -2910,7 +2910,7 @@ class LVMCache(Cache):
spec.size = spec.pv.format.free
space_to_assign -= spec.pv.format.free
if space_to_assign > 0:
- raise errors.DeviceError("Not enough free space in the PVs for this cache: %s short" % space_to_assign)
+ raise ValueError("Not enough free space in the PVs for this cache: %s short" % space_to_assign)
@property
def size(self):
diff --git a/tests/unit_tests/devices_test/lvm_test.py b/tests/unit_tests/devices_test/lvm_test.py
index 47613fdc..995c2da4 100644
--- a/tests/unit_tests/devices_test/lvm_test.py
+++ b/tests/unit_tests/devices_test/lvm_test.py
@@ -32,10 +32,10 @@ class LVMDeviceTest(unittest.TestCase):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg],
fmt=blivet.formats.get_format("xfs"))
- with six.assertRaisesRegex(self, errors.DeviceError, "lvm snapshot origin must be a logical volume"):
+ with six.assertRaisesRegex(self, ValueError, "lvm snapshot origin must be a logical volume"):
LVMLogicalVolumeDevice("snap1", parents=[vg], origin=pv)
- with six.assertRaisesRegex(self, errors.DeviceError, "only existing vorigin snapshots are supported"):
+ with six.assertRaisesRegex(self, ValueError, "only existing vorigin snapshots are supported"):
LVMLogicalVolumeDevice("snap1", parents=[vg], vorigin=True)
lv.exists = True
@@ -60,7 +60,7 @@ class LVMDeviceTest(unittest.TestCase):
pool = LVMLogicalVolumeDevice("pool1", parents=[vg], size=Size("500 MiB"), seg_type="thin-pool")
thinlv = LVMLogicalVolumeDevice("thinlv", parents=[pool], size=Size("200 MiB"), seg_type="thin")
- with six.assertRaisesRegex(self, errors.DeviceError, "lvm snapshot origin must be a logical volume"):
+ with six.assertRaisesRegex(self, ValueError, "lvm snapshot origin must be a logical volume"):
LVMLogicalVolumeDevice("snap1", parents=[pool], origin=pv, seg_type="thin")
# now make the constructor succeed so we can test some properties
@@ -310,21 +310,21 @@ class LVMDeviceTest(unittest.TestCase):
vg = LVMVolumeGroupDevice("testvg", parents=[pv, pv2])
# pvs have to be specified for non-linear LVs
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
fmt=blivet.formats.get_format("xfs"),
exists=False, seg_type="raid1")
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
fmt=blivet.formats.get_format("xfs"),
exists=False, seg_type="striped")
# no or complete specification has to be given for linear LVs
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
fmt=blivet.formats.get_format("xfs"),
exists=False, pvs=[pv])
- with self.assertRaises(errors.DeviceError):
+ with self.assertRaises(ValueError):
pv_spec = LVPVSpec(pv, Size("256 MiB"))
pv_spec2 = LVPVSpec(pv2, Size("250 MiB"))
lv = LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
--
2.37.3
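
The revert is visible to API consumers: invalid LV specifications once again raise ValueError instead of errors.DeviceError. A minimal sketch mirroring the updated tests above; the device names and sizes are illustrative:

```python
import blivet
from blivet.devices import StorageDevice, LVMVolumeGroupDevice, LVMLogicalVolumeDevice
from blivet.size import Size

pv = StorageDevice("pv1", fmt=blivet.formats.get_format("lvmpv"),
                   size=Size("1 GiB"))
vg = LVMVolumeGroupDevice("testvg", parents=[pv])

try:
    # pvs have to be specified for non-linear LVs, so this must fail
    LVMLogicalVolumeDevice("testlv", parents=[vg], size=Size("512 MiB"),
                           fmt=blivet.formats.get_format("xfs"),
                           exists=False, seg_type="raid1")
except ValueError as e:  # errors.DeviceError before this revert
    print("rejected:", e)
```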

@@ -0,0 +1,86 @@
From 4ad6f485a1e569feb5fd23ffcf78e08a7756e084 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 17 Aug 2022 14:24:21 +0200
Subject: [PATCH 1/2] Use MD populator instead of DM to handle DDF RAID format
---
blivet/formats/dmraid.py | 2 +-
blivet/formats/mdraid.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/blivet/formats/dmraid.py b/blivet/formats/dmraid.py
index 2ba9dcfe..ce15905d 100644
--- a/blivet/formats/dmraid.py
+++ b/blivet/formats/dmraid.py
@@ -43,7 +43,7 @@ class DMRaidMember(DeviceFormat):
#
# One problem that presents is the possibility of someone passing
# a dmraid member to the MDRaidArrayDevice constructor.
- _udev_types = ["adaptec_raid_member", "ddf_raid_member",
+ _udev_types = ["adaptec_raid_member",
"hpt37x_raid_member", "hpt45x_raid_member",
"isw_raid_member",
"jmicron_raid_member", "lsi_mega_raid_member",
diff --git a/blivet/formats/mdraid.py b/blivet/formats/mdraid.py
index 41ddef81..4aa3f3b0 100644
--- a/blivet/formats/mdraid.py
+++ b/blivet/formats/mdraid.py
@@ -41,7 +41,7 @@ class MDRaidMember(DeviceFormat):
""" An mdraid member disk. """
_type = "mdmember"
_name = N_("software RAID")
- _udev_types = ["linux_raid_member"]
+ _udev_types = ["linux_raid_member", "ddf_raid_member"]
parted_flag = PARTITION_RAID
_formattable = True # can be formatted
_supported = True # is supported
--
2.37.3
From abc7e018f43976cdab286d67207d515a74693d16 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 17 Aug 2022 14:24:58 +0200
Subject: [PATCH 2/2] Do not read DDF RAID UUID from udev
The UUID we get from udev isn't the array UUID, we need to get
that using libblockdev.
---
blivet/populator/helpers/mdraid.py | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/blivet/populator/helpers/mdraid.py b/blivet/populator/helpers/mdraid.py
index 3479e3f7..a7602d20 100644
--- a/blivet/populator/helpers/mdraid.py
+++ b/blivet/populator/helpers/mdraid.py
@@ -98,17 +98,21 @@ class MDFormatPopulator(FormatPopulator):
def _get_kwargs(self):
kwargs = super(MDFormatPopulator, self)._get_kwargs()
- try:
- # ID_FS_UUID contains the array UUID
- kwargs["md_uuid"] = udev.device_get_uuid(self.data)
- except KeyError:
- log.warning("mdraid member %s has no md uuid", udev.device_get_name(self.data))
+ kwargs["biosraid"] = udev.device_is_biosraid_member(self.data)
+ if not kwargs["biosraid"]:
+ try:
+ # ID_FS_UUID contains the array UUID
+ kwargs["md_uuid"] = udev.device_get_uuid(self.data)
+ except KeyError:
+ log.warning("mdraid member %s has no md uuid", udev.device_get_name(self.data))
+ else:
+ # for BIOS RAIDs we can't get the UUID from udev, we'll get it from mdadm in `run` below
+ kwargs["md_uuid"] = None
# reset the uuid to the member-specific value
# this will be None for members of v0 metadata arrays
kwargs["uuid"] = udev.device_get_md_device_uuid(self.data)
- kwargs["biosraid"] = udev.device_is_biosraid_member(self.data)
return kwargs
def run(self):
--
2.37.3
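
As the second commit message notes, for BIOS RAID members the array UUID must come from libblockdev (mdadm) rather than udev's ID_FS_UUID. A hedged sketch of that lookup, assuming an initialized mdraid plugin; the member device path is illustrative:

```python
import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev

blockdev.switch_init_checks(False)
blockdev.try_reinit(require_plugins=blockdev.plugin_specs_from_names({"mdraid"}),
                    reload=False, log_func=None)

# examine() wraps `mdadm --examine`; .uuid is the array UUID, which for
# DDF/BIOS RAID members differs from what udev's ID_FS_UUID reports
info = blockdev.md.examine("/dev/sda1")
print(info.uuid)
```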

@@ -0,0 +1,77 @@
From 789dd296988aa9da17d97ece1efc33f9e232648e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 13 Oct 2022 10:47:52 +0200
Subject: [PATCH] Revert "Remove the Blivet.roots attribute"
This reverts commit 19a826073345ca6b57a8f9a95ec855892320300e.
---
blivet/blivet.py | 21 +++++++++++++++++++++
blivet/devicefactory.py | 3 +++
2 files changed, 24 insertions(+)
diff --git a/blivet/blivet.py b/blivet/blivet.py
index bf72ee9c..dc066b03 100644
--- a/blivet/blivet.py
+++ b/blivet/blivet.py
@@ -88,6 +88,7 @@ class Blivet(object):
self.devicetree = DeviceTree(ignored_disks=self.ignored_disks,
exclusive_disks=self.exclusive_disks,
disk_images=self.disk_images)
+ self.roots = []
@property
def short_product_name(self):
@@ -1314,5 +1315,25 @@ class Blivet(object):
p = partition.disk.format.parted_disk.getPartitionByPath(partition.path)
partition.parted_partition = p
+ for root in new.roots:
+ root.swaps = [new.devicetree.get_device_by_id(d.id, hidden=True) for d in root.swaps]
+ root.swaps = [s for s in root.swaps if s]
+
+ removed = set()
+ for (mountpoint, old_dev) in root.mounts.items():
+ if old_dev is None:
+ continue
+
+ new_dev = new.devicetree.get_device_by_id(old_dev.id, hidden=True)
+ if new_dev is None:
+ # if the device has been removed don't include this
+ # mountpoint at all
+ removed.add(mountpoint)
+ else:
+ root.mounts[mountpoint] = new_dev
+
+ for mnt in removed:
+ del root.mounts[mnt]
+
log.debug("finished Blivet copy")
return new
diff --git a/blivet/devicefactory.py b/blivet/devicefactory.py
index 8105bfc7..6f460f6d 100644
--- a/blivet/devicefactory.py
+++ b/blivet/devicefactory.py
@@ -383,6 +383,7 @@ class DeviceFactory(object):
# used for error recovery
self.__devices = []
self.__actions = []
+ self.__roots = []
def _is_container_encrypted(self):
return all(isinstance(p, LUKSDevice) for p in self.device.container.parents)
@@ -994,10 +995,12 @@ class DeviceFactory(object):
_blivet_copy = self.storage.copy()
self.__devices = _blivet_copy.devicetree._devices
self.__actions = _blivet_copy.devicetree._actions
+ self.__roots = _blivet_copy.roots
def _revert_devicetree(self):
self.storage.devicetree._devices = self.__devices
self.storage.devicetree._actions = self.__actions
+ self.storage.roots = self.__roots
class PartitionFactory(DeviceFactory):
--
2.37.3

@@ -0,0 +1,45 @@
From 7931a74e691979dd23a16e7a017b4ef5bc296b79 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 18 Oct 2022 12:28:37 +0200
Subject: [PATCH] Fix potential AttributeError when getting stratis blockdev
info
---
blivet/static_data/stratis_info.py | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/blivet/static_data/stratis_info.py b/blivet/static_data/stratis_info.py
index bd1c5a18..42f230ee 100644
--- a/blivet/static_data/stratis_info.py
+++ b/blivet/static_data/stratis_info.py
@@ -124,20 +124,22 @@ class StratisInfo(object):
log.error("Failed to get DBus properties of '%s'", blockdev_path)
return None
+ blockdev_uuid = str(uuid.UUID(properties["Uuid"]))
+
pool_path = properties["Pool"]
if pool_path == "/":
pool_name = ""
+ return StratisBlockdevInfo(path=properties["Devnode"], uuid=blockdev_uuid,
+ pool_name="", pool_uuid="", object_path=blockdev_path)
else:
pool_info = self._get_pool_info(properties["Pool"])
if not pool_info:
return None
pool_name = pool_info.name
- blockdev_uuid = str(uuid.UUID(properties["Uuid"]))
-
- return StratisBlockdevInfo(path=properties["Devnode"], uuid=blockdev_uuid,
- pool_name=pool_name, pool_uuid=pool_info.uuid,
- object_path=blockdev_path)
+ return StratisBlockdevInfo(path=properties["Devnode"], uuid=blockdev_uuid,
+ pool_name=pool_name, pool_uuid=pool_info.uuid,
+ object_path=blockdev_path)
def _get_locked_pools_info(self):
locked_pools = []
--
2.37.3

@@ -0,0 +1,27 @@
From b747c4ed07937f54a546ffb2f2c8c95e0797dd6c Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 20 Oct 2022 15:19:29 +0200
Subject: [PATCH] tests: Skip XFS resize test on CentOS/RHEL 9
Partitions on loop devices are broken on CentOS/RHEL 9.
---
tests/skip.yml | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/tests/skip.yml b/tests/skip.yml
index 568c3fff..66b34493 100644
--- a/tests/skip.yml
+++ b/tests/skip.yml
@@ -29,3 +29,9 @@
- distro: "centos"
version: "9"
reason: "Creating RAID 1 LV on CentOS/RHEL 9 causes a system deadlock"
+
+- test: storage_tests.formats_test.fs_test.XFSTestCase.test_resize
+ skip_on:
+ - distro: ["centos", "enterprise_linux"]
+ version: "9"
+ reason: "Creating partitions on loop devices is broken on CentOS/RHEL 9 latest kernel"
--
2.37.3
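
Each skip.yml entry pairs an optional test identifier with skip_on conditions (distro, version, reason), where distro may be a single name or a list. A hedged sketch of how such a rule could be evaluated; the helper below is illustrative, not the test suite's actual loader:

```python
import yaml

def skip_reason(test_id, distro, version, rules):
    """Return the matching skip reason, or None (illustrative helper)."""
    for rule in rules:
        # a rule without "test" applies to every test
        if rule.get("test") not in (None, test_id):
            continue
        for cond in rule.get("skip_on", []):
            distros = cond["distro"]
            if isinstance(distros, str):
                distros = [distros]
            if distro in distros and str(cond["version"]) == str(version):
                return cond["reason"]
    return None

with open("tests/skip.yml") as f:
    rules = yaml.safe_load(f)
print(skip_reason("storage_tests.formats_test.fs_test.XFSTestCase.test_resize",
                  "centos", "9", rules))
```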

@@ -0,0 +1,160 @@
From 9618b84f94187efddc7316c2546bed923a91ecf9 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 3 Nov 2022 08:36:27 +0100
Subject: [PATCH 1/2] Revert "Set XFS minimal size to 300 MiB"
This reverts commit 307d49833771d161314bae50c68e70dc35c3bb36.
---
blivet/formats/fs.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/blivet/formats/fs.py b/blivet/formats/fs.py
index 8c346aa5..33922f3a 100644
--- a/blivet/formats/fs.py
+++ b/blivet/formats/fs.py
@@ -1091,7 +1091,7 @@ class XFS(FS):
_modules = ["xfs"]
_labelfs = fslabeling.XFSLabeling()
_uuidfs = fsuuid.XFSUUID()
- _min_size = Size("300 MiB")
+ _min_size = Size("16 MiB")
_max_size = Size("16 EiB")
_formattable = True
_linux_native = True
--
2.38.1
From 24d94922d6879baa85aaa101f6b21efa568a9cbc Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 3 Nov 2022 08:36:39 +0100
Subject: [PATCH 2/2] Revert "tests: Create bigger devices for XFS testing"
This reverts commit 467cb8024010b2cabb1e92d9e64f6d3cbe949ad9.
---
tests/storage_tests/formats_test/fs_test.py | 7 +++----
tests/storage_tests/formats_test/fslabeling.py | 4 +---
tests/storage_tests/formats_test/fsuuid.py | 4 +---
tests/storage_tests/formats_test/labeling_test.py | 2 --
tests/storage_tests/formats_test/uuid_test.py | 3 ---
5 files changed, 5 insertions(+), 15 deletions(-)
diff --git a/tests/storage_tests/formats_test/fs_test.py b/tests/storage_tests/formats_test/fs_test.py
index cf8fb441..97f4cbbe 100644
--- a/tests/storage_tests/formats_test/fs_test.py
+++ b/tests/storage_tests/formats_test/fs_test.py
@@ -54,7 +54,6 @@ class ReiserFSTestCase(fstesting.FSAsRoot):
class XFSTestCase(fstesting.FSAsRoot):
_fs_class = fs.XFS
- _DEVICE_SIZE = Size("500 MiB")
def can_resize(self, an_fs):
resize_tasks = (an_fs._resize, an_fs._size_info)
@@ -96,12 +95,12 @@ class XFSTestCase(fstesting.FSAsRoot):
self.assertFalse(an_fs.resizable)
# Not resizable, so can not do resizing actions.
with self.assertRaises(DeviceFormatError):
- an_fs.target_size = Size("300 MiB")
+ an_fs.target_size = Size("64 MiB")
with self.assertRaises(DeviceFormatError):
an_fs.do_resize()
else:
disk = DiskDevice(os.path.basename(self.loop_devices[0]))
- part = self._create_partition(disk, Size("300 MiB"))
+ part = self._create_partition(disk, Size("50 MiB"))
an_fs = self._fs_class()
an_fs.device = part.path
self.assertIsNone(an_fs.create())
@@ -114,7 +113,7 @@ class XFSTestCase(fstesting.FSAsRoot):
part = self._create_partition(disk, size=part.size + Size("40 MiB"))
# Try a reasonable target size
- TARGET_SIZE = Size("325 MiB")
+ TARGET_SIZE = Size("64 MiB")
an_fs.target_size = TARGET_SIZE
self.assertEqual(an_fs.target_size, TARGET_SIZE)
self.assertNotEqual(an_fs._size, TARGET_SIZE)
diff --git a/tests/storage_tests/formats_test/fslabeling.py b/tests/storage_tests/formats_test/fslabeling.py
index ebe0b70a..0e0dc261 100644
--- a/tests/storage_tests/formats_test/fslabeling.py
+++ b/tests/storage_tests/formats_test/fslabeling.py
@@ -21,10 +21,8 @@ class LabelingAsRoot(loopbackedtestcase.LoopBackedTestCase):
_invalid_label = abc.abstractproperty(
doc="A label which is invalid for this filesystem.")
- _DEVICE_SIZE = Size("100 MiB")
-
def __init__(self, methodName='run_test'):
- super(LabelingAsRoot, self).__init__(methodName=methodName, device_spec=[self._DEVICE_SIZE])
+ super(LabelingAsRoot, self).__init__(methodName=methodName, device_spec=[Size("100 MiB")])
def setUp(self):
an_fs = self._fs_class()
diff --git a/tests/storage_tests/formats_test/fsuuid.py b/tests/storage_tests/formats_test/fsuuid.py
index 0b9762fd..16aa19a6 100644
--- a/tests/storage_tests/formats_test/fsuuid.py
+++ b/tests/storage_tests/formats_test/fsuuid.py
@@ -23,11 +23,9 @@ class SetUUID(loopbackedtestcase.LoopBackedTestCase):
_invalid_uuid = abc.abstractproperty(
doc="An invalid UUID for this filesystem.")
- _DEVICE_SIZE = Size("100 MiB")
-
def __init__(self, methodName='run_test'):
super(SetUUID, self).__init__(methodName=methodName,
- device_spec=[self._DEVICE_SIZE])
+ device_spec=[Size("100 MiB")])
def setUp(self):
an_fs = self._fs_class()
diff --git a/tests/storage_tests/formats_test/labeling_test.py b/tests/storage_tests/formats_test/labeling_test.py
index 0702260a..d24e6619 100644
--- a/tests/storage_tests/formats_test/labeling_test.py
+++ b/tests/storage_tests/formats_test/labeling_test.py
@@ -1,7 +1,6 @@
import unittest
from blivet.formats import device_formats
-from blivet.size import Size
import blivet.formats.fs as fs
import blivet.formats.swap as swap
@@ -62,7 +61,6 @@ class InitializationTestCase(unittest.TestCase):
class XFSTestCase(fslabeling.CompleteLabelingAsRoot):
_fs_class = fs.XFS
_invalid_label = "root filesystem"
- _DEVICE_SIZE = Size("500 MiB")
class FATFSTestCase(fslabeling.CompleteLabelingAsRoot):
diff --git a/tests/storage_tests/formats_test/uuid_test.py b/tests/storage_tests/formats_test/uuid_test.py
index af35c0ee..ee8d452e 100644
--- a/tests/storage_tests/formats_test/uuid_test.py
+++ b/tests/storage_tests/formats_test/uuid_test.py
@@ -2,7 +2,6 @@ import unittest
import blivet.formats.fs as fs
import blivet.formats.swap as swap
-from blivet.size import Size
from . import fsuuid
@@ -53,14 +52,12 @@ class XFSTestCase(fsuuid.SetUUIDWithMkFs):
_fs_class = fs.XFS
_invalid_uuid = "abcdefgh-ijkl-mnop-qrst-uvwxyz123456"
_valid_uuid = "97e3d40f-dca8-497d-8b86-92f257402465"
- _DEVICE_SIZE = Size("500 MiB")
class XFSAfterTestCase(fsuuid.SetUUIDAfterMkFs):
_fs_class = fs.XFS
_invalid_uuid = "abcdefgh-ijkl-mnop-qrst-uvwxyz123456"
_valid_uuid = "97e3d40f-dca8-497d-8b86-92f257402465"
- _DEVICE_SIZE = Size("500 MiB")
class FATFSTestCase(fsuuid.SetUUIDWithMkFs):
--
2.38.1

@@ -0,0 +1,55 @@
From fed62af06eb1584adbacd821dfe79c2df52c6aa4 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 2 Nov 2022 12:14:28 +0100
Subject: [PATCH] Catch BlockDevNotImplementedError for btrfs plugin calls
This is a workaround for RHEL where the btrfs plugin is not
available and where we might still try to call some libblockdev
functions to gather information about preexisting btrfs devices.
---
blivet/devices/btrfs.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/blivet/devices/btrfs.py b/blivet/devices/btrfs.py
index 0e029715..1ae6a04d 100644
--- a/blivet/devices/btrfs.py
+++ b/blivet/devices/btrfs.py
@@ -362,7 +362,7 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
try:
subvols = blockdev.btrfs.list_subvolumes(mountpoint,
snapshots_only=snapshots_only)
- except blockdev.BtrfsError as e:
+ except (blockdev.BtrfsError, blockdev.BlockDevNotImplementedError) as e:
log.debug("failed to list subvolumes: %s", e)
else:
self._get_default_subvolume_id()
@@ -400,7 +400,7 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
with self._do_temp_mount() as mountpoint:
try:
subvolid = blockdev.btrfs.get_default_subvolume_id(mountpoint)
- except blockdev.BtrfsError as e:
+ except (blockdev.BtrfsError, blockdev.BlockDevNotImplementedError) as e:
log.debug("failed to get default subvolume id: %s", e)
self._default_subvolume_id = subvolid
@@ -413,7 +413,7 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
with self._do_temp_mount() as mountpoint:
try:
blockdev.btrfs.set_default_subvolume(mountpoint, vol_id)
- except blockdev.BtrfsError as e:
+ except (blockdev.BtrfsError, blockdev.BlockDevNotImplementedError) as e:
log.error("failed to set new default subvolume id (%s): %s",
vol_id, e)
# The only time we set a new default subvolume is so we can remove
@@ -471,7 +471,7 @@ class BTRFSVolumeDevice(BTRFSDevice, ContainerDevice, RaidDevice):
if not self.format.vol_uuid:
try:
bd_info = blockdev.btrfs.filesystem_info(self.parents[0].path)
- except blockdev.BtrfsError as e:
+ except (blockdev.BtrfsError, blockdev.BlockDevNotImplementedError) as e:
log.error("failed to get filesystem info for new btrfs volume %s", e)
else:
self.format.vol_uuid = bd_info.uuid
--
2.38.1
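
The guard above treats a missing btrfs plugin like any other soft btrfs failure. A minimal standalone sketch of the same pattern, assuming the libblockdev python overrides that expose blockdev.btrfs and BlockDevNotImplementedError:

```python
import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev

def list_subvolumes_or_empty(mountpoint):
    try:
        return blockdev.btrfs.list_subvolumes(mountpoint, snapshots_only=False)
    except (blockdev.BtrfsError, blockdev.BlockDevNotImplementedError) as e:
        # without the btrfs plugin the call raises BlockDevNotImplementedError;
        # degrade to an empty result instead of crashing on preexisting volumes
        print("failed to list subvolumes: %s" % e)
        return []
```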

@@ -0,0 +1,590 @@
From 9383855c8a15e6d7c4033cd8d7ae8310b462d166 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 18 Oct 2022 10:38:00 +0200
Subject: [PATCH 1/3] Add a basic support for NVMe and NVMe Fabrics devices
This adds two new device types: NVMeNamespaceDevice and
NVMeFabricsNamespaceDevice mostly to allow to differentiate
between "local" and "remote" NVMe devices. The new libblockdev
NVMe plugin is required for full functionality.
---
blivet/__init__.py | 6 +-
blivet/devices/__init__.py | 2 +-
blivet/devices/disk.py | 101 ++++++++++++++++++++++
blivet/devices/lib.py | 1 +
blivet/populator/helpers/__init__.py | 2 +-
blivet/populator/helpers/disk.py | 64 ++++++++++++++
blivet/udev.py | 33 +++++++
blivet/util.py | 9 ++
tests/unit_tests/populator_test.py | 124 +++++++++++++++++++++++++++
9 files changed, 339 insertions(+), 3 deletions(-)
diff --git a/blivet/__init__.py b/blivet/__init__.py
index bbc7ea3a..3b9e659e 100644
--- a/blivet/__init__.py
+++ b/blivet/__init__.py
@@ -67,6 +67,10 @@ if arch.is_s390():
else:
_REQUESTED_PLUGIN_NAMES = set(("swap", "crypto", "loop", "mdraid", "mpath", "dm", "nvdimm"))
+# nvme plugin is not generally available
+if hasattr(blockdev.Plugin, "NVME"):
+ _REQUESTED_PLUGIN_NAMES.add("nvme")
+
_requested_plugins = blockdev.plugin_specs_from_names(_REQUESTED_PLUGIN_NAMES)
# XXX force non-dbus LVM plugin
lvm_plugin = blockdev.PluginSpec()
@@ -74,7 +78,7 @@ lvm_plugin.name = blockdev.Plugin.LVM
lvm_plugin.so_name = "libbd_lvm.so.2"
_requested_plugins.append(lvm_plugin)
try:
- # do not check for dependencies during libblockdev initializtion, do runtime
+ # do not check for dependencies during libblockdev initialization, do runtime
# checks instead
blockdev.switch_init_checks(False)
succ_, avail_plugs = blockdev.try_reinit(require_plugins=_requested_plugins, reload=False, log_func=log_bd_message)
diff --git a/blivet/devices/__init__.py b/blivet/devices/__init__.py
index 8bb0a979..4d16466e 100644
--- a/blivet/devices/__init__.py
+++ b/blivet/devices/__init__.py
@@ -22,7 +22,7 @@
from .lib import device_path_to_name, device_name_to_disk_by_path, ParentList
from .device import Device
from .storage import StorageDevice
-from .disk import DiskDevice, DiskFile, DMRaidArrayDevice, MultipathDevice, iScsiDiskDevice, FcoeDiskDevice, DASDDevice, ZFCPDiskDevice, NVDIMMNamespaceDevice
+from .disk import DiskDevice, DiskFile, DMRaidArrayDevice, MultipathDevice, iScsiDiskDevice, FcoeDiskDevice, DASDDevice, ZFCPDiskDevice, NVDIMMNamespaceDevice, NVMeNamespaceDevice, NVMeFabricsNamespaceDevice
from .partition import PartitionDevice
from .dm import DMDevice, DMLinearDevice, DMCryptDevice, DMIntegrityDevice, DM_MAJORS
from .luks import LUKSDevice, IntegrityDevice
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index bc4a1b5e..b5e25939 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -22,10 +22,13 @@
import gi
gi.require_version("BlockDev", "2.0")
+gi.require_version("GLib", "2.0")
from gi.repository import BlockDev as blockdev
+from gi.repository import GLib
import os
+from collections import namedtuple
from .. import errors
from .. import util
@@ -725,3 +728,101 @@ class NVDIMMNamespaceDevice(DiskDevice):
@property
def sector_size(self):
return self._sector_size
+
+
+NVMeController = namedtuple("NVMeController", ["name", "serial", "nvme_ver", "id", "subsysnqn"])
+
+
+class NVMeNamespaceDevice(DiskDevice):
+
+ """ NVMe namespace """
+ _type = "nvme"
+ _packages = ["nvme-cli"]
+
+ def __init__(self, device, **kwargs):
+ """
+ :param name: the device name (generally a device node's basename)
+ :type name: str
+ :keyword exists: does this device exist?
+ :type exists: bool
+ :keyword size: the device's size
+ :type size: :class:`~.size.Size`
+ :keyword parents: a list of parent devices
+ :type parents: list of :class:`StorageDevice`
+ :keyword format: this device's formatting
+ :type format: :class:`~.formats.DeviceFormat` or a subclass of it
+ :keyword nsid: namespace ID
+ :type nsid: int
+ """
+ self.nsid = kwargs.pop("nsid", 0)
+
+ DiskDevice.__init__(self, device, **kwargs)
+
+ self._clear_local_tags()
+ self.tags.add(Tags.local)
+ self.tags.add(Tags.nvme)
+
+ self._controllers = None
+
+ @property
+ def controllers(self):
+ if self._controllers is not None:
+ return self._controllers
+
+ self._controllers = []
+ if not hasattr(blockdev.Plugin, "NVME"):
+ # the nvme plugin is not generally available
+ log.debug("Failed to get controllers for %s: libblockdev NVME plugin is not available", self.name)
+ return self._controllers
+
+ try:
+ controllers = blockdev.nvme_find_ctrls_for_ns(self.sysfs_path)
+ except GLib.GError as err:
+ log.debug("Failed to get controllers for %s: %s", self.name, str(err))
+ return self._controllers
+
+ for controller in controllers:
+ try:
+ cpath = util.get_path_by_sysfs_path(controller, "char")
+ except RuntimeError as err:
+ log.debug("Failed to find controller %s: %s", controller, str(err))
+ continue
+ try:
+ cinfo = blockdev.nvme_get_controller_info(cpath)
+ except GLib.GError as err:
+ log.debug("Failed to get controller info for %s: %s", cpath, str(err))
+ continue
+ self._controllers.append(NVMeController(name=os.path.basename(cpath),
+ serial=cinfo.serial_number,
+ nvme_ver=cinfo.nvme_ver,
+ id=cinfo.ctrl_id,
+ subsysnqn=cinfo.subsysnqn))
+
+ return self._controllers
+
+
+class NVMeFabricsNamespaceDevice(NVMeNamespaceDevice, NetworkStorageDevice):
+
+ """ NVMe fabrics namespace """
+ _type = "nvme-fabrics"
+ _packages = ["nvme-cli"]
+
+ def __init__(self, device, **kwargs):
+ """
+ :param name: the device name (generally a device node's basename)
+ :type name: str
+ :keyword exists: does this device exist?
+ :type exists: bool
+ :keyword size: the device's size
+ :type size: :class:`~.size.Size`
+ :keyword parents: a list of parent devices
+ :type parents: list of :class:`StorageDevice`
+ :keyword format: this device's formatting
+ :type format: :class:`~.formats.DeviceFormat` or a subclass of it
+ """
+ NVMeNamespaceDevice.__init__(self, device, **kwargs)
+ NetworkStorageDevice.__init__(self)
+
+ self._clear_local_tags()
+ self.tags.add(Tags.remote)
+ self.tags.add(Tags.nvme)
diff --git a/blivet/devices/lib.py b/blivet/devices/lib.py
index 1bda0bab..b3c4c5b0 100644
--- a/blivet/devices/lib.py
+++ b/blivet/devices/lib.py
@@ -32,6 +32,7 @@ class Tags(str, Enum):
"""Tags that describe various classes of disk."""
local = 'local'
nvdimm = 'nvdimm'
+ nvme = 'nvme'
remote = 'remote'
removable = 'removable'
ssd = 'ssd'
diff --git a/blivet/populator/helpers/__init__.py b/blivet/populator/helpers/__init__.py
index c5ac412f..50ab4de8 100644
--- a/blivet/populator/helpers/__init__.py
+++ b/blivet/populator/helpers/__init__.py
@@ -6,7 +6,7 @@ from .formatpopulator import FormatPopulator
from .btrfs import BTRFSFormatPopulator
from .boot import AppleBootFormatPopulator, EFIFormatPopulator, MacEFIFormatPopulator
-from .disk import DiskDevicePopulator, iScsiDevicePopulator, FCoEDevicePopulator, MDBiosRaidDevicePopulator, DASDDevicePopulator, ZFCPDevicePopulator, NVDIMMNamespaceDevicePopulator
+from .disk import DiskDevicePopulator, iScsiDevicePopulator, FCoEDevicePopulator, MDBiosRaidDevicePopulator, DASDDevicePopulator, ZFCPDevicePopulator, NVDIMMNamespaceDevicePopulator, NVMeNamespaceDevicePopulator, NVMeFabricsNamespaceDevicePopulator
from .disklabel import DiskLabelFormatPopulator
from .dm import DMDevicePopulator
from .dmraid import DMRaidFormatPopulator
diff --git a/blivet/populator/helpers/disk.py b/blivet/populator/helpers/disk.py
index 9db7b810..9ed1eebe 100644
--- a/blivet/populator/helpers/disk.py
+++ b/blivet/populator/helpers/disk.py
@@ -22,13 +22,16 @@
import gi
gi.require_version("BlockDev", "2.0")
+gi.require_version("GLib", "2.0")
from gi.repository import BlockDev as blockdev
+from gi.repository import GLib
from ... import udev
from ... import util
from ...devices import DASDDevice, DiskDevice, FcoeDiskDevice, iScsiDiskDevice
from ...devices import MDBiosRaidArrayDevice, ZFCPDiskDevice, NVDIMMNamespaceDevice
+from ...devices import NVMeNamespaceDevice, NVMeFabricsNamespaceDevice
from ...devices import device_path_to_name
from ...storage_log import log_method_call
from .devicepopulator import DevicePopulator
@@ -251,3 +254,64 @@ class NVDIMMNamespaceDevicePopulator(DiskDevicePopulator):
log.info("%s is an NVDIMM namespace device", udev.device_get_name(self.data))
return kwargs
+
+
+class NVMeNamespaceDevicePopulator(DiskDevicePopulator):
+ priority = 20
+
+ _device_class = NVMeNamespaceDevice
+
+ @classmethod
+ def match(cls, data):
+ return (super(NVMeNamespaceDevicePopulator, NVMeNamespaceDevicePopulator).match(data) and
+ udev.device_is_nvme_namespace(data) and not udev.device_is_nvme_fabrics(data))
+
+ def _get_kwargs(self):
+ kwargs = super(NVMeNamespaceDevicePopulator, self)._get_kwargs()
+
+ log.info("%s is an NVMe local namespace device", udev.device_get_name(self.data))
+
+ if not hasattr(blockdev.Plugin, "NVME"):
+ # the nvme plugin is not generally available
+ return kwargs
+
+ path = udev.device_get_devname(self.data)
+ try:
+ ninfo = blockdev.nvme_get_namespace_info(path)
+ except GLib.GError as err:
+ log.debug("Failed to get namespace info for %s: %s", path, str(err))
+ else:
+ kwargs["nsid"] = ninfo.nsid
+
+ log.info("%s is an NVMe local namespace device", udev.device_get_name(self.data))
+ return kwargs
+
+
+class NVMeFabricsNamespaceDevicePopulator(DiskDevicePopulator):
+ priority = 20
+
+ _device_class = NVMeFabricsNamespaceDevice
+
+ @classmethod
+ def match(cls, data):
+ return (super(NVMeFabricsNamespaceDevicePopulator, NVMeFabricsNamespaceDevicePopulator).match(data) and
+ udev.device_is_nvme_namespace(data) and udev.device_is_nvme_fabrics(data))
+
+ def _get_kwargs(self):
+ kwargs = super(NVMeFabricsNamespaceDevicePopulator, self)._get_kwargs()
+
+ log.info("%s is an NVMe fabrics namespace device", udev.device_get_name(self.data))
+
+ if not hasattr(blockdev.Plugin, "NVME"):
+ # the nvme plugin is not generally available
+ return kwargs
+
+ path = udev.device_get_devname(self.data)
+ try:
+ ninfo = blockdev.nvme_get_namespace_info(path)
+ except GLib.GError as err:
+ log.debug("Failed to get namespace info for %s: %s", path, str(err))
+ else:
+ kwargs["nsid"] = ninfo.nsid
+
+ return kwargs
diff --git a/blivet/udev.py b/blivet/udev.py
index efbc53d6..533a1edc 100644
--- a/blivet/udev.py
+++ b/blivet/udev.py
@@ -1023,6 +1023,39 @@ def device_is_nvdimm_namespace(info):
return ninfo is not None
+def device_is_nvme_namespace(info):
+ if info.get("DEVTYPE") != "disk":
+ return False
+
+ if not info.get("SYS_PATH"):
+ return False
+
+ device = pyudev.Devices.from_sys_path(global_udev, info.get("SYS_PATH"))
+ while device:
+ if device.subsystem and device.subsystem.startswith("nvme"):
+ return True
+ device = device.parent
+
+ return False
+
+
+def device_is_nvme_fabrics(info):
+ if not device_is_nvme_namespace(info):
+ return False
+
+ if not hasattr(blockdev.Plugin, "NVME") or not blockdev.is_plugin_available(blockdev.Plugin.NVME): # pylint: disable=no-member
+ # nvme plugin is not available -- even if this is an nvme fabrics device we
+ # don't have tools to work with it, so we should pretend it's just a normal nvme
+ return False
+
+ controllers = blockdev.nvme_find_ctrls_for_ns(info.get("SYS_PATH", ""))
+ if not controllers:
+ return False
+
+ transport = util.get_sysfs_attr(controllers[0], "transport")
+ return transport in ("rdma", "fc", "tcp", "loop")
+
+
def device_is_hidden(info):
sysfs_path = device_get_sysfs_path(info)
hidden = util.get_sysfs_attr(sysfs_path, "hidden")
diff --git a/blivet/util.py b/blivet/util.py
index 0e578aea..3040ee5a 100644
--- a/blivet/util.py
+++ b/blivet/util.py
@@ -432,6 +432,15 @@ def get_sysfs_path_by_name(dev_node, class_name="block"):
"for '%s' (it is not at '%s')" % (dev_node, dev_path))
+def get_path_by_sysfs_path(sysfs_path, dev_type="block"):
+ """ Return device path for a given device sysfs path. """
+
+ dev = get_sysfs_attr(sysfs_path, "dev")
+ if not dev or not os.path.exists("/dev/%s/%s" % (dev_type, dev)):
+ raise RuntimeError("get_path_by_sysfs_path: Could not find device for %s" % sysfs_path)
+ return os.path.realpath("/dev/%s/%s" % (dev_type, dev))
+
+
def get_cow_sysfs_path(dev_path, dev_sysfsPath):
""" Return sysfs path of cow device for a given device.
"""
diff --git a/tests/unit_tests/populator_test.py b/tests/unit_tests/populator_test.py
index 369fe878..1ee29b57 100644
--- a/tests/unit_tests/populator_test.py
+++ b/tests/unit_tests/populator_test.py
@@ -13,6 +13,7 @@ from gi.repository import BlockDev as blockdev
from blivet.devices import DiskDevice, DMDevice, FileDevice, LoopDevice
from blivet.devices import MDRaidArrayDevice, MultipathDevice, OpticalDevice
from blivet.devices import PartitionDevice, StorageDevice, NVDIMMNamespaceDevice
+from blivet.devices import NVMeNamespaceDevice, NVMeFabricsNamespaceDevice
from blivet.devicelibs import lvm
from blivet.devicetree import DeviceTree
from blivet.formats import get_device_format_class, get_format, DeviceFormat
@@ -21,6 +22,7 @@ from blivet.populator.helpers import DiskDevicePopulator, DMDevicePopulator, Loo
from blivet.populator.helpers import LVMDevicePopulator, MDDevicePopulator, MultipathDevicePopulator
from blivet.populator.helpers import OpticalDevicePopulator, PartitionDevicePopulator
from blivet.populator.helpers import LVMFormatPopulator, MDFormatPopulator, NVDIMMNamespaceDevicePopulator
+from blivet.populator.helpers import NVMeNamespaceDevicePopulator, NVMeFabricsNamespaceDevicePopulator
from blivet.populator.helpers import get_format_helper, get_device_helper
from blivet.populator.helpers.boot import AppleBootFormatPopulator, EFIFormatPopulator, MacEFIFormatPopulator
from blivet.populator.helpers.formatpopulator import FormatPopulator
@@ -591,6 +593,128 @@ class NVDIMMNamespaceDevicePopulatorTestCase(PopulatorHelperTestCase):
self.assertTrue(device in devicetree.devices)
+class NVMeNamespaceDevicePopulatorTestCase(PopulatorHelperTestCase):
+ helper_class = NVMeNamespaceDevicePopulator
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=False)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ def test_match(self, *args):
+ """Test matching of NVMe namespace device populator."""
+ device_is_nvme_namespace = args[0]
+ self.assertTrue(self.helper_class.match(None))
+ device_is_nvme_namespace.return_value = False
+ self.assertFalse(self.helper_class.match(None))
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=False)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ def test_get_helper(self, *args):
+ """Test get_device_helper for NVMe namespaces."""
+ device_is_nvme_namespace = args[0]
+ data = {}
+ self.assertEqual(get_device_helper(data), self.helper_class)
+
+ # verify that setting one of the required True return values to False prevents success
+ device_is_nvme_namespace.return_value = False
+ self.assertNotEqual(get_device_helper(data), self.helper_class)
+ device_is_nvme_namespace.return_value = True
+
+ @patch("blivet.udev.device_get_name")
+ def test_run(self, *args):
+ """Test disk device populator."""
+ device_get_name = args[0]
+
+ devicetree = DeviceTree()
+
+ # set up some fake udev data to verify handling of specific entries
+ data = {'SYS_PATH': 'dummy', 'DEVNAME': 'dummy', 'ID_PATH': 'dummy'}
+
+ device_name = "nop"
+ device_get_name.return_value = device_name
+ helper = self.helper_class(devicetree, data)
+
+ device = helper.run()
+
+ self.assertIsInstance(device, NVMeNamespaceDevice)
+ self.assertTrue(device.exists)
+ self.assertTrue(device.is_disk)
+ self.assertTrue(device in devicetree.devices)
+
+
+class NVMeFabricsNamespaceDevicePopulatorTestCase(PopulatorHelperTestCase):
+ helper_class = NVMeFabricsNamespaceDevicePopulator
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=True)
+ def test_match(self, *args):
+ """Test matching of NVMe namespace device populator."""
+ device_is_nvme_fabrics = args[0]
+ self.assertTrue(self.helper_class.match(None))
+ device_is_nvme_fabrics.return_value = False
+ self.assertFalse(self.helper_class.match(None))
+
+ @patch("os.path.join")
+ @patch("blivet.udev.device_is_cdrom", return_value=False)
+ @patch("blivet.udev.device_is_dm", return_value=False)
+ @patch("blivet.udev.device_is_loop", return_value=False)
+ @patch("blivet.udev.device_is_md", return_value=False)
+ @patch("blivet.udev.device_is_partition", return_value=False)
+ @patch("blivet.udev.device_is_disk", return_value=True)
+ @patch("blivet.udev.device_is_nvme_namespace", return_value=True)
+ @patch("blivet.udev.device_is_nvme_fabrics", return_value=True)
+ def test_get_helper(self, *args):
+ """Test get_device_helper for NVMe namespaces."""
+ device_is_nvme_fabrics = args[0]
+ data = {}
+ self.assertEqual(get_device_helper(data), self.helper_class)
+
+ # verify that setting one of the required True return values to False prevents success
+ device_is_nvme_fabrics.return_value = False
+ self.assertNotEqual(get_device_helper(data), self.helper_class)
+ device_is_nvme_fabrics.return_value = True
+
+ @patch("blivet.udev.device_get_name")
+ def test_run(self, *args):
+ """Test disk device populator."""
+ device_get_name = args[0]
+
+ devicetree = DeviceTree()
+
+ # set up some fake udev data to verify handling of specific entries
+ data = {'SYS_PATH': 'dummy', 'DEVNAME': 'dummy', 'ID_PATH': 'dummy'}
+
+ device_name = "nop"
+ device_get_name.return_value = device_name
+ helper = self.helper_class(devicetree, data)
+
+ device = helper.run()
+
+ self.assertIsInstance(device, NVMeFabricsNamespaceDevice)
+ self.assertTrue(device.exists)
+ self.assertTrue(device.is_disk)
+ self.assertTrue(device in devicetree.devices)
+
+
class MDDevicePopulatorTestCase(PopulatorHelperTestCase):
helper_class = MDDevicePopulator
--
2.38.1
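
The local-versus-fabrics decision above boils down to inspecting the sysfs transport attribute of the namespace's first controller. A hedged sketch of that check in isolation, assuming a libblockdev build with the NVMe plugin loaded; the sysfs path is illustrative:

```python
import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev

from blivet import util

ctrls = blockdev.nvme_find_ctrls_for_ns("/sys/block/nvme0n1")
transport = util.get_sysfs_attr(ctrls[0], "transport") if ctrls else None
# network transports mark the namespace as NVMe over Fabrics
print("fabrics" if transport in ("rdma", "fc", "tcp", "loop") else "local")
```
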
From af6ad7ff2f08180672690910d453158bcd463936 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Fri, 2 Dec 2022 12:20:47 +0100
Subject: [PATCH 2/3] Add transport and address to NVMeController info
---
blivet/devices/disk.py | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index b5e25939..796b5b03 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -730,7 +730,8 @@ class NVDIMMNamespaceDevice(DiskDevice):
return self._sector_size
-NVMeController = namedtuple("NVMeController", ["name", "serial", "nvme_ver", "id", "subsysnqn"])
+NVMeController = namedtuple("NVMeController", ["name", "serial", "nvme_ver", "id", "subsysnqn",
+ "transport", "transport_address"])
class NVMeNamespaceDevice(DiskDevice):
@@ -792,11 +793,15 @@ class NVMeNamespaceDevice(DiskDevice):
except GLib.GError as err:
log.debug("Failed to get controller info for %s: %s", cpath, str(err))
continue
+ ctrans = util.get_sysfs_attr(controller, "transport")
+ ctaddr = util.get_sysfs_attr(controller, "address")
self._controllers.append(NVMeController(name=os.path.basename(cpath),
serial=cinfo.serial_number,
nvme_ver=cinfo.nvme_ver,
id=cinfo.ctrl_id,
- subsysnqn=cinfo.subsysnqn))
+ subsysnqn=cinfo.subsysnqn,
+ transport=ctrans,
+ transport_address=ctaddr))
return self._controllers
--
2.38.1
From a04538936ff62958c272b5e2b2657d177df1ef13 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Thu, 8 Dec 2022 13:15:33 +0100
Subject: [PATCH 3/3] Add additional identifiers to NVMeNamespaceDevice
---
blivet/devices/disk.py | 2 ++
blivet/populator/helpers/disk.py | 3 +++
2 files changed, 5 insertions(+)
diff --git a/blivet/devices/disk.py b/blivet/devices/disk.py
index 796b5b03..8842b4dc 100644
--- a/blivet/devices/disk.py
+++ b/blivet/devices/disk.py
@@ -756,6 +756,8 @@ class NVMeNamespaceDevice(DiskDevice):
:type nsid: int
"""
self.nsid = kwargs.pop("nsid", 0)
+ self.eui64 = kwargs.pop("eui64", "")
+ self.nguid = kwargs.pop("nguid", "")
DiskDevice.__init__(self, device, **kwargs)
diff --git a/blivet/populator/helpers/disk.py b/blivet/populator/helpers/disk.py
index 9ed1eebe..cf20d302 100644
--- a/blivet/populator/helpers/disk.py
+++ b/blivet/populator/helpers/disk.py
@@ -282,6 +282,9 @@ class NVMeNamespaceDevicePopulator(DiskDevicePopulator):
log.debug("Failed to get namespace info for %s: %s", path, str(err))
else:
kwargs["nsid"] = ninfo.nsid
+ kwargs["uuid"] = ninfo.uuid
+ kwargs["eui64"] = ninfo.eui64
+ kwargs["nguid"] = ninfo.nguid
log.info("%s is an NVMe local namespace device", udev.device_get_name(self.data))
return kwargs
--
2.38.1
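
Patch 3/3 threads additional identifiers from the libblockdev namespace info into the device. A hedged sketch of where they come from, again assuming the NVMe plugin is available; the device path is illustrative:

```python
import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev

# the namespace info carries all of the identifiers the populator passes on
ninfo = blockdev.nvme_get_namespace_info("/dev/nvme0n1")
print(ninfo.nsid, ninfo.uuid, ninfo.eui64, ninfo.nguid)
```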

@@ -0,0 +1,57 @@
From 2aba050e74dc5df483da022dcf436b101c7a4301 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Wed, 11 Jan 2023 14:59:24 +0100
Subject: [PATCH] Default to encryption sector size 512 for LUKS devices
We are currently letting cryptsetup decide the optimal encryption
sector size for LUKS. The problem is that for disks with physical
sector size 4096 cryptsetup will default to 4096 encryption sector
size even if the drive logical sector size is 512 which means
these disks cannot be combined with other 512 logical sector size
disks in LVM. This requires a more sophisticated solution in the
future, but for now just default to 512 if not specified by the
user otherwise.
Resolves: rhbz#2103800
---
blivet/formats/luks.py | 10 +++++++---
tests/unit_tests/formats_tests/luks_test.py | 2 +-
2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/blivet/formats/luks.py b/blivet/formats/luks.py
index 8de4911f..2637e0c5 100644
--- a/blivet/formats/luks.py
+++ b/blivet/formats/luks.py
@@ -166,9 +166,13 @@ class LUKS(DeviceFormat):
if self.pbkdf_args.type == "pbkdf2" and self.pbkdf_args.max_memory_kb:
log.warning("Memory limit is not used for pbkdf2 and it will be ignored.")
- self.luks_sector_size = kwargs.get("luks_sector_size") or 0
- if self.luks_sector_size and self.luks_version != "luks2":
- raise ValueError("Sector size argument is valid only for LUKS version 2.")
+ self.luks_sector_size = kwargs.get("luks_sector_size")
+ if self.luks_version == "luks2":
+ if self.luks_sector_size is None:
+ self.luks_sector_size = 512 # XXX we don't want cryptsetup choose automatically here so fallback to 512
+ else:
+ if self.luks_sector_size:
+ raise ValueError("Sector size argument is valid only for LUKS version 2.")
def __repr__(self):
s = DeviceFormat.__repr__(self)
diff --git a/tests/unit_tests/formats_tests/luks_test.py b/tests/unit_tests/formats_tests/luks_test.py
index 5ae6acfe..ec7b7592 100644
--- a/tests/unit_tests/formats_tests/luks_test.py
+++ b/tests/unit_tests/formats_tests/luks_test.py
@@ -53,7 +53,7 @@ class LUKSNodevTestCase(unittest.TestCase):
def test_sector_size(self):
fmt = LUKS()
- self.assertEqual(fmt.luks_sector_size, 0)
+ self.assertEqual(fmt.luks_sector_size, 512)
with self.assertRaises(ValueError):
fmt = LUKS(luks_version="luks1", luks_sector_size=4096)
--
2.39.0
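
The user-visible effect, mirroring the updated unit test: a new LUKS2 format now defaults to a 512-byte encryption sector size, and an explicit sector size is still rejected for LUKS1. A minimal sketch:

```python
from blivet.formats.luks import LUKS

fmt = LUKS()  # LUKS2 by default
assert fmt.luks_sector_size == 512  # previously 0, i.e. "let cryptsetup decide"

try:
    LUKS(luks_version="luks1", luks_sector_size=4096)
except ValueError:
    print("sector size is only configurable for LUKS2")
```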

File diff suppressed because it is too large