import rear-2.6-17.el9

commit 1a6780d003 by MSVSphere Packaging Team, 2 years ago

.gitignore

@@ -0,0 +1 @@
SOURCES/rear-2.6.tar.gz

@@ -0,0 +1 @@
13c23ad59254438ffcd0cde6400fd991cbfe194e SOURCES/rear-2.6.tar.gz

@@ -0,0 +1,47 @@
From df5e18b8d7c8359b48bc133bfa29734934d18160 Mon Sep 17 00:00:00 2001
From: Johannes Meixner <jsmeix@suse.com>
Date: Mon, 10 Aug 2020 16:20:38 +0200
Subject: [PATCH] Merge pull request #2469 from
rear/skip-kernel-builtin-modules-issue2414
In 400_copy_modules.sh skip copying kernel modules that are builtin modules.
The new behaviour is that when modules are listed in modules.builtin
and are also shown by modinfo then those modules are now skipped.
Before for such modules the modules file(s) would have been included
in the recovery system.
See https://github.com/rear/rear/issues/2414
---
usr/share/rear/build/GNU/Linux/400_copy_modules.sh | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/usr/share/rear/build/GNU/Linux/400_copy_modules.sh b/usr/share/rear/build/GNU/Linux/400_copy_modules.sh
index d8d733d2..641b7f83 100644
--- a/usr/share/rear/build/GNU/Linux/400_copy_modules.sh
+++ b/usr/share/rear/build/GNU/Linux/400_copy_modules.sh
@@ -133,8 +133,13 @@ for dummy in "once" ; do
module=${module%.o}
# Strip trailing ".ko" if there:
module=${module%.ko}
- # Continue with the next module if the current one does not exist:
+ # Continue with the next module if the current one does not exist as a module file:
modinfo $module 1>/dev/null || continue
+ # Continue with the next module if the current one is a kernel builtin module
+ # cf. https://github.com/rear/rear/issues/2414#issuecomment-668632798
+ # Quoting the grep search value is mandatory here ($module might be empty or blank),
+ # cf. "Beware of the emptiness" in https://github.com/rear/rear/wiki/Coding-Style
+ grep -q "$( echo $module | tr '_-' '..' )" /lib/modules/$KERNEL_VERSION/modules.builtin && continue
# Resolve module dependencies:
# Get the module file plus the module files of other needed modules.
# This is currently only a "best effort" attempt because
@@ -166,7 +171,10 @@ done
# Remove those modules that are specified in the EXCLUDE_MODULES array:
for exclude_module in "${EXCLUDE_MODULES[@]}" ; do
- # Continue with the next module if the current one does not exist:
+ # Continue with the next module only if the current one does not exist as a module file
+ # but do not continue with the next module if the current one is a kernel builtin module
+ # so when a module file exists that gets removed regardless if it is also a builtin module
+ # cf. https://github.com/rear/rear/issues/2414#issuecomment-669115481
modinfo $exclude_module 1>/dev/null || continue
# In this case it is ignored when a module exists but 'modinfo -F filename' cannot show its filename
# because then it is assumed that also no module file had been copied above:
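
For reference, the builtin check that this patch adds can be tried standalone. The sketch below is only an illustration (the function name is made up, not part of ReaR); it assumes KERNEL_VERSION is the running kernel and that /lib/modules/$KERNEL_VERSION/modules.builtin exists:

    # Hypothetical standalone version of the builtin-module test used above:
    KERNEL_VERSION="${KERNEL_VERSION:-$(uname -r)}"
    is_builtin_module() {
        local module="$1"
        # '_' and '-' are interchangeable in module names, so map both to a
        # regex wildcard before searching modules.builtin (same tr trick as in the patch):
        grep -q "$( echo $module | tr '_-' '..' )" /lib/modules/$KERNEL_VERSION/modules.builtin
    }
    # Example: only copy the module file when the module is not builtin
    # is_builtin_module vfat || echo "vfat would be copied into the recovery system"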

@@ -0,0 +1,15 @@
diff --git a/usr/share/rear/output/ISO/Linux-i386/249_check_rhel_grub2_efi_package.sh b/usr/share/rear/output/ISO/Linux-i386/249_check_rhel_grub2_efi_package.sh
new file mode 100644
index 00000000..4c4ded08
--- /dev/null
+++ b/usr/share/rear/output/ISO/Linux-i386/249_check_rhel_grub2_efi_package.sh
@@ -0,0 +1,9 @@
+# 249_check_rhel_grub2_efi_package.sh
+
+is_true $USING_UEFI_BOOTLOADER || return # empty or 0 means NO UEFI
+
+(
+ VERBOSE=1
+ test -r /usr/lib/grub/x86_64-efi/moddep.lst
+ PrintIfError "WARNING: /usr/lib/grub/x86_64-efi/moddep.lst not found, grub2-mkimage will likely fail. Please install the grub2-efi-x64-modules package to fix this."
+)
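
The whole script relies on a small ReaR idiom: the test runs in a subshell with VERBOSE=1 so that PrintIfError emits the warning only when the preceding test failed, without changing VERBOSE for the rest of the run. A rough plain-bash approximation of that pattern (print_if_error is a stand-in, not ReaR's actual function):

    # print_if_error reports a non-fatal warning when the previous command failed:
    print_if_error() { test $? -ne 0 && echo "$@" >&2 ; }
    (
        test -r /usr/lib/grub/x86_64-efi/moddep.lst
        print_if_error "WARNING: moddep.lst not found, grub2-mkimage will likely fail"
    )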

@@ -0,0 +1,112 @@
diff --git a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh
index 7cfdfcf2..1be17ba8 100644
--- a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh
+++ b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh
@@ -68,9 +68,9 @@ create_lvmgrp() {
local vg=${vgrp#/dev/}
cat >> "$LAYOUT_CODE" <<EOF
-create_volume_group=1
-create_logical_volumes=1
-create_thin_volumes_only=0
+create_volume_group+=( "$vg" )
+create_logical_volumes+=( "$vg" )
+create_thin_volumes_only=( \$( RmInArray "$vg" "\${create_thin_volumes_only[@]}" ) )
EOF
@@ -83,7 +83,7 @@ EOF
# '--mirrorlog', etc.
# Also, we likely do not support every layout yet (e.g. 'cachepool').
- if ! is_true "$MIGRATION_MODE" ; then
+ if ! is_true "$MIGRATION_MODE" && lvmgrp_supports_vgcfgrestore "$vgrp" ; then
cat >> "$LAYOUT_CODE" <<EOF
LogPrint "Restoring LVM VG '$vg'"
if [ -e "$vgrp" ] ; then
@@ -97,9 +97,12 @@ if lvm vgcfgrestore -f "$VAR_DIR/layout/lvm/${vg}.cfg" $vg >&2 ; then
LogPrint "Sleeping 3 seconds to let udev or systemd-udevd create their devices..."
sleep 3 >&2
- create_volume_group=0
- create_logical_volumes=0
+ create_volume_group=( \$( RmInArray "$vg" "\${create_volume_group[@]}" ) )
+ create_logical_volumes=( \$( RmInArray "$vg" "\${create_logical_volumes[@]}" ) )
+EOF
+ if is_true "${FORCE_VGCFGRESTORE-no}"; then
+ cat >> "$LAYOUT_CODE" <<EOF
#
# It failed ... restore layout using 'vgcfgrestore --force', but then remove Thin volumes, they are broken
#
@@ -121,9 +124,12 @@ elif lvm vgcfgrestore --force -f "$VAR_DIR/layout/lvm/${vg}.cfg" $vg >&2 ; then
sleep 3 >&2
# All logical volumes have been created, except Thin volumes and pools
- create_volume_group=0
- create_thin_volumes_only=1
+ create_volume_group=( \$( RmInArray "$vg" "\${create_volume_group[@]}" ) )
+ create_thin_volumes_only+=( "$vg" )
+EOF
+ fi
+ cat >> "$LAYOUT_CODE" <<EOF
#
# It failed also ... restore using 'vgcreate/lvcreate' commands
#
@@ -138,7 +144,7 @@ EOF
local -a devices=($(awk "\$1 == \"lvmdev\" && \$2 == \"$vgrp\" { print \$3 }" "$LAYOUT_FILE"))
cat >> "$LAYOUT_CODE" <<EOF
-if [ \$create_volume_group -eq 1 ] ; then
+if IsInArray $vg "\${create_volume_group[@]}" ; then
LogPrint "Creating LVM VG '$vg'; Warning: some properties may not be preserved..."
if [ -e "$vgrp" ] ; then
rm -rf "$vgrp"
@@ -240,9 +246,9 @@ create_lvmvol() {
local warnraidline
if [ $is_thin -eq 0 ] ; then
- ifline="if [ \"\$create_logical_volumes\" -eq 1 ] && [ \"\$create_thin_volumes_only\" -eq 0 ] ; then"
+ ifline="if IsInArray $vg \"\${create_logical_volumes[@]}\" && ! \$IsInArray $vg \"\${create_thin_volumes_only[@]}\" ; then"
else
- ifline="if [ \"\$create_logical_volumes\" -eq 1 ] ; then"
+ ifline="if IsInArray $vg \"\${create_logical_volumes[@]}\" ; then"
fi
if [ $is_raidunknown -eq 1 ]; then
diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh
index 54ddb50f..ae62d666 100644
--- a/usr/share/rear/lib/layout-functions.sh
+++ b/usr/share/rear/lib/layout-functions.sh
@@ -1308,4 +1308,30 @@ delete_dummy_partitions_and_resize_real_ones() {
last_partition_number=0
}
+# vgcfgrestore can properly restore only volume groups that do not use
+# any kernel metadata. All volume types except linear and striped use
+# kernel metadata.
+# Check whether a VG (given as /dev/<vgname> in the first argument)
+# doesn't contain any LVs that use kernel metadata.
+# If the function returns true, we can safely use vgcfgrestore to restore the VG.
+function lvmgrp_supports_vgcfgrestore() {
+ if is_true "${FORCE_VGCFGRESTORE-no}"; then
+ # If we are willing to use vgcfgrestore --force and then remove broken volumes,
+ # then everything can be considered supported. Don't do it by default though.
+ return 0
+ fi
+
+ local lvmvol vgrp lvname size layout kval
+
+ local supported_layouts=("linear" "striped")
+
+ while read lvmvol vgrp lvname size layout kval; do
+ [ "$vgrp" == "$1" ] || BugError "vgrp '$vgrp' != '$1'"
+ if ! IsInArray $layout "${supported_layouts[@]}"; then
+ LogPrint "Layout '$layout' of LV '$lvname' in VG '$vgrp' not supported by vgcfgrestore"
+ return 1
+ fi
+ done < <(grep "^lvmvol $1 " "$LAYOUT_FILE")
+}
+
# vim: set et ts=4 sw=4:
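
The core idea of this patch is that the generated diskrestore.sh no longer uses global 0/1 flags but arrays of VG names, so the "create this VG with vgcreate/lvcreate" decision is tracked per volume group. It relies on ReaR's IsInArray and RmInArray helpers; the simplified stand-ins below are sketched for illustration only (vg00 is a made-up VG name):

    IsInArray() {
        local needle="$1" element
        shift
        for element in "$@" ; do
            test "$element" = "$needle" && return 0
        done
        return 1
    }
    RmInArray() {
        # Print every element except the ones equal to the needle:
        local needle="$1" element
        shift
        for element in "$@" ; do
            test "$element" = "$needle" || echo "$element"
        done
    }
    # Usage, as in the generated diskrestore.sh code:
    # create_volume_group+=( "vg00" )
    # IsInArray vg00 "${create_volume_group[@]}" && echo "vg00 still needs vgcreate"
    # create_volume_group=( $( RmInArray "vg00" "${create_volume_group[@]}" ) )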

@@ -0,0 +1,351 @@
diff --git a/doc/user-guide/06-layout-configuration.adoc b/doc/user-guide/06-layout-configuration.adoc
index f59384db..88ba0420 100644
--- a/doc/user-guide/06-layout-configuration.adoc
+++ b/doc/user-guide/06-layout-configuration.adoc
@@ -630,7 +630,7 @@ lvmvol <volume_group> <name> <size(bytes)> <layout> [key:value ...]
=== LUKS Devices ===
----------------------------------
-crypt /dev/mapper/<name> <device> [cipher=<cipher>] [key_size=<key size>] [hash=<hash function>] [uuid=<uuid>] [keyfile=<keyfile>] [password=<password>]
+crypt /dev/mapper/<name> <device> [type=<type>] [cipher=<cipher>] [key_size=<key size>] [hash=<hash function>] [uuid=<uuid>] [keyfile=<keyfile>] [password=<password>]
----------------------------------
=== DRBD ===
diff --git a/usr/share/rear/layout/prepare/GNU/Linux/160_include_luks_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/160_include_luks_code.sh
index 05279bc8..0c662f67 100644
--- a/usr/share/rear/layout/prepare/GNU/Linux/160_include_luks_code.sh
+++ b/usr/share/rear/layout/prepare/GNU/Linux/160_include_luks_code.sh
@@ -1,35 +1,75 @@
+
# Code to recreate and/or open LUKS volumes.
create_crypt() {
+ # See the create_device() function in lib/layout-functions.sh what "device type" means:
+ local device_type="$1"
+ if ! grep -q "^crypt $device_type " "$LAYOUT_FILE" ; then
+ LogPrintError "Skip recreating LUKS volume $device_type (no 'crypt $device_type' entry in $LAYOUT_FILE)"
+ # FIXME: The return code is ignored in the create_device() function in lib/layout-functions.sh:
+ return 1
+ fi
+
local crypt target_device source_device options
- read crypt target_device source_device options < <(grep "^crypt $1 " "$LAYOUT_FILE")
+ local mapping_name option key value
+ local cryptsetup_options="" keyfile="" password=""
- local target_name=${target_device#/dev/mapper/}
+ read crypt target_device source_device options < <( grep "^crypt $device_type " "$LAYOUT_FILE" )
+
+ # Careful! One cannot 'test -b $source_device' here at the time when this code is run
+ # because the source device is usually a disk partition block device like /dev/sda2
+ # but disk partition block devices usually do not yet exist (in particular not on a new clean disk)
+ # because partitions are actually created later when the diskrestore.sh script is run
+ # but not here when this code is run which only generates the diskrestore.sh script:
+ if ! test $source_device ; then
+ LogPrintError "Skip recreating LUKS volume $device_type: No source device (see the 'crypt $device_type' entry in $LAYOUT_FILE)"
+ # FIXME: The return code is ignored in the create_device() function in lib/layout-functions.sh:
+ return 1
+ fi
+
+ mapping_name=${target_device#/dev/mapper/}
+ if ! test $mapping_name ; then
+ LogPrintError "Skip recreating LUKS volume $device_type on $source_device: No /dev/mapper/... mapping name (see the 'crypt $device_type' entry in $LAYOUT_FILE)"
+ # FIXME: The return code is ignored in the create_device() function in lib/layout-functions.sh:
+ return 1
+ fi
- local cryptsetup_options="" keyfile="" password=""
- local option key value
for option in $options ; do
- key=${option%=*}
+ # $option is of the form keyword=value and
+ # we assume keyword has no '=' character but value could be anything that may have a '=' character
+ # so we split keyword=value at the leftmost '=' character so that
+ # e.g. keyword=foo=bar gets split into key="keyword" and value="foo=bar":
+ key=${option%%=*}
value=${option#*=}
-
+ # The "cryptseup luksFormat" command does not require any of the type, cipher, key-size, hash, uuid option values
+ # because if omitted a cryptseup default value is used so we treat those values as optional.
+ # Using plain test to ensure the value is a single non empty and non blank word
+ # without quoting because test " " would return zero exit code
+ # cf. "Beware of the emptiness" in https://github.com/rear/rear/wiki/Coding-Style
case "$key" in
- cipher)
- cryptsetup_options+=" --cipher $value"
+ (type)
+ test $value && cryptsetup_options+=" --type $value"
+ ;;
+ (cipher)
+ test $value && cryptsetup_options+=" --cipher $value"
+ ;;
+ (key_size)
+ test $value && cryptsetup_options+=" --key-size $value"
;;
- key_size)
- cryptsetup_options+=" --key-size $value"
+ (hash)
+ test $value && cryptsetup_options+=" --hash $value"
;;
- hash)
- cryptsetup_options+=" --hash $value"
+ (uuid)
+ test $value && cryptsetup_options+=" --uuid $value"
;;
- uuid)
- cryptsetup_options+=" --uuid $value"
+ (keyfile)
+ test $value && keyfile=$value
;;
- keyfile)
- keyfile=$value
+ (password)
+ test $value && password=$value
;;
- password)
- password=$value
+ (*)
+ LogPrintError "Skipping unsupported LUKS cryptsetup option '$key' in 'crypt $target_device $source_device' entry in $LAYOUT_FILE"
;;
esac
done
@@ -37,26 +77,25 @@ create_crypt() {
cryptsetup_options+=" $LUKS_CRYPTSETUP_OPTIONS"
(
- echo "Log \"Creating LUKS device $target_name on $source_device\""
+ echo "LogPrint \"Creating LUKS volume $mapping_name on $source_device\""
if [ -n "$keyfile" ] ; then
# Assign a temporary keyfile at this stage so that original keyfiles do not leak onto the rescue medium.
# The original keyfile will be restored from the backup and then re-assigned to the LUKS device in the
# 'finalize' stage.
# The scheme for generating a temporary keyfile path must be the same here and in the 'finalize' stage.
- keyfile="${TMPDIR:-/tmp}/LUKS-keyfile-$target_name"
+ keyfile="$TMP_DIR/LUKS-keyfile-$mapping_name"
dd bs=512 count=4 if=/dev/urandom of="$keyfile"
chmod u=rw,go=- "$keyfile"
-
echo "cryptsetup luksFormat --batch-mode $cryptsetup_options $source_device $keyfile"
- echo "cryptsetup luksOpen --key-file $keyfile $source_device $target_name"
+ echo "cryptsetup luksOpen --key-file $keyfile $source_device $mapping_name"
elif [ -n "$password" ] ; then
echo "echo \"$password\" | cryptsetup luksFormat --batch-mode $cryptsetup_options $source_device"
- echo "echo \"$password\" | cryptsetup luksOpen $source_device $target_name"
+ echo "echo \"$password\" | cryptsetup luksOpen $source_device $mapping_name"
else
- echo "LogPrint \"Please enter the password for LUKS device $target_name ($source_device):\""
+ echo "LogUserOutput \"Set the password for LUKS volume $mapping_name (for 'cryptsetup luksFormat' on $source_device):\""
echo "cryptsetup luksFormat --batch-mode $cryptsetup_options $source_device"
- echo "LogPrint \"Please re-enter the password for LUKS device $target_name ($source_device):\""
- echo "cryptsetup luksOpen $source_device $target_name"
+ echo "LogUserOutput \"Enter the password for LUKS volume $mapping_name (for 'cryptsetup luksOpen' on $source_device):\""
+ echo "cryptsetup luksOpen $source_device $mapping_name"
fi
echo ""
) >> "$LAYOUT_CODE"
@@ -64,38 +103,61 @@ create_crypt() {
# Function open_crypt() is meant to be used by the 'mountonly' workflow
open_crypt() {
+ # See the do_mount_device() function in lib/layout-functions.sh what "device type" means:
+ local device_type="$1"
+ if ! grep -q "^crypt $device_type " "$LAYOUT_FILE" ; then
+ LogPrintError "Skip opening LUKS volume $device_type (no 'crypt $device_type' entry in $LAYOUT_FILE)"
+ # FIXME: The return code is ignored in the do_mount_device() function in lib/layout-functions.sh:
+ return 1
+ fi
+
local crypt target_device source_device options
- read crypt target_device source_device options < <(grep "^crypt $1 " "$LAYOUT_FILE")
+ local mapping_name option key value
+ local cryptsetup_options="" keyfile="" password=""
- local target_name=${target_device#/dev/mapper/}
+ read crypt target_device source_device options < <( grep "^crypt $device_type " "$LAYOUT_FILE" )
+
+ if ! test -b "$source_device" ; then
+ LogPrintError "Skip opening LUKS volume $device_type on device '$source_device' that is no block device (see the 'crypt $device_type' entry in $LAYOUT_FILE)"
+ # FIXME: The return code is ignored in the do_mount_device() function in lib/layout-functions.sh:
+ return 1
+ fi
+
+ mapping_name=${target_device#/dev/mapper/}
+ if ! test $mapping_name ; then
+ LogPrintError "Skip opening LUKS volume $device_type on $source_device: No /dev/mapper/... mapping name (see the 'crypt $device_type' entry in $LAYOUT_FILE)"
+ # FIXME: The return code is ignored in the do_mount_device() function in lib/layout-functions.sh:
+ return 1
+ fi
- local cryptsetup_options="" keyfile="" password=""
- local option key value
for option in $options ; do
- key=${option%=*}
+ # $option is of the form keyword=value and
+ # we assume keyword has no '=' character but value could be anything that may have a '=' character
+ # so we split keyword=value at the leftmost '=' character so that
+ # e.g. keyword=foo=bar gets split into key="keyword" and value="foo=bar":
+ key=${option%%=*}
value=${option#*=}
-
case "$key" in
- keyfile)
- keyfile=$value
+ (keyfile)
+ test $value && keyfile=$value
;;
- password)
- password=$value
+ (password)
+ test $value && password=$value
;;
esac
done
(
- echo "Log \"Opening LUKS device $target_name on $source_device\""
+ echo "LogPrint \"Opening LUKS volume $mapping_name on $source_device\""
if [ -n "$keyfile" ] ; then
# During a 'mountonly' workflow, the original keyfile is supposed to be
# available at this point.
- echo "cryptsetup luksOpen --key-file $keyfile $source_device $target_name"
+ echo "cryptsetup luksOpen --key-file $keyfile $source_device $mapping_name"
elif [ -n "$password" ] ; then
- echo "echo \"$password\" | cryptsetup luksOpen $source_device $target_name"
+ echo "echo \"$password\" | cryptsetup luksOpen $source_device $mapping_name"
else
- echo "LogPrint \"Please enter the password for LUKS device $target_name ($source_device):\""
- echo "cryptsetup luksOpen $source_device $target_name"
+ echo "LogUserOutput \"Enter the password for LUKS volume $mapping_name (for 'cryptsetup luksOpen' on $source_device):\""
+ echo "cryptsetup luksOpen $source_device $mapping_name"
fi
echo ""
) >> "$LAYOUT_CODE"
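
A detail worth noting in both functions above is the change from ${option%=*} to ${option%%=*}: the former strips from the rightmost '=', which breaks any value that itself contains '=', while the latter splits at the leftmost '=' as intended. A quick check (the keyfile path is a made-up example):

    option="keyfile=/etc/keys/luks=main.key"
    echo "${option%=*}"    # keyfile=/etc/keys/luks   (wrong key)
    echo "${option%%=*}"   # keyfile                  (correct key)
    echo "${option#*=}"    # /etc/keys/luks=main.key  (value, split at the leftmost '=')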
diff --git a/usr/share/rear/layout/save/GNU/Linux/260_crypt_layout.sh b/usr/share/rear/layout/save/GNU/Linux/260_crypt_layout.sh
index c1e1cfd5..afeabf6a 100644
--- a/usr/share/rear/layout/save/GNU/Linux/260_crypt_layout.sh
+++ b/usr/share/rear/layout/save/GNU/Linux/260_crypt_layout.sh
@@ -9,6 +9,8 @@ Log "Saving Encrypted volumes."
REQUIRED_PROGS+=( cryptsetup dmsetup )
COPY_AS_IS+=( /usr/share/cracklib/\* /etc/security/pwquality.conf )
+local invalid_cryptsetup_option_value="no"
+
while read target_name junk ; do
# find the target device we're mapping
if ! [ -e /dev/mapper/$target_name ] ; then
@@ -30,29 +32,96 @@ while read target_name junk ; do
source_device="$(get_device_name ${slave##*/})"
done
- if ! cryptsetup isLuks $source_device >/dev/null 2>&1; then
+ if ! blkid -p -o export $source_device >$TMP_DIR/blkid.output ; then
+ LogPrintError "Error: Cannot get attributes for $target_name ('blkid -p -o export $source_device' failed)"
continue
fi
- # gather crypt information
- cipher=$(cryptsetup luksDump $source_device | grep "Cipher name" | sed -r 's/^.+:\s*(.+)$/\1/')
- mode=$(cryptsetup luksDump $source_device | grep "Cipher mode" | cut -d: -f2- | awk '{printf("%s",$1)};')
- key_size=$(cryptsetup luksDump $source_device | grep "MK bits" | sed -r 's/^.+:\s*(.+)$/\1/')
- hash=$(cryptsetup luksDump $source_device | grep "Hash spec" | sed -r 's/^.+:\s*(.+)$/\1/')
- uuid=$(cryptsetup luksDump $source_device | grep "UUID" | sed -r 's/^.+:\s*(.+)$/\1/')
- keyfile_option=$([ -f /etc/crypttab ] && awk '$1 == "'"$target_name"'" && $3 != "none" && $3 != "-" && $3 != "" { print "keyfile=" $3; }' /etc/crypttab)
+ if ! grep -q "TYPE=crypto_LUKS" $TMP_DIR/blkid.output ; then
+ Log "Skipping $target_name (no 'TYPE=crypto_LUKS' in 'blkid -p -o export $source_device' output)"
+ continue
+ fi
- # LUKS version 2 is not yet suppported, see https://github.com/rear/rear/issues/2204
- # When LUKS version 2 is used the above code fails at least to determine the hash value
- # so we use an empty hash value as a simple test if gathering crypt information was successful:
- test "$hash" || Error "No hash value for LUKS device '$target_name' at '$source_device' (only LUKS version 1 is supported)"
+ # Detect LUKS version:
+ # Remove all non-digits in particular to avoid leading or trailing spaces in the version string
+ # cf. "Beware of the emptiness" in https://github.com/rear/rear/wiki/Coding-Style
+ # that could happen if the blkid output contains "VERSION = 2" so that 'cut -d= -f2' results " 2".
+ version=$( grep "VERSION" $TMP_DIR/blkid.output | cut -d= -f2 | tr -c -d '[:digit:]' )
+ if ! test "$version" = "1" -o "$version" = "2" ; then
+ LogPrintError "Error: Unsupported LUKS version for $target_name ('blkid -p -o export $source_device' shows 'VERSION=$version')"
+ continue
+ fi
+ luks_type=luks$version
- echo "crypt /dev/mapper/$target_name $source_device cipher=$cipher-$mode key_size=$key_size hash=$hash uuid=$uuid $keyfile_option" >> $DISKLAYOUT_FILE
-done < <( dmsetup ls --target crypt )
+ # Gather crypt information:
+ if ! cryptsetup luksDump $source_device >$TMP_DIR/cryptsetup.luksDump ; then
+ LogPrintError "Error: Cannot get LUKS$version values for $target_name ('cryptsetup luksDump $source_device' failed)"
+ continue
+ fi
+ uuid=$( grep "UUID" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' )
+ keyfile_option=$( [ -f /etc/crypttab ] && awk '$1 == "'"$target_name"'" && $3 != "none" && $3 != "-" && $3 != "" { print "keyfile=" $3; }' /etc/crypttab )
+ if test $luks_type = "luks1" ; then
+ cipher_name=$( grep "Cipher name" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' )
+ cipher_mode=$( grep "Cipher mode" $TMP_DIR/cryptsetup.luksDump | cut -d: -f2- | awk '{printf("%s",$1)};' )
+ cipher=$cipher_name-$cipher_mode
+ key_size=$( grep "MK bits" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' )
+ hash=$( grep "Hash spec" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' )
+ elif test $luks_type = "luks2" ; then
+ cipher=$( grep "cipher:" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' )
+ # More than one keyslot may be defined - use key_size from the first slot.
+ # Depending on the version the "cryptsetup luksDump" command outputs the key_size value
+ # as a line like
+ # Key: 512 bits
+ # and/or as a line like
+ # Cipher key: 512 bits
+ # cf. https://github.com/rear/rear/pull/2504#issuecomment-718729198 and subsequent comments
+ # so we grep for both lines but use only the first match from the first slot:
+ key_size=$( egrep -m 1 "Key:|Cipher key:" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+) bits$/\1/' )
+ hash=$( grep "Hash" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' )
+ fi
-# cryptsetup is required in the recovery system if disklayout.conf contains at least one 'crypt' entry
-# see the create_crypt function in layout/prepare/GNU/Linux/160_include_luks_code.sh
-# what program calls are written to diskrestore.sh
-# cf. https://github.com/rear/rear/issues/1963
-grep -q '^crypt ' $DISKLAYOUT_FILE && REQUIRED_PROGS+=( cryptsetup ) || true
+ # Basic checks that the cipher key_size hash uuid values exist
+ # cf. https://github.com/rear/rear/pull/2504#issuecomment-718729198
+ # because some values are needed during "rear recover"
+ # to set cryptsetup options in layout/prepare/GNU/Linux/160_include_luks_code.sh
+ # and it seems cryptsetup fails when options with empty values are specified
+ # cf. https://github.com/rear/rear/pull/2504#issuecomment-719479724
+ # For example a LUKS1 crypt entry in disklayout.conf looks like
+ # crypt /dev/mapper/luks1test /dev/sda7 type=luks1 cipher=aes-xts-plain64 key_size=256 hash=sha256 uuid=1b4198c9-d9b0-4c57-b9a3-3433e391e706
+ # and a LUKS2 crypt entry in disklayout.conf looks like
+ # crypt /dev/mapper/luks2test /dev/sda8 type=luks2 cipher=aes-xts-plain64 key_size=256 hash=sha256 uuid=3e874a28-7415-4f8c-9757-b3f28a96c4d2
+ # Only the keyfile_option value is optional and the luks_type value is already tested above.
+ # Using plain test to ensure a value is a single non empty and non blank word
+ # without quoting because test " " would return zero exit code
+ # cf. "Beware of the emptiness" in https://github.com/rear/rear/wiki/Coding-Style
+ # Do not error out instantly here but only report errors here so the user can see all messages
+ # and actually error out at the end of this script if there was one actually invalid value:
+ if ! test $cipher ; then
+ LogPrint "No 'cipher' value for LUKS$version volume $target_name in $source_device"
+ fi
+ if test $key_size ; then
+ if ! is_positive_integer $key_size ; then
+ LogPrintError "Error: 'key_size=$key_size' is no positive integer for LUKS$version volume $target_name in $source_device"
+ invalid_cryptsetup_option_value="yes"
+ fi
+ else
+ LogPrint "No 'key_size' value for LUKS$version volume $target_name in $source_device"
+ fi
+ if ! test $hash ; then
+ LogPrint "No 'hash' value for LUKS$version volume $target_name in $source_device"
+ fi
+ if ! test $uuid ; then
+ # Report a missing uuid value as an error to have the user informed
+ # but do not error out here because things can be fixed manually during "rear recover"
+ # cf. https://github.com/rear/rear/pull/2506#issuecomment-721757810
+ # and https://github.com/rear/rear/pull/2506#issuecomment-722315498
+ # and https://github.com/rear/rear/issues/2509
+ LogPrintError "Error: No 'uuid' value for LUKS$version volume $target_name in $source_device (mounting it or booting the recreated system may fail)"
+ fi
+
+ echo "crypt /dev/mapper/$target_name $source_device type=$luks_type cipher=$cipher key_size=$key_size hash=$hash uuid=$uuid $keyfile_option" >> $DISKLAYOUT_FILE
+
+done < <( dmsetup ls --target crypt )
+# Let this script return successfully when invalid_cryptsetup_option_value is not true:
+is_true $invalid_cryptsetup_option_value && Error "Invalid or empty LUKS cryptsetup option value(s) in $DISKLAYOUT_FILE" || true
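
The LUKS detection added above can be exercised on its own. A minimal sketch, assuming util-linux blkid, root privileges, and an example device path:

    device=/dev/sda7   # example device, adjust as needed
    if blkid -p -o export "$device" | grep -q "TYPE=crypto_LUKS" ; then
        # Same digit extraction as in the patch, tolerant of spaces around '=':
        version=$( blkid -p -o export "$device" | grep "VERSION" | cut -d= -f2 | tr -c -d '[:digit:]' )
        echo "LUKS$version volume on $device"
    else
        echo "$device is not a LUKS volume"
    fi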

@@ -0,0 +1,693 @@
diff --git a/usr/share/rear/backup/NETFS/default/500_make_backup.sh b/usr/share/rear/backup/NETFS/default/500_make_backup.sh
index 02c204c5..60c80b5f 100644
--- a/usr/share/rear/backup/NETFS/default/500_make_backup.sh
+++ b/usr/share/rear/backup/NETFS/default/500_make_backup.sh
@@ -16,6 +16,8 @@ function set_tar_features () {
FEATURE_TAR_IS_SET=1
}
+local backup_prog_rc
+
local scheme=$( url_scheme $BACKUP_URL )
local path=$( url_path $BACKUP_URL )
local opath=$( backup_path $scheme $path )
diff --git a/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh b/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh
index c560ec94..1692ba4c 100644
--- a/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh
+++ b/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh
@@ -1,5 +1,7 @@
# Start SELinux if it was stopped - check presence of $TMP_DIR/selinux.mode
+local backup_prog_rc
+
[ -f $TMP_DIR/selinux.mode ] && {
touch "${TMP_DIR}/selinux.autorelabel"
cat $TMP_DIR/selinux.mode > $SELINUX_ENFORCE
@@ -13,19 +15,19 @@
ssh $RSYNC_USER@$RSYNC_HOST "chmod $v 755 ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" 2>/dev/null
$BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" \
"$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" 2>/dev/null
- _rc=$?
- if [ $_rc -ne 0 ]; then
- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$_rc]}]"
+ backup_prog_rc=$?
+ if [ $backup_prog_rc -ne 0 ]; then
+ LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]"
#StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup"
fi
;;
(rsync)
- $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" ${BACKUP_RSYNC_OPTIONS[@]} \
+ $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" "${BACKUP_RSYNC_OPTIONS[@]}" \
"${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel"
- _rc=$?
- if [ $_rc -ne 0 ]; then
- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$_rc]}]"
+ backup_prog_rc=$?
+ if [ $backup_prog_rc -ne 0 ]; then
+ LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]"
#StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup"
fi
;;
diff --git a/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh b/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh
index cae12e38..9a17d6bb 100644
--- a/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh
+++ b/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh
@@ -1,3 +1,5 @@
+local backup_prog_rc
+
[ -f $TMP_DIR/force.autorelabel ] && {
> "${TMP_DIR}/selinux.autorelabel"
@@ -11,19 +13,19 @@
ssh $RSYNC_USER@$RSYNC_HOST "chmod $v 755 ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" 2>/dev/null
$BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" \
"$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" 2>/dev/null
- _rc=$?
- if [ $_rc -ne 0 ]; then
- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$_rc]}]"
+ backup_prog_rc=$?
+ if [ $backup_prog_rc -ne 0 ]; then
+ LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]"
#StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup"
fi
;;
(rsync)
- $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" ${BACKUP_RSYNC_OPTIONS[@]} \
+ $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" "${BACKUP_RSYNC_OPTIONS[@]}" \
"${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel"
- _rc=$?
- if [ $_rc -ne 0 ]; then
- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$_rc]}]"
+ backup_prog_rc=$?
+ if [ $backup_prog_rc -ne 0 ]; then
+ LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]"
#StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup"
fi
;;
@@ -35,8 +37,7 @@
# probably using the BACKUP=NETFS workflow instead
if [ -d "${opath}" ]; then
if [ ! -f "${opath}/selinux.autorelabel" ]; then
- > "${opath}/selinux.autorelabel"
- StopIfError "Failed to create selinux.autorelabel on ${opath}"
+ > "${opath}/selinux.autorelabel" || Error "Failed to create selinux.autorelabel on ${opath}"
fi
fi
;;
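
Several hunks in this patch replace the two-line "command" plus "StopIfError message" pattern with a single "command || Error message". The effect is the same (abort with the message when the command fails), but the check can no longer get separated from the command it guards. A rough equivalent outside ReaR (error_exit stands in for ReaR's Error function):

    error_exit() { echo "ERROR: $*" >&2 ; exit 1 ; }
    mkdir -p /tmp/rear-demo/backup || error_exit "Could not mkdir /tmp/rear-demo/backup"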
diff --git a/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh b/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh
index 60330007..cedee9ce 100644
--- a/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh
+++ b/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh
@@ -4,7 +4,7 @@
# check for the --relative option in BACKUP_RSYNC_OPTIONS array
# for the default values see the standard definition in conf/default.conf file
-if ! grep -q relative <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then
+if ! grep -q relative <<< "${BACKUP_RSYNC_OPTIONS[*]}" ; then
BACKUP_RSYNC_OPTIONS+=( --relative )
Log "Added option '--relative' to the BACKUP_RSYNC_OPTIONS array during $WORKFLOW workflow"
fi
diff --git a/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh b/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh
index 0d67d362..750a04ca 100644
--- a/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh
+++ b/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh
@@ -2,6 +2,9 @@
# This file is part of Relax-and-Recover, licensed under the GNU General
# Public License. Refer to the included COPYING for full text of license.
+local backup_prog_rc
+local backup_log_message
+
Log "Include list:"
while read -r ; do
Log " $REPLY"
@@ -11,9 +14,9 @@ while read -r ; do
Log " $REPLY"
done < $TMP_DIR/backup-exclude.txt
-LogPrint "Creating $BACKUP_PROG archive on '${RSYNC_HOST}:${RSYNC_PATH}'"
+LogPrint "Creating $BACKUP_PROG backup on '${RSYNC_HOST}:${RSYNC_PATH}'"
-ProgressStart "Running archive operation"
+ProgressStart "Running backup operation"
(
case "$(basename $BACKUP_PROG)" in
@@ -37,7 +40,7 @@ ProgressStart "Running archive operation"
;;
(*)
- # no other backup programs foreseen then rsync so far
+ # no other backup programs foreseen than rsync so far
:
;;
@@ -96,7 +99,7 @@ case "$(basename $BACKUP_PROG)" in
;;
esac
- ProgressInfo "Archived $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec]"
+ ProgressInfo "Backed up $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec]"
done
;;
@@ -113,24 +116,23 @@ ProgressStop
wait $BackupPID
transfertime="$((SECONDS-starttime))"
-_rc="$(cat $TMP_DIR/retval)"
+backup_prog_rc="$(cat $TMP_DIR/retval)"
sleep 1
# everyone should see this warning, even if not verbose
-test "$_rc" -gt 0 && VERBOSE=1 LogPrint "WARNING !
-There was an error (${rsync_err_msg[$_rc]}) during archive creation.
-Please check the archive and see '$RUNTIME_LOGFILE' for more information.
+test "$backup_prog_rc" -gt 0 && Error "
+There was an error (${rsync_err_msg[$backup_prog_rc]}) during backup creation.
+Please check the destination and see '$RUNTIME_LOGFILE' for more information.
-Since errors are often related to files that cannot be saved by
-$BACKUP_PROG, we will continue the $WORKFLOW process. However, you MUST
-verify the backup yourself before trusting it !
+If the error is related to files that cannot and should not be saved by
+$BACKUP_PROG, they should be excluded from the backup.
"
-_message="$(tail -14 ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log)"
-if [ $_rc -eq 0 -a "$_message" ] ; then
- LogPrint "$_message in $transfertime seconds."
+backup_log_message="$(tail -14 ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log)"
+if [ $backup_prog_rc -eq 0 -a "$backup_log_message" ] ; then
+ LogPrint "$backup_log_message in $transfertime seconds."
elif [ "$size" ]; then
- LogPrint "Archived $((size/1024/1024)) MiB in $((transfertime)) seconds [avg $((size/1024/transfertime)) KiB/sec]"
+ LogPrint "Backed up $((size/1024/1024)) MiB in $((transfertime)) seconds [avg $((size/1024/transfertime)) KiB/sec]"
fi
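
The surrounding script (shown only partially here) uses a pattern that also appears in the restore scripts below: the backup pipeline runs in a background subshell, rsync's exit code is written to a retval file, and the parent reads it back after wait, because wait only reports the status of the last command in the subshell. A minimal sketch of the pattern with illustrative paths:

    TMP_DIR=$( mktemp -d )
    (
        rsync -a /etc/hostname "$TMP_DIR/"
        # Record rsync's exit code; the subshell itself exits 0 because echo succeeds:
        echo $? >"$TMP_DIR/retval"
    ) &
    BackupPID=$!
    wait $BackupPID
    backup_prog_rc=$( cat "$TMP_DIR/retval" )
    echo "rsync returned $backup_prog_rc"
    rm -rf "$TMP_DIR"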
diff --git a/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh b/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh
index 01801a4e..b90d459b 100644
--- a/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh
+++ b/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh
@@ -1,6 +1,8 @@
# copy the backup.log & rear.log file to remote destination with timestamp added
-Timestamp=$( date +%Y%m%d.%H%M )
+local timestamp
+
+timestamp=$( date +%Y%m%d.%H%M )
# compress the log file first
gzip "$TMP_DIR/$BACKUP_PROG_ARCHIVE.log" || Error "Failed to 'gzip $TMP_DIR/$BACKUP_PROG_ARCHIVE.log'"
@@ -10,15 +12,15 @@ case $RSYNC_PROTO in
# FIXME: Add an explanatory comment why "2>/dev/null" is useful here
# or remove it according to https://github.com/rear/rear/issues/1395
$BACKUP_PROG -a "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log.gz" \
- "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${Timestamp}.log.gz" 2>/dev/null
+ "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${timestamp}.log.gz" 2>/dev/null
- $BACKUP_PROG -a "$RUNTIME_LOGFILE" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/rear-${Timestamp}.log" 2>/dev/null
+ $BACKUP_PROG -a "$RUNTIME_LOGFILE" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/rear-${timestamp}.log" 2>/dev/null
;;
(rsync)
- $BACKUP_PROG -a "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log.gz" ${BACKUP_RSYNC_OPTIONS[@]} \
- "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${Timestamp}.log.gz"
+ $BACKUP_PROG -a "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log.gz" "${BACKUP_RSYNC_OPTIONS[@]}" \
+ "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${timestamp}.log.gz"
- $BACKUP_PROG -a "$RUNTIME_LOGFILE" ${BACKUP_RSYNC_OPTIONS[@]} "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}//rear-${Timestamp}.log"
+ $BACKUP_PROG -a "$RUNTIME_LOGFILE" "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}//rear-${timestamp}.log"
;;
esac
diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf
index 455aa3ce..0c230f38 100644
--- a/usr/share/rear/conf/default.conf
+++ b/usr/share/rear/conf/default.conf
@@ -1106,7 +1106,8 @@ BACKUP_ONLY_EXCLUDE="no"
MANUAL_INCLUDE=NO
# Disable SELinux policy during backup with NETFS or RSYNC (default yes)
BACKUP_SELINUX_DISABLE=1
-# Enable integrity check of the backup archive (only with BACKUP=NETFS and BACKUP_PROG=tar)
+# Enable integrity check of the backup archive (full check only with BACKUP=NETFS and BACKUP_PROG=tar,
+# with BACKUP=rsync or BACKUP_PROG=rsync it only checks whether rsync completed the restore successfully)
BACKUP_INTEGRITY_CHECK=
# Define BACKUP_TYPE.
# By default BACKUP_TYPE is empty which means "rear mkbackup" will create a full backup.
diff --git a/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh b/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh
index 32ac391d..519febf5 100644
--- a/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh
+++ b/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh
@@ -2,21 +2,19 @@
# RSYNC_PREFIX=$HOSTNAME as set in default.conf
# create temporary local work-spaces to collect files (we already make the remote backup dir with the correct mode!!)
-mkdir -p $v -m0750 "${TMP_DIR}/rsync/${RSYNC_PREFIX}" >&2
-StopIfError "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}'"
-mkdir -p $v -m0755 "${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup" >&2
-StopIfError "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup'"
+mkdir -p $v -m0750 "${TMP_DIR}/rsync/${RSYNC_PREFIX}" >&2 || Error "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}'"
+mkdir -p $v -m0755 "${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup" >&2 || Error "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup'"
case $RSYNC_PROTO in
(ssh)
- $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}" >/dev/null 2>&1
- StopIfError "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}"
+ $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}" >/dev/null 2>&1 \
+ || Error "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}"
;;
(rsync)
- $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" ${BACKUP_RSYNC_OPTIONS[@]} "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/" >/dev/null
- StopIfError "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}"
+ $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/" >/dev/null \
+ || Error "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}"
;;
esac
diff --git a/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh b/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh
index c7b430d8..96b62da1 100644
--- a/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh
+++ b/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh
@@ -5,19 +5,19 @@ LogPrint "Copying resulting files to $OUTPUT_URL location"
# if called as mkbackuponly then we just don't have any result files.
if test "$RESULT_FILES" ; then
- Log "Copying files '${RESULT_FILES[@]}' to $OUTPUT_URL location"
- cp $v "${RESULT_FILES[@]}" "${TMP_DIR}/rsync/${RSYNC_PREFIX}/"
- StopIfError "Could not copy files to local rsync location"
+ Log "Copying files '${RESULT_FILES[*]}' to $OUTPUT_URL location"
+ cp $v "${RESULT_FILES[@]}" "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" \
+ || Error "Could not copy files to local rsync location"
fi
-echo "$VERSION_INFO" >"${TMP_DIR}/rsync/${RSYNC_PREFIX}/VERSION"
-StopIfError "Could not create VERSION file on local rsync location"
+echo "$VERSION_INFO" >"${TMP_DIR}/rsync/${RSYNC_PREFIX}/VERSION" \
+ || Error "Could not create VERSION file on local rsync location"
-cp $v $(get_template "RESULT_usage_$OUTPUT.txt") "${TMP_DIR}/rsync/${RSYNC_PREFIX}/README"
-StopIfError "Could not copy usage file to local rsync location"
+cp $v $(get_template "RESULT_usage_$OUTPUT.txt") "${TMP_DIR}/rsync/${RSYNC_PREFIX}/README" \
+ || Error "Could not copy usage file to local rsync location"
-cat "$RUNTIME_LOGFILE" >"${TMP_DIR}/rsync/${RSYNC_PREFIX}/rear.log"
-StopIfError "Could not copy $RUNTIME_LOGFILE to local rsync location"
+cat "$RUNTIME_LOGFILE" >"${TMP_DIR}/rsync/${RSYNC_PREFIX}/rear.log" \
+ || Error "Could not copy $RUNTIME_LOGFILE to local rsync location"
case $RSYNC_PROTO in
@@ -25,20 +25,20 @@ case $RSYNC_PROTO in
Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ ${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/"
# FIXME: Add an explanatory comment why "2>/dev/null" is useful here
# or remove it according to https://github.com/rear/rear/issues/1395
- $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null
- StopIfError "Could not copy '${RESULT_FILES[@]}' to $OUTPUT_URL location"
+ $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null \
+ || Error "Could not copy '${RESULT_FILES[*]}' to $OUTPUT_URL location"
;;
(rsync)
- Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ ${BACKUP_RSYNC_OPTIONS[@]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/"
+ Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ ${BACKUP_RSYNC_OPTIONS[*]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/"
# FIXME: Add an explanatory comment why "2>/dev/null" is useful here
# or remove it according to https://github.com/rear/rear/issues/1395
- $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" ${BACKUP_RSYNC_OPTIONS[@]} "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null
- StopIfError "Could not copy '${RESULT_FILES[@]}' to $OUTPUT_URL location"
+ $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null \
+ || Error "Could not copy '${RESULT_FILES[*]}' to $OUTPUT_URL location"
;;
esac
# cleanup the temporary space (need it for the log file during backup)
-rm -rf "${TMP_DIR}/rsync/${RSYNC_PREFIX}/"
-LogIfError "Could not cleanup temoprary rsync space: ${TMP_DIR}/rsync/${RSYNC_PREFIX}/"
+rm -rf "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" \
+ || Log "Could not cleanup temporary rsync space: ${TMP_DIR}/rsync/${RSYNC_PREFIX}/"
diff --git a/usr/share/rear/prep/NETFS/default/400_automatic_exclude_recreate.sh b/usr/share/rear/prep/NETFS/default/400_automatic_exclude_recreate.sh
index fadf9d72..3c719c44 100644
--- a/usr/share/rear/prep/NETFS/default/400_automatic_exclude_recreate.sh
+++ b/usr/share/rear/prep/NETFS/default/400_automatic_exclude_recreate.sh
@@ -31,7 +31,7 @@ case $scheme in
backup_directory_mountpoint=$( df -P "$backup_directory" | tail -1 | awk '{print $6}' )
test "/" = "$backup_directory_mountpoint" && Error "URL '$BACKUP_URL' has the backup directory '$backup_directory' in the '/' filesystem which is forbidden."
# When the mountpoint of the backup directory is not yet excluded add its mountpoint to the EXCLUDE_RECREATE array:
- if ! grep -q "$backup_directory_mountpoint" <<< $( echo ${EXCLUDE_RECREATE[@]} ) ; then
+ if ! grep -q "$backup_directory_mountpoint" <<< "${EXCLUDE_RECREATE[*]}" ; then
EXCLUDE_RECREATE+=( "fs:$backup_directory_mountpoint" )
fi
;;
diff --git a/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh b/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh
index ac26edfa..eb7df29e 100644
--- a/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh
+++ b/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh
@@ -33,7 +33,7 @@ case $(basename $BACKUP_PROG) in
touch $TMP_DIR/force.autorelabel # after reboot the restored system do a forced SELinux relabeling
else
# if --xattrs is already set; no need to do it again
- if ! grep -q xattrs <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then
+ if ! grep -q xattrs <<< "${BACKUP_RSYNC_OPTIONS[*]}" ; then
BACKUP_RSYNC_OPTIONS+=( --xattrs )
fi
RSYNC_SELINUX=1 # variable used in recover mode (means using xattr and not disable SELinux)
diff --git a/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh b/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh
index b8535352..c964a148 100644
--- a/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh
+++ b/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh
@@ -33,22 +33,20 @@ RSYNC_PORT=873 # default port (of rsync server)
RSYNC_PATH=
-echo $BACKUP_URL | egrep -q '(::)' # new style '::' means rsync protocol
-if [[ $? -eq 0 ]]; then
+if egrep -q '(::)' <<< $BACKUP_URL ; then # new style '::' means rsync protocol
RSYNC_PROTO=rsync
else
RSYNC_PROTO=ssh
fi
-echo $host | grep -q '@'
-if [[ $? -eq 0 ]]; then
+if grep -q '@' <<< $host ; then
RSYNC_USER="${host%%@*}" # grab user name
else
RSYNC_USER=root
fi
# remove USER@ if present (we don't need it anymore)
-tmp2="${host#*@}"
+local tmp2="${host#*@}"
case "$RSYNC_PROTO" in
@@ -56,8 +54,7 @@ case "$RSYNC_PROTO" in
# tmp2=witsbebelnx02::backup or tmp2=witsbebelnx02::
RSYNC_HOST="${tmp2%%::*}"
# path=/gdhaese1@witsbebelnx02::backup or path=/backup
- echo $path | grep -q '::'
- if [[ $? -eq 0 ]]; then
+ if grep -q '::' <<< $path ; then
RSYNC_PATH="${path##*::}"
else
RSYNC_PATH="${path##*/}"
@@ -79,8 +76,7 @@ esac
# check if host is reachable
if test "$PING" ; then
- ping -c 2 "$RSYNC_HOST" >/dev/null
- StopIfError "Backup host [$RSYNC_HOST] not reachable."
+ ping -c 2 "$RSYNC_HOST" >/dev/null || Error "Backup host [$RSYNC_HOST] not reachable."
else
Log "Skipping ping test"
fi
@@ -89,15 +85,15 @@ fi
case "$RSYNC_PROTO" in
(rsync)
- Log "Test: $BACKUP_PROG ${BACKUP_RSYNC_OPTIONS[@]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/"
- $BACKUP_PROG ${BACKUP_RSYNC_OPTIONS[@]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/ >/dev/null
- StopIfError "Rsync daemon not running on $RSYNC_HOST"
+ Log "Test: $BACKUP_PROG ${BACKUP_RSYNC_OPTIONS[*]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/"
+ $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/ >/dev/null \
+ || Error "Rsync daemon not running on $RSYNC_HOST"
;;
(ssh)
Log "Test: ssh ${RSYNC_USER}@${RSYNC_HOST} /bin/true"
- ssh ${RSYNC_USER}@${RSYNC_HOST} /bin/true >/dev/null 2>&1
- StopIfError "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]"
+ ssh ${RSYNC_USER}@${RSYNC_HOST} /bin/true >/dev/null 2>&1 \
+ || Error "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]"
;;
esac
diff --git a/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh b/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh
index 446dd736..e9103531 100644
--- a/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh
+++ b/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh
@@ -2,15 +2,17 @@
# This file is part of Relax-and-Recover, licensed under the GNU General
# Public License. Refer to the included COPYING for full text of license.
# try to grab the rsync protocol version of rsync on the remote server
+
+local remote_mountpoint
+
if [ -z "$RSYNC_PROTOCOL_VERSION" ]; then
case $RSYNC_PROTO in
(ssh)
- ssh ${RSYNC_USER}@${RSYNC_HOST} rsync --version >"$TMP_DIR/rsync_protocol" 2>&1
- StopIfError "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]"
- grep -q "protocol version" "$TMP_DIR/rsync_protocol"
- if [ $? -eq 0 ]; then
+ ssh ${RSYNC_USER}@${RSYNC_HOST} rsync --version >"$TMP_DIR/rsync_protocol" 2>&1 \
+ || Error "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]"
+ if grep -q "protocol version" "$TMP_DIR/rsync_protocol" ; then
RSYNC_PROTOCOL_VERSION=$(grep 'protocol version' "$TMP_DIR/rsync_protocol" | awk '{print $6}')
else
RSYNC_PROTOCOL_VERSION=29 # being conservative (old rsync version < 3.0)
@@ -30,25 +32,21 @@ else
fi
-if [ "${RSYNC_USER}" != "root" ]; then
+if [ "${RSYNC_USER}" != "root" -a $RSYNC_PROTO = "ssh" ]; then
if [ $RSYNC_PROTOCOL_VERSION -gt 29 ]; then
if grep -q "no xattrs" "$TMP_DIR/rsync_protocol"; then
# no xattrs available in remote rsync, so --fake-super is not possible
Error "rsync --fake-super not possible on system ($RSYNC_HOST) (no xattrs compiled in rsync)"
else
# when using --fake-super we must have user_xattr mount options on the remote mntpt
- _mntpt=$(ssh ${RSYNC_USER}@${RSYNC_HOST} 'cd ${RSYNC_PATH}; df -P .' 2>/dev/null | tail -1 | awk '{print $6}')
- ssh ${RSYNC_USER}@${RSYNC_HOST} "cd ${RSYNC_PATH} && touch .is_xattr_supported && setfattr -n user.comment -v 'File created by ReaR to test if this filesystems supports extended attributes.' .is_xattr_supported && getfattr -n user.comment .is_xattr_supported 1>/dev/null; find .is_xattr_supported -empty -delete"
- StopIfError "Remote file system $_mntpt does not have user_xattr mount option set!"
- #BACKUP_RSYNC_OPTIONS+=( --xattrs --rsync-path="""rsync --fake-super""" )
+ remote_mountpoint=$(ssh ${RSYNC_USER}@${RSYNC_HOST} 'cd ${RSYNC_PATH}; df -P .' 2>/dev/null | tail -1 | awk '{print $6}')
+ ssh ${RSYNC_USER}@${RSYNC_HOST} "cd ${RSYNC_PATH} && touch .is_xattr_supported && setfattr -n user.comment -v 'File created by ReaR to test if this filesystems supports extended attributes.' .is_xattr_supported && getfattr -n user.comment .is_xattr_supported 1>/dev/null; find .is_xattr_supported -empty -delete" \
+ || Error "Remote file system $remote_mountpoint does not have user_xattr mount option set!"
+ #BACKUP_RSYNC_OPTIONS+=( --xattrs --rsync-path="rsync --fake-super" )
# see issue #366 for explanation of removing --xattrs
- BACKUP_RSYNC_OPTIONS+=( --rsync-path="""rsync --fake-super""" )
+ BACKUP_RSYNC_OPTIONS+=( --rsync-path="rsync --fake-super" )
fi
else
- if [ ${BACKUP_RSYNC_OPTIONS[@]/--fake-super/} != ${BACKUP_RSUNC_OPTIONS[@]} ]; then
- Error "rsync --fake-super not possible on system ($RSYNC_HOST) (please upgrade rsync to 3.x)"
- else
- Log "Warning: rsync --fake-super not possible on system ($RSYNC_HOST) (please upgrade rsync to 3.x)"
- fi
+ Error "rsync --fake-super not possible on system ($RSYNC_HOST) (please upgrade rsync to 3.x)"
fi
fi
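
The remote user_xattr probe above boils down to: create a test file, set and read back a user.* extended attribute, and delete the test file again. A local (non-ssh) sketch of the same check, assuming setfattr/getfattr from the attr package and an example directory on the filesystem to be checked:

    dir=/var/tmp   # example directory, adjust as needed
    if ( cd "$dir" &&
         touch .is_xattr_supported &&
         setfattr -n user.comment -v 'xattr test' .is_xattr_supported &&
         getfattr -n user.comment .is_xattr_supported >/dev/null ) ; then
        echo "user_xattr is supported on $dir"
    else
        echo "user_xattr is NOT supported on $dir"
    fi
    rm -f "$dir/.is_xattr_supported"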
diff --git a/usr/share/rear/restore/DUPLICITY/default/400_restore_duplicity.sh b/usr/share/rear/restore/DUPLICITY/default/400_restore_duplicity.sh
index 0a9c9648..220ccc57 100644
--- a/usr/share/rear/restore/DUPLICITY/default/400_restore_duplicity.sh
+++ b/usr/share/rear/restore/DUPLICITY/default/400_restore_duplicity.sh
@@ -5,6 +5,8 @@
# Restore from remote backup via DUPLICIY over rsync
if [ "$BACKUP_PROG" = "duplicity" ]; then
+ local backup_prog_rc
+ local restore_log_message
LogPrint "========================================================================"
LogPrint "Restoring backup with $BACKUP_PROG from '$BACKUP_DUPLICITY_URL'"
@@ -49,7 +51,8 @@ if [ "$BACKUP_PROG" = "duplicity" ]; then
LogPrint "with CMD: $DUPLICITY_PROG -v 5 $GPG_KEY --force --tempdir=$DUPLICITY_TEMPDIR $BACKUP_DUPLICITY_URL/$HOSTNAME/ $TARGET_FS_ROOT"
$DUPLICITY_PROG -v 5 $GPG_KEY --force --tempdir="$DUPLICITY_TEMPDIR" $BACKUP_DUPLICITY_URL/$HOSTNAME/ $TARGET_FS_ROOT 0<&6 | tee $TMP_DIR/duplicity-restore.log
fi
- _rc=$?
+ # FIXME: this collects the exit code from "tee", not from $DUPLICITY_PROG
+ backup_prog_rc=$?
transfertime="$((SECONDS-$starttime))"
sleep 1
@@ -65,20 +68,20 @@ if [ "$BACKUP_PROG" = "duplicity" ]; then
LogPrint "========================================================================"
- if [ "$_rc" -gt 0 ]; then
+ if [ "$backup_prog_rc" -gt 0 ]; then
LogPrint "WARNING !
There was an error while restoring the archive.
Please check '$RUNTIME_LOGFILE' and $TMP_DIR/duplicity-restore.log for more information.
You should also manually check the restored system to see whether it is complete.
"
- _message="$(tail -14 ${TMP_DIR}/duplicity-restore.log)"
+ restore_log_message="$(tail -14 ${TMP_DIR}/duplicity-restore.log)"
LogPrint "Last 14 Lines of ${TMP_DIR}/duplicity-restore.log:"
- LogPrint "$_message"
+ LogPrint "$restore_log_message"
fi
- if [ $_rc -eq 0 ] ; then
+ if [ $backup_prog_rc -eq 0 ] ; then
LogPrint "Restore completed in $transfertime seconds."
fi
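
Regarding the FIXME added above: when duplicity is piped into tee, plain $? is tee's exit code. One common bash way to inspect the first command in the pipeline instead is PIPESTATUS (shown here only as background; this is not what the patch does):

    false | tee /dev/null
    echo "status of the last command (tee): $?"
    # PIPESTATUS is overwritten by every command, so read it right after the pipeline:
    false | tee /dev/null
    echo "status of the first command (false): ${PIPESTATUS[0]}"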
diff --git a/usr/share/rear/restore/RBME/default/400_restore_backup.sh b/usr/share/rear/restore/RBME/default/400_restore_backup.sh
index 28a3c354..3e97e16b 100644
--- a/usr/share/rear/restore/RBME/default/400_restore_backup.sh
+++ b/usr/share/rear/restore/RBME/default/400_restore_backup.sh
@@ -2,6 +2,8 @@ if [[ -z "$RBME_BACKUP" ]] ; then
Error "No RBME backup selected (BACKUP_URL?). Aborting."
fi
+local backup_prog_rc
+
scheme=$(url_scheme "$BACKUP_URL")
LogPrint "Restoring from backup $RBME_BACKUP."
@@ -43,11 +45,11 @@ transfertime="$((SECONDS-starttime))"
# harvest return code from background job. The kill -0 $BackupPID loop above should
# have made sure that this wait won't do any real "waiting" :-)
wait $BackupPID
-_rc=$?
+backup_prog_rc=$?
sleep 1
-test "$_rc" -gt 0 && LogPrint "WARNING !
-There was an error (${rsync_err_msg[$_rc]}) while restoring the archive.
+test "$backup_prog_rc" -gt 0 && LogPrint "WARNING !
+There was an error (${rsync_err_msg[$backup_prog_rc]}) while restoring the archive.
Please check '$RUNTIME_LOGFILE' for more information. You should also
manually check the restored system to see whether it is complete.
"
diff --git a/usr/share/rear/restore/RSYNC/default/200_remove_relative_rsync_option.sh b/usr/share/rear/restore/RSYNC/default/200_remove_relative_rsync_option.sh
index 53915322..a792f195 100644
--- a/usr/share/rear/restore/RSYNC/default/200_remove_relative_rsync_option.sh
+++ b/usr/share/rear/restore/RSYNC/default/200_remove_relative_rsync_option.sh
@@ -4,11 +4,11 @@
# without the --relative option ; my feeling says it is better to remove it from array BACKUP_RSYNC_OPTIONS
# If I'm wrong please let us know (use issue mentioned above to comment)
-if grep -q relative <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then
+if grep -q -- "--relative" <<< "${BACKUP_RSYNC_OPTIONS[*]}" ; then
BACKUP_RSYNC_OPTIONS=( $( RmInArray "--relative" "${BACKUP_RSYNC_OPTIONS[@]}" ) )
Log "Removed option '--relative' from the BACKUP_RSYNC_OPTIONS array during $WORKFLOW workflow"
fi
-if grep -q "-R" <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then
+if grep -q -- "-R" <<< "${BACKUP_RSYNC_OPTIONS[*]}" ; then
BACKUP_RSYNC_OPTIONS=( $( RmInArray "-R" "${BACKUP_RSYNC_OPTIONS[@]}" ) )
Log "Removed option '-R' from the BACKUP_RSYNC_OPTIONS array during $WORKFLOW workflow"
fi
diff --git a/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh b/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh
index 2a0bf15e..993088be 100644
--- a/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh
+++ b/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh
@@ -4,10 +4,10 @@ get_size() {
echo $( stat --format '%s' "$TARGET_FS_ROOT/$1" )
}
-mkdir -p "${TMP_DIR}/rsync/${NETFS_PREFIX}"
-StopIfError "Could not mkdir '$TMP_DIR/rsync/${NETFS_PREFIX}'"
+local backup_prog_rc
+local restore_log_message
-LogPrint "Restoring $BACKUP_PROG archive from '${RSYNC_HOST}:${RSYNC_PATH}'"
+LogPrint "Restoring $BACKUP_PROG backup from '${RSYNC_HOST}:${RSYNC_PATH}'"
ProgressStart "Restore operation"
(
@@ -33,9 +33,10 @@ ProgressStart "Restore operation"
;;
(*)
- # no other backup programs foreseen then rsync so far
+ # no other backup programs foreseen than rsync so far
:
;;
+
esac
echo $? >$TMP_DIR/retval
) >"${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log" &
@@ -65,6 +66,7 @@ case "$(basename $BACKUP_PROG)" in
ProgressStep
done
;;
+
esac
ProgressStop
@@ -72,20 +74,28 @@ transfertime="$((SECONDS-starttime))"
# harvest return code from background job. The kill -0 $BackupPID loop above should
# have made sure that this wait won't do any real "waiting" :-)
-wait $BackupPID
-_rc=$?
+wait $BackupPID || LogPrintError "Restore job returned a nonzero exit code $?"
+# harvest the actual return code of rsync. Finishing the pipeline with an error code above is actually unlikely,
+# because rsync is not the last command in it. But error returns from rsync are common and must be handled.
+backup_prog_rc="$(cat $TMP_DIR/retval)"
sleep 1
-test "$_rc" -gt 0 && LogPrint "WARNING !
-There was an error (${rsync_err_msg[$_rc]}) while restoring the archive.
+if test "$backup_prog_rc" -gt 0 ; then
+ # TODO: Shouldn't we tell the user to check ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log as well?
+ LogPrintError "WARNING !
+There was an error (${rsync_err_msg[$backup_prog_rc]}) while restoring the backup.
Please check '$RUNTIME_LOGFILE' for more information. You should also
manually check the restored system to see whether it is complete.
"
+ is_true "$BACKUP_INTEGRITY_CHECK" && Error "Integrity check failed, restore aborted because BACKUP_INTEGRITY_CHECK is enabled"
+fi
-_message="$(tail -14 ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log)"
+restore_log_message="$(tail -14 ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log)"
-if [ $_rc -eq 0 -a "$_message" ] ; then
- LogPrint "$_message in $transfertime seconds."
+if [ $backup_prog_rc -eq 0 -a "$restore_log_message" ] ; then
+ LogPrint "$restore_log_message in $transfertime seconds."
elif [ "$size" ]; then
LogPrint "Restored $((size/1024/1024)) MiB in $((transfertime)) seconds [avg $((size/1024/transfertime)) KiB/sec]"
fi
+
+return $backup_prog_rc
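
A minimal standalone sketch (not ReaR code, names are illustrative) of the pattern the hunk above relies on: the restore command runs in a background subshell, its own exit code is written to a retval file, and the parent reads that file after wait, because the subshell's exit status only reflects the last command it ran.

retval_file=$( mktemp )
(
    false                        # stand-in for the real rsync call
    echo $? >"$retval_file"      # record the restore command's own exit code
) &
backup_pid=$!
wait $backup_pid || echo "background job itself returned nonzero: $?"
backup_prog_rc=$( cat "$retval_file" )
echo "restore command exit code: $backup_prog_rc"    # -> 1
rm -f "$retval_file"
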
diff --git a/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh b/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh
index 3622884a..890161f1 100644
--- a/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh
+++ b/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh
@@ -3,8 +3,8 @@
[[ $RSYNC_SELINUX ]] && {
# if --xattrs is already set; no need to do it again
- if ! grep -q xattrs <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then
- RSYNC_OPTIONS=( "${BACKUP_RSYNC_OPTIONS[@]}" --xattrs )
+ if ! grep -q xattrs <<< "${BACKUP_RSYNC_OPTIONS[*]}" ; then
+ BACKUP_RSYNC_OPTIONS+=( --xattrs )
fi
}
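
A minimal sketch (not ReaR code) of the difference the hunk above fixes: '+=( ... )' appends to the array in place, while the old code assigned into a differently named variable and left BACKUP_RSYNC_OPTIONS itself unchanged.

opts=( --archive --hard-links )
other=( "${opts[@]}" --xattrs )   # old style: a new variable, 'opts' stays as it was
opts+=( --xattrs )                # new style: appends to the same array
echo "${opts[@]}"                 # -> --archive --hard-links --xattrs
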
diff --git a/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh b/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh
index 47ed9e02..b2fb72f5 100644
--- a/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh
+++ b/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh
@@ -3,12 +3,12 @@
case $RSYNC_PROTO in
(ssh)
- ssh ${RSYNC_USER}@${RSYNC_HOST} "ls -ld ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1
- StopIfError "Archive not found on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]"
+ ssh ${RSYNC_USER}@${RSYNC_HOST} "ls -ld ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1 \
+ || Error "Archive not found on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]"
;;
(rsync)
- $BACKUP_PROG "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1
- StopIfError "Archive not found on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]"
+ $BACKUP_PROG "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1 \
+ || Error "Archive not found on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]"
;;
esac

@@ -0,0 +1,274 @@
diff --git a/usr/share/rear/finalize/Linux-i386/670_run_efibootmgr.sh b/usr/share/rear/finalize/Linux-i386/670_run_efibootmgr.sh
old mode 100644
new mode 100755
index cc646359..33d87767
--- a/usr/share/rear/finalize/Linux-i386/670_run_efibootmgr.sh
+++ b/usr/share/rear/finalize/Linux-i386/670_run_efibootmgr.sh
@@ -8,6 +8,10 @@ is_true $USING_UEFI_BOOTLOADER || return 0
# (cf. finalize/Linux-i386/610_EFISTUB_run_efibootmgr.sh):
is_true $EFI_STUB && return
+LogPrint "Creating EFI Boot Manager entries..."
+
+local esp_mountpoint esp_mountpoint_inside boot_efi_parts boot_efi_dev
+
# When UEFI_BOOTLOADER is not a regular file in the restored target system
# (cf. how esp_mountpoint is set below) it means BIOS is used
# (cf. rescue/default/850_save_sysfs_uefi_vars.sh)
@@ -15,64 +19,80 @@ is_true $EFI_STUB && return
# because when UEFI_BOOTLOADER is empty the test below evaluates to
# test -f /mnt/local/
# which also returns false because /mnt/local/ is a directory
-# (cf. https://github.com/rear/rear/pull/2051/files#r258826856):
-test -f "$TARGET_FS_ROOT/$UEFI_BOOTLOADER" || return 0
+# (cf. https://github.com/rear/rear/pull/2051/files#r258826856)
+# but using BIOS conflicts with USING_UEFI_BOOTLOADER being true
+# i.e. we should create EFI Boot Manager entries but we cannot:
+if ! test -f "$TARGET_FS_ROOT/$UEFI_BOOTLOADER" ; then
+ LogPrintError "Failed to create EFI Boot Manager entries (UEFI bootloader '$UEFI_BOOTLOADER' not found under target $TARGET_FS_ROOT)"
+ return 1
+fi
# Determine where the EFI System Partition (ESP) is mounted in the currently running recovery system:
-esp_mountpoint=$( df -P "$TARGET_FS_ROOT/$UEFI_BOOTLOADER" | tail -1 | awk '{print $6}' )
-# Use TARGET_FS_ROOT/boot/efi as fallback ESP mountpoint:
-test "$esp_mountpoint" || esp_mountpoint="$TARGET_FS_ROOT/boot/efi"
+esp_mountpoint=$( filesystem_name "$TARGET_FS_ROOT/$UEFI_BOOTLOADER" )
+# Use TARGET_FS_ROOT/boot/efi as fallback ESP mountpoint (filesystem_name returns "/"
+# if mountpoint not found otherwise):
+if [ "$esp_mountpoint" = "/" ] ; then
+ esp_mountpoint="$TARGET_FS_ROOT/boot/efi"
+ LogPrint "Mountpoint of $TARGET_FS_ROOT/$UEFI_BOOTLOADER not found, trying $esp_mountpoint"
+fi
# Skip if there is no esp_mountpoint directory (e.g. the fallback ESP mountpoint may not exist).
# Double quotes are mandatory here because 'test -d' without any (possibly empty) argument results true:
-test -d "$esp_mountpoint" || return 0
-
-BootEfiDev="$( mount | grep "$esp_mountpoint" | awk '{print $1}' )"
-# /dev/sda1 or /dev/mapper/vol34_part2 or /dev/mapper/mpath99p4
-Dev=$( get_device_name $BootEfiDev )
-# 1 (must anyway be a low nr <9)
-ParNr=$( get_partition_number $Dev )
-# /dev/sda or /dev/mapper/vol34_part or /dev/mapper/mpath99p or /dev/mmcblk0p
-Disk=$( echo ${Dev%$ParNr} )
-
-# Strip trailing partition remainders like '_part' or '-part' or 'p'
-# if we have 'mapper' in disk device name:
-if [[ ${Dev/mapper//} != $Dev ]] ; then
- # we only expect mpath_partX or mpathpX or mpath-partX
- case $Disk in
- (*p) Disk=${Disk%p} ;;
- (*-part) Disk=${Disk%-part} ;;
- (*_part) Disk=${Disk%_part} ;;
- (*) Log "Unsupported kpartx partition delimiter for $Dev"
- esac
+if ! test -d "$esp_mountpoint" ; then
+ LogPrintError "Failed to create EFI Boot Manager entries (no ESP mountpoint directory $esp_mountpoint)"
+ return 1
fi
-# For eMMC devices the trailing 'p' in the Disk value
-# (as in /dev/mmcblk0p that is derived from /dev/mmcblk0p1)
-# needs to be stripped (to get /dev/mmcblk0), otherwise the
-# efibootmgr call fails because of a wrong disk device name.
-# See also https://github.com/rear/rear/issues/2103
-if [[ $Disk = *'/mmcblk'+([0-9])p ]] ; then
- Disk=${Disk%p}
-fi
+# Mount point inside the target system,
+# accounting for possible trailing slashes in TARGET_FS_ROOT
+esp_mountpoint_inside="${esp_mountpoint#${TARGET_FS_ROOT%%*(/)}}"
-# For NVMe devices the trailing 'p' in the Disk value
-# (as in /dev/nvme0n1p that is derived from /dev/nvme0n1p1)
-# needs to be stripped (to get /dev/nvme0n1), otherwise the
-# efibootmgr call fails because of a wrong disk device name.
-# See also https://github.com/rear/rear/issues/1564
-if [[ $Disk = *'/nvme'+([0-9])n+([0-9])p ]] ; then
- Disk=${Disk%p}
+boot_efi_parts=$( find_partition "fs:$esp_mountpoint_inside" fs )
+if ! test "$boot_efi_parts" ; then
+ LogPrint "Unable to find ESP $esp_mountpoint_inside in layout"
+ LogPrint "Trying to determine device currently mounted at $esp_mountpoint as fallback"
+ boot_efi_dev="$( mount | grep "$esp_mountpoint" | awk '{print $1}' )"
+ if ! test "$boot_efi_dev" ; then
+ LogPrintError "Cannot create EFI Boot Manager entry (unable to find ESP $esp_mountpoint among mounted devices)"
+ return 1
+ fi
+ if test $(get_component_type "$boot_efi_dev") = part ; then
+ boot_efi_parts="$boot_efi_dev"
+ else
+ boot_efi_parts=$( find_partition "$boot_efi_dev" )
+ fi
+ if ! test "$boot_efi_parts" ; then
+ LogPrintError "Cannot create EFI Boot Manager entry (unable to find partition for $boot_efi_dev)"
+ return 1
+ fi
+ LogPrint "Using fallback EFI boot partition(s) $boot_efi_parts (unable to find ESP $esp_mountpoint_inside in layout)"
fi
+local bootloader partition_block_device partition_number disk efipart
+
# EFI\fedora\shim.efi
-BootLoader=$( echo $UEFI_BOOTLOADER | cut -d"/" -f4- | sed -e 's;/;\\;g' )
-LogPrint "Creating EFI Boot Manager entry '$OS_VENDOR $OS_VERSION' for '$BootLoader' (UEFI_BOOTLOADER='$UEFI_BOOTLOADER')"
-Log efibootmgr --create --gpt --disk ${Disk} --part ${ParNr} --write-signature --label \"${OS_VENDOR} ${OS_VERSION}\" --loader \"\\${BootLoader}\"
-if efibootmgr --create --gpt --disk ${Disk} --part ${ParNr} --write-signature --label "${OS_VENDOR} ${OS_VERSION}" --loader "\\${BootLoader}" ; then
- # ok, boot loader has been set-up - tell rear we are done using following var.
- NOBOOTLOADER=''
- return
-fi
+bootloader=$( echo $UEFI_BOOTLOADER | cut -d"/" -f4- | sed -e 's;/;\\;g' )
+
+for efipart in $boot_efi_parts ; do
+ # /dev/sda1 or /dev/mapper/vol34_part2 or /dev/mapper/mpath99p4
+ partition_block_device=$( get_device_name $efipart )
+ # 1 or 2 or 4 for the examples above
+ partition_number=$( get_partition_number $partition_block_device )
+ if ! disk=$( get_device_from_partition $partition_block_device $partition_number ) ; then
+ LogPrintError "Cannot create EFI Boot Manager entry for ESP $partition_block_device (unable to find the underlying disk)"
+ # do not error out - we may be able to locate other disks if there are more of them
+ continue
+ fi
+ LogPrint "Creating EFI Boot Manager entry '$OS_VENDOR $OS_VERSION' for '$bootloader' (UEFI_BOOTLOADER='$UEFI_BOOTLOADER') "
+ Log efibootmgr --create --gpt --disk $disk --part $partition_number --write-signature --label \"${OS_VENDOR} ${OS_VERSION}\" --loader \"\\${bootloader}\"
+ if efibootmgr --create --gpt --disk $disk --part $partition_number --write-signature --label "${OS_VENDOR} ${OS_VERSION}" --loader "\\${bootloader}" ; then
+ # ok, boot loader has been set-up - continue with other disks (ESP can be on RAID)
+ NOBOOTLOADER=''
+ else
+ LogPrintError "efibootmgr failed to create EFI Boot Manager entry on $disk partition $partition_number (ESP $partition_block_device )"
+ fi
+done
-LogPrintError "efibootmgr failed to create EFI Boot Manager entry for '$BootLoader' (UEFI_BOOTLOADER='$UEFI_BOOTLOADER')"
+is_true $NOBOOTLOADER || return 0
+LogPrintError "efibootmgr failed to create EFI Boot Manager entry for '$bootloader' (UEFI_BOOTLOADER='$UEFI_BOOTLOADER')"
+return 1
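
The partition-to-disk name mapping removed above now lives in the new get_device_from_partition helper added below in layout-functions.sh. A simplified illustration (not the ReaR implementation) of the suffix stripping it performs for the device name styles mentioned in the comments:

for part in /dev/sda1 /dev/mapper/vol34_part2 /dev/mapper/mpath99p4 /dev/nvme0n1p1 /dev/mmcblk0p1 ; do
    num=${part##*[!0-9]}        # trailing digits = partition number (simplified)
    dev=${part%$num}            # drop the partition number
    case $dev in                # drop kpartx/eMMC/NVMe partition delimiters
        (*_part) dev=${dev%_part} ;;
        (*-part) dev=${dev%-part} ;;
        (*/mapper/*p|*/mmcblk*p|*/nvme*p) dev=${dev%p} ;;
    esac
    echo "$part -> $dev"
done
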
diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh
index 54ddb50f..cdd81a14 100644
--- a/usr/share/rear/lib/layout-functions.sh
+++ b/usr/share/rear/lib/layout-functions.sh
@@ -302,12 +302,20 @@ get_child_components() {
done
}
-# Return all ancestors of component $1 [ of type $2 ]
+# Return all ancestors of component $1 [ of type $2 [ skipping types $3 during resolution ] ]
get_parent_components() {
- declare -a ancestors devlist
- declare current child parent
+ declare -a ancestors devlist ignoretypes
+ declare current child parent parenttype
devlist=( "$1" )
+ if [[ "$3" ]] ; then
+ # third argument should, if present, be a space-separated list
+ # of types to ignore when walking up the dependency tree.
+ # Convert it to an array
+ ignoretypes=( $3 )
+ else
+ ignoretypes=()
+ fi
while (( ${#devlist[@]} )) ; do
current=${devlist[0]}
@@ -318,6 +326,13 @@ get_parent_components() {
if IsInArray "$parent" "${ancestors[@]}" ; then
continue
fi
+ ### ...test if parent is of a correct type if requested...
+ if [[ ${#ignoretypes[@]} -gt 0 ]] ; then
+ parenttype=$(get_component_type "$parent")
+ if IsInArray "$parenttype" "${ignoretypes[@]}" ; then
+ continue
+ fi
+ fi
### ...and add them to the list
devlist+=( "$parent" )
ancestors+=( "$parent" )
@@ -345,22 +360,24 @@ get_parent_components() {
}
# find_devices <other>
+# ${2+"$2"} in the following functions ensures that $2 gets passed down quoted if present
+# and ignored if not present
# Find the disk device(s) component $1 resides on.
find_disk() {
- get_parent_components "$1" "disk"
+ get_parent_components "$1" "disk" ${2+"$2"}
}
find_multipath() {
- get_parent_components "$1" "multipath"
+ get_parent_components "$1" "multipath" ${2+"$2"}
}
find_disk_and_multipath() {
- find_disk "$1"
- is_true "$AUTOEXCLUDE_MULTIPATH" || find_multipath "$1"
+ find_disk "$1" ${2+"$2"}
+ is_true "$AUTOEXCLUDE_MULTIPATH" || find_multipath "$1" ${2+"$2"}
}
find_partition() {
- get_parent_components "$1" "part"
+ get_parent_components "$1" "part" ${2+"$2"}
}
# The get_partition_number function
@@ -413,6 +430,54 @@ get_partition_number() {
echo $partition_number
}
+# Extract the underlying device name from the full partition device name.
+# Underlying device may be a disk, a multipath device or other devices that can be partitioned.
+# Should we use the information in $LAYOUT_DEPS, like get_parent_component does,
+# instead of string munging?
+function get_device_from_partition() {
+ local partition_block_device
+ local device
+ local partition_number
+
+ partition_block_device=$1
+ test -b "$partition_block_device" || BugError "get_device_from_partition called with '$partition_block_device' that is no block device"
+ partition_number=${2-$(get_partition_number $partition_block_device )}
+ # /dev/sda or /dev/mapper/vol34_part or /dev/mapper/mpath99p or /dev/mmcblk0p
+ device=${partition_block_device%$partition_number}
+
+ # Strip trailing partition remainders like '_part' or '-part' or 'p'
+ # if we have 'mapper' in disk device name:
+ if [[ ${partition_block_device/mapper//} != $partition_block_device ]] ; then
+ # we only expect mpath_partX or mpathpX or mpath-partX
+ case $device in
+ (*p) device=${device%p} ;;
+ (*-part) device=${device%-part} ;;
+ (*_part) device=${device%_part} ;;
+ (*) Log "Unsupported kpartx partition delimiter for $partition_block_device"
+ esac
+ fi
+
+ # For eMMC devices the trailing 'p' in the $device value
+ # (as in /dev/mmcblk0p that is derived from /dev/mmcblk0p1)
+ # needs to be stripped (to get /dev/mmcblk0), otherwise the
+ # efibootmgr call fails because of a wrong disk device name.
+ # See also https://github.com/rear/rear/issues/2103
+ if [[ $device = *'/mmcblk'+([0-9])p ]] ; then
+ device=${device%p}
+ fi
+
+ # For NVMe devices the trailing 'p' in the $device value
+ # (as in /dev/nvme0n1p that is derived from /dev/nvme0n1p1)
+ # needs to be stripped (to get /dev/nvme0n1), otherwise the
+ # efibootmgr call fails because of a wrong disk device name.
+ # See also https://github.com/rear/rear/issues/1564
+ if [[ $device = *'/nvme'+([0-9])n+([0-9])p ]] ; then
+ device=${device%p}
+ fi
+
+ test -b "$device" && echo $device
+}
+
# Returns partition start block or 'unknown'
# sda/sda1 or
# dm-XX

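The ${2+"$2"} construct used in the wrapper functions above is easy to misread, so here is a minimal standalone sketch (not ReaR code): the second argument is forwarded quoted when it was supplied, and it disappears entirely (not even an empty string) when it was not.

show_args() { echo "got $# argument(s): $*" ; }
forward() { show_args "$1" ${2+"$2"} ; }
forward only-one             # -> got 1 argument(s): only-one
forward first "second arg"   # -> got 2 argument(s): first second arg
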
File diff suppressed because it is too large

@@ -0,0 +1,68 @@
diff --git a/usr/share/rear/conf/Linux-ppc64.conf b/usr/share/rear/conf/Linux-ppc64.conf
index 7e20ddc7..d7774062 100644
--- a/usr/share/rear/conf/Linux-ppc64.conf
+++ b/usr/share/rear/conf/Linux-ppc64.conf
@@ -1,18 +1,26 @@
-REQUIRED_PROGS+=( sfdisk )
+REQUIRED_PROGS+=( sfdisk ofpathname )
PROGS+=(
mkofboot
ofpath
ybin
yabootconfig
-bootlist
pseries_platform
nvram
-ofpathname
bc
agetty
)
+if grep -q "emulated by qemu" /proc/cpuinfo ; then
+ # Qemu/KVM virtual machines don't need bootlist - don't complain if
+ # it is missing
+ PROGS+=( bootlist )
+else
+ # PowerVM environment, we need to run bootlist, otherwise
+ # we can't make the system bootable. Be strict about requiring it
+ REQUIRED_PROGS+=( bootlist )
+fi
+
COPY_AS_IS+=(
/usr/lib/yaboot/yaboot
/usr/lib/yaboot/ofboot
diff --git a/usr/share/rear/conf/Linux-ppc64le.conf b/usr/share/rear/conf/Linux-ppc64le.conf
index d00154a2..df8066ea 100644
--- a/usr/share/rear/conf/Linux-ppc64le.conf
+++ b/usr/share/rear/conf/Linux-ppc64le.conf
@@ -1,10 +1,8 @@
REQUIRED_PROGS+=( sfdisk )
PROGS+=(
-bootlist
pseries_platform
nvram
-ofpathname
bc
agetty
)
@@ -17,4 +15,18 @@ agetty
if [[ $(awk '/platform/ {print $NF}' < /proc/cpuinfo) != PowerNV ]] ; then
# No firmware files when ppc64le Linux is not run in BareMetal Mode (PowerNV):
test "${FIRMWARE_FILES[*]}" || FIRMWARE_FILES=( 'no' )
+ # grub2-install for powerpc-ieee1275 calls ofpathname, so without it,
+ # the rescue system can't make the recovered system bootable
+ REQUIRED_PROGS+=( ofpathname )
+ if grep -q "emulated by qemu" /proc/cpuinfo ; then
+ # Qemu/KVM virtual machines don't need bootlist - don't complain if
+ # it is missing
+ PROGS+=( bootlist )
+ else
+ # PowerVM environment, we need to run bootlist, otherwise
+ # we can't make the system bootable. Be strict about requiring it
+ REQUIRED_PROGS+=( bootlist )
+ fi
+else
+ PROGS+=( ofpathname bootlist )
fi

@@ -0,0 +1,34 @@
From 4233fe30b315737ac8c4d857e2b04e021c2e2886 Mon Sep 17 00:00:00 2001
From: Pavel Cahyna <pcahyna@redhat.com>
Date: Mon, 16 Aug 2021 10:10:38 +0300
Subject: [PATCH] Revert the main part of PR #2299
multipath -l is very slow with many multipath devices. As it will be
called for every multipath device, it leads to quadratic time complexity
in the number of multipath devices. For thousands of devices, ReaR can
take hours to scan and exclude them. We therefore have to comment
multipath -l out, as it is a huge performance regression, and find
another solution to bug #2298.
---
usr/share/rear/lib/layout-functions.sh | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh
index cdd81a14..8c8be74b 100644
--- a/usr/share/rear/lib/layout-functions.sh
+++ b/usr/share/rear/lib/layout-functions.sh
@@ -771,7 +771,10 @@ function is_multipath_path {
# so that no "multipath -l" output could clutter the log (the "multipath -l" output is irrelevant here)
# in contrast to e.g. test "$( multipath -l )" that would falsely succeed with blank output
# and the output would appear in the log in 'set -x' debugscript mode:
- multipath -l | grep -q '[[:alnum:]]' || return 1
+ #
+ # Unfortunately, multipath -l is quite slow with many multipath devices
+ # and becomes a performance bottleneck, so we must comment it out for now.
+ #multipath -l | grep -q '[[:alnum:]]' || return 1
# Check if a block device should be a path in a multipath device:
multipath -c /dev/$1 &>/dev/null
}
--
2.26.3
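
To give a feel for the quadratic cost described above, a purely illustrative calculation with assumed, not measured, numbers: if one "multipath -l" call costs about N * 2 ms on a host with N multipath devices, then calling it once per device costs roughly N * N * 2 ms in total.

N=3000
echo "$(( N * N * 2 / 1000 / 3600 )) hours"   # -> 5 hours for 3000 devices
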

@@ -0,0 +1,56 @@
diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf
index 0c230f38..f231bf3d 100644
--- a/usr/share/rear/conf/default.conf
+++ b/usr/share/rear/conf/default.conf
@@ -2707,6 +2707,15 @@ WARN_MISSING_VOL_ID=1
USE_CFG2HTML=
# The SKIP_CFG2HTML variable is no longer supported since ReaR 1.18
+# IP addresses that are present on the system but must be excluded when
+# building the network configuration used in recovery mode; this is typically
+# used when floating IP addresses are used on the system
+EXCLUDE_IP_ADDRESSES=()
+
+# Network interfaces that are present on the system but must be excluded when
+# building the network configuration used in recovery mode
+EXCLUDE_NETWORK_INTERFACES=()
+
# Simplify bonding setups by configuring always the first active device of a
# bond, except when mode is 4 (IEEE 802.3ad policy)
SIMPLIFY_BONDING=no
diff --git a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh
index f806bfbf..2385f5b6 100644
--- a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh
+++ b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh
@@ -355,6 +355,11 @@ function is_interface_up () {
local network_interface=$1
local sysfspath=/sys/class/net/$network_interface
+ if IsInArray "$network_interface" "${EXCLUDE_NETWORK_INTERFACES[@]}"; then
+ LogPrint "Excluding '$network_interface' per EXCLUDE_NETWORK_INTERFACES directive."
+ return 1
+ fi
+
local state=$( cat $sysfspath/operstate )
if [ "$state" = "down" ] ; then
return 1
@@ -403,11 +408,19 @@ function ipaddr_setup () {
if [ -n "$ipaddrs" ] ; then
# If some IP is found for the network interface, then use them
for ipaddr in $ipaddrs ; do
+ if IsInArray "${ipaddr%%/*}" "${EXCLUDE_IP_ADDRESSES[@]}"; then
+ LogPrint "Excluding IP address '$ipaddr' per EXCLUDE_IP_ADDRESSES directive even though it's defined in mapping file '$CONFIG_DIR/mappings/ip_addresses'."
+ continue
+ fi
echo "ip addr add $ipaddr dev $mapped_as"
done
else
# Otherwise, collect IP addresses for the network interface on the system
for ipaddr in $( ip a show dev $network_interface scope global | grep "inet.*\ " | tr -s " " | cut -d " " -f 3 ) ; do
+ if IsInArray "${ipaddr%%/*}" "${EXCLUDE_IP_ADDRESSES[@]}"; then
+ LogPrint "Excluding IP address '$ipaddr' per EXCLUDE_IP_ADDRESSES directive."
+ continue
+ fi
echo "ip addr add $ipaddr dev $mapped_as"
done
fi
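
An illustrative /etc/rear/local.conf snippet (the addresses and interface names are made up) showing how the two new exclude arrays are meant to be used, together with the prefix-length stripping the comparison above relies on:

EXCLUDE_IP_ADDRESSES=( 192.0.2.10 198.51.100.7 )   # e.g. floating/VIP addresses
EXCLUDE_NETWORK_INTERFACES=( team1 dummy0 )        # interfaces to leave unconfigured

ipaddr=192.0.2.10/24
echo "${ipaddr%%/*}"    # -> 192.0.2.10, the form that is compared against the array
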

@@ -0,0 +1,78 @@
diff --git a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh
index 35be1721..d3c9ae86 100644
--- a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh
+++ b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh
@@ -103,12 +103,7 @@ local lvs_exit_code
pdev=$( get_device_name $pdev )
# Output lvmdev entry to DISKLAYOUT_FILE:
- # With the above example the output is:
- # lvmdev /dev/system /dev/sda1 7wwpcO-KmNN-qsTE-7sp7-JBJS-vBdC-Zyt1W7 41940992
- echo "lvmdev /dev/$vgrp $pdev $uuid $size"
-
- # After the 'lvmdev' line was written to disklayout.conf so that the user can inspect it
- # check that the required positional parameters in the 'lvmdev' line are non-empty
+ # Check that the required positional parameters in the 'lvmdev' line are non-empty
# because an empty positional parameter would result an invalid 'lvmdev' line
# which would cause invalid parameters are 'read' as input during "rear recover"
# cf. "Verifying ... 'lvm...' entries" in layout/save/default/950_verify_disklayout_file.sh
@@ -117,13 +112,24 @@ local lvs_exit_code
# so that this also checks that the variables do not contain blanks or more than one word
# because blanks (actually $IFS characters) are used as field separators in disklayout.conf
# which means the positional parameter values must be exactly one non-empty word.
- # Two separated simple 'test $vgrp && test $pdev' commands are used here because
- # 'test $vgrp -a $pdev' does not work when $vgrp is empty or only blanks
- # because '-a' has two different meanings: "EXPR1 -a EXPR2" and "-a FILE" (see "help test")
- # so that when $vgrp is empty 'test $vgrp -a $pdev' tests if file $pdev exists
- # which is usually true because $pdev is usually a partition device node (e.g. /dev/sda1)
- # so that when $vgrp is empty 'test $vgrp -a $pdev' would falsely succeed:
- test $vgrp && test $pdev || Error "LVM 'lvmdev' entry in $DISKLAYOUT_FILE where volume_group or device is empty or more than one word"
+ test $pdev || Error "Cannot make 'lvmdev' entry in disklayout.conf (PV device '$pdev' empty or more than one word)"
+ if ! test $vgrp ; then
+ # Valid $pdev but invalid $vgrp (empty or more than one word):
+ # When $vgrp is empty it means it is a PV that is not part of a VG so the PV exists but it is not used.
+ # PVs that are not part of a VG are documented as comment in disklayout.conf but they are not recreated
+ # because they were not used on the original system so there is no need to recreate them by "rear recover"
+ # (the user can manually recreate them later in his recreated system when needed)
+ # cf. https://github.com/rear/rear/issues/2596
+ DebugPrint "Skipping PV $pdev that is not part of a valid VG (VG '$vgrp' empty or more than one word)"
+ echo "# Skipping PV $pdev that is not part of a valid VG (VG '$vgrp' empty or more than one word):"
+ contains_visible_char "$vgrp" || vgrp='<missing_VG>'
+ echo "# lvmdev /dev/$vgrp $pdev $uuid $size"
+ # Continue with the next line in the output of "lvm pvdisplay -c"
+ continue
+ fi
+ # With the above example the output is:
+ # lvmdev /dev/system /dev/sda1 7wwpcO-KmNN-qsTE-7sp7-JBJS-vBdC-Zyt1W7 41940992
+ echo "lvmdev /dev/$vgrp $pdev $uuid $size"
done
# Check the exit code of "lvm pvdisplay -c"
@@ -161,8 +167,15 @@ local lvs_exit_code
# lvmgrp /dev/system 4096 5119 20967424
echo "lvmgrp /dev/$vgrp $extentsize $nrextents $size"
- # Check that the required positional parameters in the 'lvmgrp' line are non-empty
- # cf. the code above to "check that the required positional parameters in the 'lvmdev' line are non-empty":
+ # Check that the required positional parameters in the 'lvmgrp' line are non-empty.
+ # The tested variables are intentionally not quoted here, cf. the code above to
+ # "check that the required positional parameters in the 'lvmdev' line are non-empty".
+ # Two separated simple 'test $vgrp && test $extentsize' commands are used here because
+ # 'test $vgrp -a $extentsize' does not work when $vgrp is empty or only blanks
+ # because '-a' has two different meanings: "EXPR1 -a EXPR2" and "-a FILE" (see "help test")
+ # so with empty $vgrp it becomes 'test -a $extentsize' that tests if a file $extentsize exists
+ # which is unlikely to be true but it is not impossible that a file $extentsize exists
+ # so when $vgrp is empty (or blanks) 'test $vgrp -a $extentsize' might falsely succeed:
test $vgrp && test $extentsize || Error "LVM 'lvmgrp' entry in $DISKLAYOUT_FILE where volume_group or extentsize is empty or more than one word"
done
@@ -305,7 +318,8 @@ local lvs_exit_code
fi
already_processed_lvs+=( "$vg/$lv" )
# Check that the required positional parameters in the 'lvmvol' line are non-empty
- # cf. the code above to "check that the required positional parameters in the 'lvmdev' line are non-empty":
+ # cf. the code above to "check that the required positional parameters in the 'lvmdev' line are non-empty"
+ # and the code above to "check that the required positional parameters in the 'lvmgrp' line are non-empty":
test $vg && test $lv && test $size && test $layout || Error "LVM 'lvmvol' entry in $DISKLAYOUT_FILE where volume_group or name or size or layout is empty or more than one word"
fi
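
A minimal standalone sketch (not ReaR code) of the 'test EXPR1 -a EXPR2' pitfall the comments above describe: with an empty first operand the expression degrades into a plain file-existence test and can falsely succeed.

vgrp=""
pdev=/dev/null                  # any path that exists
test $vgrp -a $pdev && echo "falsely succeeds: this became 'test -a $pdev'"
test $vgrp && test $pdev || echo "correctly fails while vgrp is empty"
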

@@ -0,0 +1,25 @@
diff --git a/usr/share/rear/layout/save/default/335_remove_excluded_multipath_vgs.sh b/usr/share/rear/layout/save/default/335_remove_excluded_multipath_vgs.sh
index 040e9eec..e731c994 100644
--- a/usr/share/rear/layout/save/default/335_remove_excluded_multipath_vgs.sh
+++ b/usr/share/rear/layout/save/default/335_remove_excluded_multipath_vgs.sh
@@ -19,9 +19,9 @@ while read lvmdev name mpdev junk ; do
# Remember, multipath devices from a volume group that is "excluded" should be 'commented out'
device=$(echo $mpdev | cut -c1-45)
while read LINE ; do
- # Now we need to comment all lines that contain "$devices" in the LAYOUT_FILE
+ # Now we need to comment all lines that contain "$device" in the LAYOUT_FILE
sed -i "s|^$LINE|\#$LINE|" "$LAYOUT_FILE"
- done < <(grep "$device" $LAYOUT_FILE | grep -v "^#")
+ done < <(grep " $device " $LAYOUT_FILE | grep -v "^#")
Log "Excluding multipath device $device"
done < <(grep "^#lvmdev" $LAYOUT_FILE)
@@ -31,7 +31,7 @@ done < <(grep "^#lvmdev" $LAYOUT_FILE)
while read LINE ; do
# multipath /dev/mapper/360060e8007e2e3000030e2e300002065 /dev/sdae,/dev/sdat,/dev/sdbi,/dev/sdp
device=$(echo $LINE | awk '{print $2}' | cut -c1-45)
- num=$(grep "$device" $LAYOUT_FILE | grep -v "^#" | wc -l)
+ num=$(grep " $device " $LAYOUT_FILE | grep -v "^#" | wc -l)
if [ $num -lt 2 ] ; then
# If the $device is only seen once (in a uncommented line) then the multipath is not in use
sed -i "s|^$LINE|\#$LINE|" "$LAYOUT_FILE"

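A minimal sketch (not ReaR code) of why the hunks above wrap "$device" in spaces: without them a shorter multipath name also matches longer names as a substring, so the count and the commenting-out would hit the wrong lines.

layout='multipath /dev/mapper/36006016 /dev/sda
multipath /dev/mapper/360060161 /dev/sdb'
device=/dev/mapper/36006016
grep -c "$device"   <<< "$layout"    # -> 2  (substring match, one line too many)
grep -c " $device " <<< "$layout"    # -> 1  (whole field, as intended)
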
@@ -0,0 +1,171 @@
commit 3d1bcf1b50ca8201a3805bc7cab6ca69c14951a1
Author: pcahyna <pcahyna@users.noreply.github.com>
Date: Thu May 5 12:11:55 2022 +0200
Merge pull request #2795 from pcahyna/recover-check-sums
Verify file hashes at the end of recover after file restore from backup
diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf
index f231bf3d..881a0af0 100644
--- a/usr/share/rear/conf/default.conf
+++ b/usr/share/rear/conf/default.conf
@@ -313,8 +313,30 @@ CDROM_SIZE=20
# which exits with non-zero exit code when the disk layout or those files changed
# (cf. https://github.com/rear/rear/issues/1134) but the checklayout workflow
# does not automatically recreate the rescue/recovery system.
+# Files matching FILES_TO_PATCH_PATTERNS are added to this list automatically.
CHECK_CONFIG_FILES=( '/etc/drbd/' '/etc/drbd.conf' '/etc/lvm/lvm.conf' '/etc/multipath.conf' '/etc/rear/' '/etc/udev/udev.conf' )
+# FILES_TO_PATCH_PATTERNS is a space-separated list of shell glob patterns.
+# Files that match are eligible for a final migration of UUIDs and other
+# identifiers after recovery (if the layout recreation process has led
+# to a change of an UUID or a device name and a corresponding change needs
+# to be performed on restored configuration files ).
+# See finalize/GNU/Linux/280_migrate_uuid_tags.sh
+# The [] around the first letter make sure that shopt -s nullglob removes this file from the list if it does not exist
+
+FILES_TO_PATCH_PATTERNS="[b]oot/{grub.conf,menu.lst,device.map} [e]tc/grub.* \
+ [b]oot/grub/{grub.conf,grub.cfg,menu.lst,device.map} \
+ [b]oot/grub2/{grub.conf,grub.cfg,menu.lst,device.map} \
+ [e]tc/sysconfig/grub [e]tc/sysconfig/bootloader \
+ [e]tc/lilo.conf [e]tc/elilo.conf \
+ [e]tc/yaboot.conf \
+ [e]tc/mtab [e]tc/fstab \
+ [e]tc/mtools.conf \
+ [e]tc/smartd.conf [e]tc/sysconfig/smartmontools \
+ [e]tc/sysconfig/rawdevices \
+ [e]tc/security/pam_mount.conf.xml \
+ [b]oot/efi/*/*/grub.cfg"
+
##
# Relax-and-Recover recovery system update during "rear recover"
#
diff --git a/usr/share/rear/finalize/GNU/Linux/250_migrate_disk_devices_layout.sh b/usr/share/rear/finalize/GNU/Linux/250_migrate_disk_devices_layout.sh
index 1a91a0e3..e869e5e9 100644
--- a/usr/share/rear/finalize/GNU/Linux/250_migrate_disk_devices_layout.sh
+++ b/usr/share/rear/finalize/GNU/Linux/250_migrate_disk_devices_layout.sh
@@ -29,19 +29,9 @@ LogPrint "The original restored files get saved in $save_original_file_dir (in $
local symlink_target=""
local restored_file=""
-# the funny [] around the first letter make sure that shopt -s nullglob removes this file from the list if it does not exist
-# the files without a [] are mandatory, like fstab FIXME: but below there is [e]tc/fstab not etc/fstab - why?
-
-for restored_file in [b]oot/{grub.conf,menu.lst,device.map} [e]tc/grub.* [b]oot/grub/{grub.conf,menu.lst,device.map} \
- [b]oot/grub2/{grub.conf,grub.cfg,menu.lst,device.map} \
- [e]tc/sysconfig/grub [e]tc/sysconfig/bootloader \
- [e]tc/lilo.conf \
- [e]tc/yaboot.conf \
- [e]tc/mtab [e]tc/fstab \
- [e]tc/mtools.conf \
- [e]tc/smartd.conf [e]tc/sysconfig/smartmontools \
- [e]tc/sysconfig/rawdevices \
- [e]tc/security/pam_mount.conf.xml [b]oot/efi/*/*/grub.cfg
+# The variable expansion is deliberately not quoted in order to perform
+# pathname expansion on the variable value.
+for restored_file in $FILES_TO_PATCH_PATTERNS
do
# Silently skip directories and file not found:
test -f "$restored_file" || continue
diff --git a/usr/share/rear/finalize/GNU/Linux/280_migrate_uuid_tags.sh b/usr/share/rear/finalize/GNU/Linux/280_migrate_uuid_tags.sh
index 074689a1..d994ce8e 100644
--- a/usr/share/rear/finalize/GNU/Linux/280_migrate_uuid_tags.sh
+++ b/usr/share/rear/finalize/GNU/Linux/280_migrate_uuid_tags.sh
@@ -23,18 +23,9 @@ LogPrint "Migrating filesystem UUIDs in certain restored files in $TARGET_FS_ROO
local symlink_target=""
local restored_file=""
-# the funny [] around the first letter make sure that shopt -s nullglob removes this file from the list if it does not exist
-# the files without a [] are mandatory, like fstab FIXME: but below there is [e]tc/fstab not etc/fstab - why?
-for restored_file in [b]oot/{grub.conf,menu.lst,device.map} [e]tc/grub.* \
- [b]oot/grub/{grub.conf,grub.cfg,menu.lst,device.map} \
- [b]oot/grub2/{grub.conf,grub.cfg,menu.lst,device.map} \
- [e]tc/sysconfig/grub [e]tc/sysconfig/bootloader \
- [e]tc/lilo.conf [e]tc/elilo.conf \
- [e]tc/mtab [e]tc/fstab \
- [e]tc/mtools.conf \
- [e]tc/smartd.conf [e]tc/sysconfig/smartmontools \
- [e]tc/sysconfig/rawdevices \
- [e]tc/security/pam_mount.conf.xml [b]oot/efi/*/*/grub.cfg
+# The variable expansion is deliberately not quoted in order to perform
+# pathname expansion on the variable value.
+for restored_file in $FILES_TO_PATCH_PATTERNS
do
# Silently skip directories and file not found:
test -f "$restored_file" || continue
diff --git a/usr/share/rear/finalize/default/060_compare_files.sh b/usr/share/rear/finalize/default/060_compare_files.sh
new file mode 100644
index 00000000..6947fda9
--- /dev/null
+++ b/usr/share/rear/finalize/default/060_compare_files.sh
@@ -0,0 +1,6 @@
+if [ -e $VAR_DIR/layout/config/files.md5sum ] ; then
+ if ! chroot $TARGET_FS_ROOT md5sum -c --quiet < $VAR_DIR/layout/config/files.md5sum 1>> >( tee -a "$RUNTIME_LOGFILE" 1>&7 ) 2>> >( tee -a "$RUNTIME_LOGFILE" 1>&8 ) ; then
+ LogPrintError "Error: Restored files do not match the recreated system in $TARGET_FS_ROOT"
+ return 1
+ fi
+fi
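
A minimal sketch (not ReaR code, file names are illustrative) of the checksum round trip this new script completes: hashes recorded at "rear mkrescue/mkbackup" time are verified against the restored files inside the recreated system.

# at backup time: record checksums of the configuration files to watch
md5sum /etc/fstab /etc/lvm/lvm.conf > /tmp/files.md5sum
# at recover time: verify them against the restored tree by running md5sum chroot'ed into it
chroot /mnt/local md5sum -c --quiet < /tmp/files.md5sum || echo "restored files differ from the recreated system"
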
diff --git a/usr/share/rear/layout/save/default/490_check_files_to_patch.sh b/usr/share/rear/layout/save/default/490_check_files_to_patch.sh
new file mode 100644
index 00000000..ee717063
--- /dev/null
+++ b/usr/share/rear/layout/save/default/490_check_files_to_patch.sh
@@ -0,0 +1,43 @@
+# FILES_TO_PATCH_PATTERNS is a space-separated list of shell glob patterns.
+# Files that match are eligible for a final migration of UUIDs and other
+# identifiers after recovery (if the layout recreation process has led
+# to a change of an UUID or a device name and a corresponding change needs
+# to be performed on restored configuration files ).
+# See finalize/GNU/Linux/280_migrate_uuid_tags.sh
+# We should add all such files to CHECK_CONFIG_FILES - if they change,
+# we risk inconsistencies between the restored files and recreated layout,
+# or failures of UUID migration.
+
+local file final_file symlink_target
+
+# The patterns are relative to /, change directory there
+# so that the shell finds the files during pathname expansion
+pushd / >/dev/null
+# The variable expansion is deliberately not quoted in order to perform
+# pathname expansion on the variable value.
+for file in $FILES_TO_PATCH_PATTERNS ; do
+ final_file="/$file"
+ IsInArray "$final_file" "${CHECK_CONFIG_FILES[@]}" && continue
+ # Symlink handling (partially from 280_migrate_uuid_tags.sh):
+ # avoid dead symlinks, and symlinks to files on dynamic filesystems
+ # ( /proc etc.) - they are expected to change and validating
+ # their checksums has no sense
+ if test -L "$final_file" ; then
+ if symlink_target="$( readlink -e "$final_file" )" ; then
+ # If the symlink target contains /proc/ /sys/ /dev/ or /run/ we skip it because then
+ # the symlink target is considered to not be a restored file that needs to be patched
+ # and thus we don't need to generate and check its hash, either
+ # cf. https://github.com/rear/rear/pull/2047#issuecomment-464846777
+ if echo $symlink_target | egrep -q '/proc/|/sys/|/dev/|/run/' ; then
+ Log "Skip adding symlink $final_file target $symlink_target on /proc/ /sys/ /dev/ or /run/ to CHECK_CONFIG_FILES"
+ continue
+ fi
+ Debug "Adding symlink $final_file with target $symlink_target to CHECK_CONFIG_FILES"
+ else
+ LogPrint "Skip adding dead symlink $final_file to CHECK_CONFIG_FILES"
+ continue
+ fi
+ fi
+ CHECK_CONFIG_FILES+=( "$final_file" )
+done
+popd >/dev/null
diff --git a/usr/share/rear/layout/save/default/600_snapshot_files.sh b/usr/share/rear/layout/save/default/600_snapshot_files.sh
index 0ebf197c..3ac6b07e 100644
--- a/usr/share/rear/layout/save/default/600_snapshot_files.sh
+++ b/usr/share/rear/layout/save/default/600_snapshot_files.sh
@@ -3,7 +3,8 @@ if [ "$WORKFLOW" = "checklayout" ] ; then
return 0
fi
-config_files=()
+local obj
+local config_files=()
for obj in "${CHECK_CONFIG_FILES[@]}" ; do
if [ -d "$obj" ] ; then
config_files+=( $( find "$obj" -type f ) )

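A minimal standalone sketch (not ReaR code) of the "[] around the first letter" globbing trick that FILES_TO_PATCH_PATTERNS and the comments above rely on: with nullglob enabled, a pattern whose file does not exist simply drops out of the expansion instead of being passed through literally.

shopt -s nullglob
pushd / >/dev/null
for f in [e]tc/fstab [e]tc/file-that-does-not-exist ; do
    echo "would check: /$f"       # only patterns that matched an existing file get here
done
popd >/dev/null
shopt -u nullglob
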
@@ -0,0 +1,46 @@
diff --git a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh
index d3c9ae86..f21845df 100644
--- a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh
+++ b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh
@@ -70,14 +70,20 @@ local lvs_exit_code
# Get physical_device configuration.
# Format: lvmdev <volume_group> <device> [<uuid>] [<size(bytes)>]
header_printed="no"
- # Example output of "lvm pvdisplay -c":
- # /dev/sda1:system:41940992:-1:8:8:-1:4096:5119:2:5117:7wwpcO-KmNN-qsTE-7sp7-JBJS-vBdC-Zyt1W7
+ # Set pvdisplay separator to '|' to prevent issues with a colon in the path under /dev/disk/by-path
+ # that contains a ':' in the SCSI slot name.
+ # Example output of "lvm pvdisplay -C --separator '|' --noheadings --nosuffix --units=b -o pv_name,vg_name,pv_size,pv_uuid"
+ # on a system where LVM is configured to show the /dev/disk/by-path device names instead of the usual
+ # /dev/sda etc. (by using a setting like
+ # filter = [ "r|/dev/disk/by-path/.*-usb-|", "a|/dev/disk/by-path/pci-.*-nvme-|", "a|/dev/disk/by-path/pci-.*-scsi-|", "a|/dev/disk/by-path/pci-.*-ata-|", "a|/dev/disk/by-path/pci-.*-sas-|", "a|loop|", "r|.*|" ]
+ # in /etc/lvm/lvm.conf):
+ # /dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:1:0-part1|system|107340627968|7wwpcO-KmNN-qsTE-7sp7-JBJS-vBdC-Zyt1W7
# There are two leading blanks in the output (at least on SLES12-SP4 with LVM 2.02.180).
- lvm pvdisplay -c | while read line ; do
+ lvm pvdisplay -C --separator '|' --noheadings --nosuffix --units=b -o pv_name,vg_name,pv_size,pv_uuid | while read line ; do
- # With the above example pdev=/dev/sda1
+ # With the above example pdev=/dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:1:0-part1
# (the "echo $line" makes the leading blanks disappear)
- pdev=$( echo $line | cut -d ":" -f "1" )
+ pdev=$( echo $line | cut -d "|" -f "1" )
# Skip lines that are not describing physical devices
# i.e. lines where pdev does not start with a leading / character:
@@ -91,11 +97,11 @@ local lvs_exit_code
fi
# With the above example vgrp=system
- vgrp=$( echo $line | cut -d ":" -f "2" )
- # With the above example size=41940992
- size=$( echo $line | cut -d ":" -f "3" )
+ vgrp=$( echo $line | cut -d "|" -f "2" )
+ # With the above example size=107340627968
+ size=$( echo $line | cut -d "|" -f "3" )
# With the above example uuid=7wwpcO-KmNN-qsTE-7sp7-JBJS-vBdC-Zyt1W7
- uuid=$( echo $line | cut -d ":" -f "12" )
+ uuid=$( echo $line | cut -d "|" -f "4" )
# Translate pdev through diskbyid_mappings file:
pdev=$( get_device_mapping $pdev )

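A minimal sketch (not ReaR code) of the parsing problem this patch fixes: /dev/disk/by-path device names contain colons in the SCSI slot, so splitting the pvdisplay output on ':' truncates the PV name, while the new '|' separator keeps it intact.

line_colon='/dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:1:0-part1:system:107340627968'
echo "$line_colon" | cut -d ':' -f 1    # -> /dev/disk/by-path/pci-0000   (truncated)
line_pipe='/dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:1:0-part1|system|107340627968'
echo "$line_pipe"  | cut -d '|' -f 1    # -> the full ...-part1 device name
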
@@ -0,0 +1,58 @@
commit 389e5026df575ad98695191044257cf2b33d565b
Author: pcahyna <pcahyna@users.noreply.github.com>
Date: Mon Jul 4 15:48:43 2022 +0200
Merge pull request #2825 from lzaoral/replace-mkinitrd-with-dracut
Replace `mkinitrd` with `dracut` on Fedora and RHEL
diff --git a/usr/share/rear/finalize/Fedora/i386/550_rebuild_initramfs.sh b/usr/share/rear/finalize/Fedora/i386/550_rebuild_initramfs.sh
index 3476b77f..f296e624 100644
--- a/usr/share/rear/finalize/Fedora/i386/550_rebuild_initramfs.sh
+++ b/usr/share/rear/finalize/Fedora/i386/550_rebuild_initramfs.sh
@@ -61,7 +61,7 @@ NEW_INITRD_MODULES=( $(tr " " "\n" <<< "${NEW_INITRD_MODULES[*]}" | sort | uniq
Log "New INITRD_MODULES='${OLD_INITRD_MODULES[@]} ${NEW_INITRD_MODULES[@]}'"
INITRD_MODULES="${OLD_INITRD_MODULES[@]} ${NEW_INITRD_MODULES[@]}"
-WITH_INITRD_MODULES=$( printf '%s\n' ${INITRD_MODULES[@]} | awk '{printf "--with=%s ", $1}' )
+WITH_INITRD_MODULES=$( printf '%s\n' ${INITRD_MODULES[@]} | awk '{printf "--add-drivers=%s ", $1}' )
# Recreate any initrd or initramfs image under $TARGET_FS_ROOT/boot/ with new drivers
# Images ignored:
@@ -76,19 +76,19 @@ for INITRD_IMG in $( ls $TARGET_FS_ROOT/boot/initramfs-*.img $TARGET_FS_ROOT/boo
# Do not use KERNEL_VERSION here because that is readonly in the rear main script:
kernel_version=$( basename $( echo $INITRD_IMG ) | cut -f2- -d"-" | sed s/"\.img"// )
INITRD=$( echo $INITRD_IMG | egrep -o "/boot/.*" )
- LogPrint "Running mkinitrd..."
- # Run mkinitrd directly in chroot without a login shell in between (see https://github.com/rear/rear/issues/862).
- # We need the mkinitrd binary in the chroot environment i.e. the mkinitrd binary in the recreated system.
- # Normally we would use a login shell like: chroot $TARGET_FS_ROOT /bin/bash --login -c 'type -P mkinitrd'
+ LogPrint "Running dracut..."
+ # Run dracut directly in chroot without a login shell in between (see https://github.com/rear/rear/issues/862).
+ # We need the dracut binary in the chroot environment i.e. the dracut binary in the recreated system.
+ # Normally we would use a login shell like: chroot $TARGET_FS_ROOT /bin/bash --login -c 'type -P dracut'
# because otherwise there is no useful PATH (PATH is only /bin) so that 'type -P' won't find it
# but we cannot use a login shell because that contradicts https://github.com/rear/rear/issues/862
# so that we use a plain (non-login) shell and set a (hopefully) reasonable PATH:
- local mkinitrd_binary=$( chroot $TARGET_FS_ROOT /bin/bash -c 'PATH=/sbin:/usr/sbin:/usr/bin:/bin type -P mkinitrd' )
- # If there is no mkinitrd in the chroot environment plain 'chroot $TARGET_FS_ROOT' will hang up endlessly
+ local dracut_binary=$( chroot $TARGET_FS_ROOT /bin/bash -c 'PATH=/sbin:/usr/sbin:/usr/bin:/bin type -P dracut' )
+ # If there is no dracut in the chroot environment plain 'chroot $TARGET_FS_ROOT' will hang up endlessly
# and then "rear recover" cannot be aborted with the usual [Ctrl]+[C] keys.
# Use plain $var because when var contains only blanks test "$var" results true because test " " results true:
- if test $mkinitrd_binary ; then
- if chroot $TARGET_FS_ROOT $mkinitrd_binary -v -f ${WITH_INITRD_MODULES[@]} $INITRD $kernel_version >&2 ; then
+ if test $dracut_binary ; then
+ if chroot $TARGET_FS_ROOT $dracut_binary -v -f ${WITH_INITRD_MODULES[@]} $INITRD $kernel_version >&2 ; then
LogPrint "Updated initrd with new drivers for kernel $kernel_version."
else
LogPrint "WARNING:
@@ -99,7 +99,7 @@ and decide yourself, whether the system will boot or not.
fi
else
LogPrint "WARNING:
-Cannot create initrd (found no mkinitrd in the recreated system).
+Cannot create initrd (found no dracut in the recreated system).
Check the recreated system (mounted at $TARGET_FS_ROOT)
and decide yourself, whether the system will boot or not.
"

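A minimal sketch (not ReaR code, the module list is a made-up example) of the option translation above: the module list that mkinitrd used to receive as repeated --with=... arguments is now handed to dracut as repeated --add-drivers=... arguments.

INITRD_MODULES="xhci_pci nvme ext4"
WITH_INITRD_MODULES=$( printf '%s\n' $INITRD_MODULES | awk '{printf "--add-drivers=%s ", $1}' )
echo "$WITH_INITRD_MODULES"
# -> --add-drivers=xhci_pci --add-drivers=nvme --add-drivers=ext4
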
@@ -0,0 +1,130 @@
commit b06d059108db9b0c46cba29cc174f60e129164f1
Author: Johannes Meixner <jsmeix@suse.com>
Date: Tue Mar 9 14:40:59 2021 +0100
Merge pull request #2580 from rear/jsmeix-load-nvram-module
In etc/scripts/system-setup.d/41-load-special-modules.sh
load the nvram kernel module if possible to make /dev/nvram appear
because /dev/nvram should be there when installing GRUB,
see https://github.com/rear/rear/issues/2554
and include the nvram kernel module in the recovery system
because nvram could be a module in particular on POWER architecture
see https://github.com/rear/rear/issues/2554#issuecomment-764720180
diff --git a/usr/share/rear/build/GNU/Linux/400_copy_modules.sh b/usr/share/rear/build/GNU/Linux/400_copy_modules.sh
index d8d733d2..a0ca9084 100644
--- a/usr/share/rear/build/GNU/Linux/400_copy_modules.sh
+++ b/usr/share/rear/build/GNU/Linux/400_copy_modules.sh
@@ -116,8 +116,12 @@ for dummy in "once" ; do
# As a way out of this dilemma we add the below listed modules no longer via conf/GNU/Linux.conf
# but here after the user config files were sourced so that now the user can specify
# MODULES=( 'moduleX' 'moduleY' ) in etc/rear/local.conf to get additional kernel modules
- # included in the recovery system in addition to the ones via an empty MODULES=() setting:
- MODULES+=( vfat
+ # included in the recovery system in addition to the ones via an empty MODULES=() setting.
+ # nvram could be a module in particular on POWER architecture,
+ # cf. https://github.com/rear/rear/issues/2554#issuecomment-764720180
+ # and https://github.com/rear/rear/pull/2580#issuecomment-791344794
+ MODULES+=( nvram
+ vfat
nls_iso8859_1 nls_utf8 nls_cp437
af_packet
unix
diff --git a/usr/share/rear/finalize/Linux-ppc64le/660_install_grub2.sh b/usr/share/rear/finalize/Linux-ppc64le/660_install_grub2.sh
index 4c2698f3..0cb3ee41 100644
--- a/usr/share/rear/finalize/Linux-ppc64le/660_install_grub2.sh
+++ b/usr/share/rear/finalize/Linux-ppc64le/660_install_grub2.sh
@@ -104,9 +104,39 @@ fi
# Do not update nvram when system is running in PowerNV mode (BareMetal).
# grub2-install will fail if not run with the --no-nvram option on a PowerNV system,
# see https://github.com/rear/rear/pull/1742
-grub2_install_option=""
+grub2_no_nvram_option=""
if [[ $(awk '/platform/ {print $NF}' < /proc/cpuinfo) == PowerNV ]] ; then
- grub2_install_option="--no-nvram"
+ grub2_no_nvram_option="--no-nvram"
+fi
+# Also do not update nvram when no character device node /dev/nvram exists.
+# On POWER architecture the nvram kernel driver could be also built as a kernel module
+# that gets loaded via etc/scripts/system-setup.d/41-load-special-modules.sh
+# but whether or not the nvram kernel driver will then create /dev/nvram
+# depends on whether or not the hardware platform supports nvram.
+# I <jsmeix@suse.de> asked on a SUSE internal mailing list
+# and got the following reply (excerpts):
+# ----------------------------------------------------------------
+# > I would like to know when /dev/nvram exists and when not.
+# > I assume /dev/nvram gets created as other device nodes
+# > by the kernel (probably together with udev).
+# > I would like to know under what conditions /dev/nvram
+# > gets created and when it is not created.
+# > It seems on PPC /dev/nvram usually exist but sometimes not.
+# In case of powerpc, it gets created by nvram driver
+# (nvram_module_init) whenever the powerpc platform driver
+# has ppc_md.nvram_size greater than zero in it's machine
+# description structure.
+# How exactly ppc_md.nvram_size gets gets populated by platform
+# code depends on the platform, e.g. on most modern systems
+# it gets populated from 'nvram' device tree node
+# (and only if such node has #bytes > 0).
+# ----------------------------------------------------------------
+# So /dev/nvram may not exist regardless that the nvram kernel driver is there
+# and then grub2-install must be called with the '--no-nvram' option
+# because otherwise installing the bootloader fails
+# cf. https://github.com/rear/rear/issues/2554
+if ! test -c /dev/nvram ; then
+ grub2_no_nvram_option="--no-nvram"
fi
# When GRUB2_INSTALL_DEVICES is specified by the user
@@ -134,7 +164,7 @@ if test "$GRUB2_INSTALL_DEVICES" ; then
else
LogPrint "Installing GRUB2 on $grub2_install_device (specified in GRUB2_INSTALL_DEVICES)"
fi
- if ! chroot $TARGET_FS_ROOT /bin/bash --login -c "$grub_name-install $grub2_install_option $grub2_install_device" ; then
+ if ! chroot $TARGET_FS_ROOT /bin/bash --login -c "$grub_name-install $grub2_no_nvram_option $grub2_install_device" ; then
LogPrintError "Failed to install GRUB2 on $grub2_install_device"
grub2_install_failed="yes"
fi
@@ -170,7 +200,7 @@ for part in $part_list ; do
LogPrint "Found PPC PReP boot partition $part - installing GRUB2 there"
# Erase the first 512 bytes of the PPC PReP boot partition:
dd if=/dev/zero of=$part
- if chroot $TARGET_FS_ROOT /bin/bash --login -c "$grub_name-install $grub2_install_option $part" ; then
+ if chroot $TARGET_FS_ROOT /bin/bash --login -c "$grub_name-install $grub2_no_nvram_option $part" ; then
# In contrast to the above behaviour when GRUB2_INSTALL_DEVICES is specified
# consider it here as a successful bootloader installation when GRUB2
# got installed on at least one PPC PReP boot partition:
diff --git a/usr/share/rear/skel/default/etc/scripts/system-setup.d/41-load-special-modules.sh b/usr/share/rear/skel/default/etc/scripts/system-setup.d/41-load-special-modules.sh
index 9b0b3b8a..2e1d1912 100644
--- a/usr/share/rear/skel/default/etc/scripts/system-setup.d/41-load-special-modules.sh
+++ b/usr/share/rear/skel/default/etc/scripts/system-setup.d/41-load-special-modules.sh
@@ -1,6 +1,24 @@
-# some things are special
+# Special cases of kernel module loading.
-# XEN PV does not autoload some modules
-if [ -d /proc/xen ] ; then
- modprobe xenblk
+# XEN PV does not autoload some modules:
+test -d /proc/xen && modprobe xenblk
+
+# On POWER architecture the nvram kernel driver may be no longer built into the kernel
+# but nowadays it could be also built as a kernel module that needs to be loaded
+# cf. https://github.com/rear/rear/issues/2554#issuecomment-764720180
+# because normally grub2-install gets called without the '--no-nvram' option
+# e.g. see finalize/Linux-ppc64le/620_install_grub2.sh
+# which is how grub2-install should be called when the hardware supports nvram.
+# Nothing to do when the character device node /dev/nvram exists
+# because then the nvram kernel driver is already there:
+if ! test -c /dev/nvram ; then
+ # Nothing can be done when there is no nvram kernel module.
+ # Suppress the possible 'modprobe -n nvram' error message like
+ # "modprobe: FATAL: Module nvram not found in directory /lib/modules/..."
+ # to avoid a possible "FATAL" false alarm message that would appear
+ # on the user's terminal during recovery system startup
+ # cf. https://github.com/rear/rear/pull/2537#issuecomment-741825046
+ # but when there is a nvram kernel module show possible 'modprobe nvram'
+ # (error) messages on the user's terminal during recovery system startup:
+ modprobe -n nvram 2>/dev/null && modprobe nvram
fi
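
A minimal sketch (not ReaR code) of the dry-run-then-load pattern in the last hunk: 'modprobe -n' only resolves the module without loading it, so a missing module can be detected quietly before the real, possibly noisy, modprobe runs.

module=nvram
if modprobe -n "$module" 2>/dev/null ; then
    modprobe "$module" && echo "loaded $module"
else
    echo "no $module kernel module available, nothing to do"
fi
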

@@ -0,0 +1,37 @@
commit 2922b77e950537799fdadf5b3ebf6a05d97f6f2f
Author: pcahyna <pcahyna@users.noreply.github.com>
Date: Mon Jun 20 17:42:58 2022 +0200
Merge pull request #2822 from pcahyna/fix-vim-symlink
Fix vi in the rescue system on Fedora and RHEL 9
diff --git a/usr/share/rear/build/GNU/Linux/005_create_symlinks.sh b/usr/share/rear/build/GNU/Linux/005_create_symlinks.sh
index df75e07d..55f25bef 100644
--- a/usr/share/rear/build/GNU/Linux/005_create_symlinks.sh
+++ b/usr/share/rear/build/GNU/Linux/005_create_symlinks.sh
@@ -8,7 +8,6 @@
ln -sf $v bin/init $ROOTFS_DIR/init >&2
ln -sf $v bin $ROOTFS_DIR/sbin >&2
ln -sf $v bash $ROOTFS_DIR/bin/sh >&2
-ln -sf $v vi $ROOTFS_DIR/bin/vim >&2
ln -sf $v true $ROOTFS_DIR/bin/pam_console_apply >&2 # RH/Fedora with udev needs this
ln -sf $v ../bin $ROOTFS_DIR/usr/bin >&2
ln -sf $v ../bin $ROOTFS_DIR/usr/sbin >&2
diff --git a/usr/share/rear/conf/GNU/Linux.conf b/usr/share/rear/conf/GNU/Linux.conf
index 89aedd4c..0c97594a 100644
--- a/usr/share/rear/conf/GNU/Linux.conf
+++ b/usr/share/rear/conf/GNU/Linux.conf
@@ -206,6 +206,12 @@ LIBS+=(
)
COPY_AS_IS+=( /dev /etc/inputr[c] /etc/protocols /etc/services /etc/rpc /etc/termcap /etc/terminfo /lib*/terminfo /usr/share/terminfo /etc/netconfig /etc/mke2fs.conf /etc/*-release /etc/localtime /etc/magic /usr/share/misc/magic /etc/dracut.conf /etc/dracut.conf.d /usr/lib/dracut /sbin/modprobe.ksplice-orig /etc/sysctl.conf /etc/sysctl.d /etc/e2fsck.conf )
+
+# Needed by vi on Fedora and derived distributions
+# where vi is a shell script that executes /usr/libexec/vi
+# see https://github.com/rear/rear/pull/2822
+COPY_AS_IS+=( /usr/libexec/vi )
+
# Required by curl with https:
# There are stored the distribution provided certificates
# installed from packages, nothing confidential.

@@ -0,0 +1,21 @@
commit 40ec3bf072a51229e81bfbfa7cedb8a7c7902dbd
Author: Johannes Meixner <jsmeix@suse.com>
Date: Fri Jun 24 15:11:27 2022 +0200
Merge pull request #2827 from rear/jsmeix-fail-safe-yes-pipe-lvcreate
and commit b3fd58fc871e00bd713a0cb081de54d746ffffb3 from pull request #2839
diff --git a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh
index 1be17ba8..d34ab335 100644
--- a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh
+++ b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh
@@ -263,7 +263,7 @@ $ifline
LogPrint "Creating LVM volume '$vg/$lvname'; Warning: some properties may not be preserved..."
$warnraidline
- lvm lvcreate $lvopts $vg <<<y
+ lvm lvcreate -y $lvopts $vg
fi
EOF

@@ -0,0 +1,37 @@
commit 1447530f502305ed08149d9b2a56a51fb91af875
Author: Johannes Meixner <jsmeix@suse.com>
Date: Wed May 25 13:51:14 2022 +0200
Merge pull request #2808 from rear/jsmeix-exclude-watchdog
Exclude dev/watchdog* from the ReaR recovery system:
In default.conf add dev/watchdog* to COPY_AS_IS_EXCLUDE
because watchdog functionality is not wanted in the recovery system
because we do not want any automated reboot functionality
while disaster recovery happens via "rear recover",
see https://github.com/rear/rear/pull/2808
Furthermore having a copy of dev/watchdog*
during "rear mkrescue" in ReaR's build area
may even trigger a system crash that is caused by a
buggy TrendMicro ds_am module touching dev/watchdog
in ReaR's build area (/var/tmp/rear.XXX/rootfs),
see https://github.com/rear/rear/issues/2798
diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf
index 881a0af0..cb14da8b 100644
--- a/usr/share/rear/conf/default.conf
+++ b/usr/share/rear/conf/default.conf
@@ -1414,7 +1414,12 @@ COPY_AS_IS=( $SHARE_DIR $VAR_DIR )
# We let them being recreated by device mapper in the recovery system during the recovery process.
# Copying them into the recovery system would let "rear recover" avoid the migration process.
# See https://github.com/rear/rear/pull/1393 for details.
-COPY_AS_IS_EXCLUDE=( $VAR_DIR/output/\* dev/.udev dev/shm dev/shm/\* dev/oracleasm dev/mapper )
+# /dev/watchdog /dev/watchdog\* functionality is not wanted in the ReaR rescue/recovery system
+# because we do not want any automated reboot while disaster recovery happens via "rear recover".
+# Furthermore having dev/watchdog* during "rear mkrescue" may even trigger a system "crash" that is
+# caused by TrendMicro ds_am module touching dev/watchdog in ReaR's build area (/var/tmp/rear.XXX/rootfs).
+# See https://github.com/rear/rear/issues/2798
+COPY_AS_IS_EXCLUDE=( $VAR_DIR/output/\* dev/.udev dev/shm dev/shm/\* dev/oracleasm dev/mapper dev/watchdog\* )
# Array of user names that are trusted owners of files where RequiredSharedObjects calls ldd (cf. COPY_AS_IS)
# and where a ldd test is run inside the recovery system that tests all binaries for 'not found' libraries.
# The default is 'root' plus those standard system users that have a 'bin' or 'sbin' or 'root' home directory

@@ -0,0 +1,105 @@
commit 552dd6bfb20fdb3dc712b5243656d147392c27c3
Author: Johannes Meixner <jsmeix@suse.com>
Date: Thu Jun 2 15:25:52 2022 +0200
Merge pull request #2811 from rear/jsmeix-RECOVERY_COMMANDS
Add PRE_RECOVERY_COMMANDS and POST_RECOVERY_COMMANDS
as alternative to PRE_RECOVERY_SCRIPT and POST_RECOVERY_SCRIPT
see the description in default.conf how to use them and how they work.
See https://github.com/rear/rear/pull/2811 and see also
https://github.com/rear/rear/pull/2735 therein in particular
https://github.com/rear/rear/pull/2735#issuecomment-1134686196
Additionally use LogPrint to show the user the executed commands,
see https://github.com/rear/rear/pull/2789
diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf
index cb14da8b..b14525da 100644
--- a/usr/share/rear/conf/default.conf
+++ b/usr/share/rear/conf/default.conf
@@ -3117,14 +3117,37 @@ ELILO_BIN=
################ ---- custom scripts
#
# NOTE: The scripts can be defined as an array to better handle spaces in parameters.
-# The scripts are called like this: eval "${PRE_RECOVERY_SCRIPT[@]}"
+# The scripts are called like this:
+# eval "${PRE_RECOVERY_SCRIPT[@]}"
+#
+# Alternatively, commands can be executed by using the corresponding
+# PRE_RECOVERY_COMMANDS and POST_RECOVERY_COMMANDS array variables
+# which evaluate like this:
+# for command in "${PRE_RECOVERY_COMMANDS[@]}" ; do
+# eval "$command"
+# done
+#
+# Using PRE_RECOVERY_COMMANDS and POST_RECOVERY_COMMANDS
+# is simpler when multiple commands should be executed.
+# For example,
+# PRE_RECOVERY_SCRIPT=( 'echo Hello' ';' 'sleep 3' )
+# can be rewritten as
+# PRE_RECOVERY_COMMANDS=( 'echo Hello' 'sleep 3' )
+# or
+# PRE_RECOVERY_COMMANDS=( 'echo Hello' )
+# PRE_RECOVERY_COMMANDS+=( 'sleep 3' )
+
+# Those get called at the very beginning of "rear recover".
+# The PRE_RECOVERY_COMMANDS are called directly before the PRE_RECOVERY_SCRIPT.
+# Nothing was recreated and you have only the plain ReaR rescue/recovery system:
+PRE_RECOVERY_COMMANDS=()
+PRE_RECOVERY_SCRIPT=
-# Call this after Relax-and-Recover did everything in the recover workflow.
-# Use $TARGET_FS_ROOT (by default '/mnt/local') to refer to the recovered system.
+# Those get called at the very end of "rear recover".
+# The POST_RECOVERY_COMMANDS are called directly after the POST_RECOVERY_SCRIPT.
+# Use $TARGET_FS_ROOT (by default '/mnt/local') to access the recreated target system.
POST_RECOVERY_SCRIPT=
-
-# Call this before Relax-and-Recover starts to do anything in the recover workflow. You have the rescue system but nothing else
-PRE_RECOVERY_SCRIPT=
+POST_RECOVERY_COMMANDS=()
# PRE/POST Backup scripts will provide the ability to run certain tasks before and after a ReaR backup.
# for example:
diff --git a/usr/share/rear/setup/default/010_pre_recovery_script.sh b/usr/share/rear/setup/default/010_pre_recovery_script.sh
index 005107cc..8b4e4a36 100644
--- a/usr/share/rear/setup/default/010_pre_recovery_script.sh
+++ b/usr/share/rear/setup/default/010_pre_recovery_script.sh
@@ -1,4 +1,14 @@
+
+# The PRE_RECOVERY_COMMANDS are called directly before the PRE_RECOVERY_SCRIPT
+# so PRE_RECOVERY_COMMANDS can also be used to prepare things for the PRE_RECOVERY_SCRIPT:
+
+local command
+for command in "${PRE_RECOVERY_COMMANDS[@]}" ; do
+ LogPrint "Running PRE_RECOVERY_COMMANDS '$command'"
+ eval "$command"
+done
+
if test "$PRE_RECOVERY_SCRIPT" ; then
- Log "Running PRE_RECOVERY_SCRIPT '${PRE_RECOVERY_SCRIPT[@]}'"
- eval "${PRE_RECOVERY_SCRIPT[@]}"
+ LogPrint "Running PRE_RECOVERY_SCRIPT '${PRE_RECOVERY_SCRIPT[@]}'"
+ eval "${PRE_RECOVERY_SCRIPT[@]}"
fi
diff --git a/usr/share/rear/wrapup/default/500_post_recovery_script.sh b/usr/share/rear/wrapup/default/500_post_recovery_script.sh
index 77751800..866c9368 100644
--- a/usr/share/rear/wrapup/default/500_post_recovery_script.sh
+++ b/usr/share/rear/wrapup/default/500_post_recovery_script.sh
@@ -1,4 +1,14 @@
+
+# The POST_RECOVERY_COMMANDS are called directly after the POST_RECOVERY_SCRIPT
+# so POST_RECOVERY_COMMANDS can also be used to clean up things after the POST_RECOVERY_SCRIPT:
+
if test "$POST_RECOVERY_SCRIPT" ; then
- Log "Running POST_RECOVERY_SCRIPT '${POST_RECOVERY_SCRIPT[@]}'"
- eval "${POST_RECOVERY_SCRIPT[@]}"
+ LogPrint "Running POST_RECOVERY_SCRIPT '${POST_RECOVERY_SCRIPT[@]}'"
+ eval "${POST_RECOVERY_SCRIPT[@]}"
fi
+
+local command
+for command in "${POST_RECOVERY_COMMANDS[@]}" ; do
+ LogPrint "Running POST_RECOVERY_COMMANDS '$command'"
+ eval "$command"
+done

@ -0,0 +1,18 @@
diff --git a/usr/share/rear/prep/GNU/Linux/220_include_lvm_tools.sh b/usr/share/rear/prep/GNU/Linux/220_include_lvm_tools.sh
index 4b73fb05..c7704032 100644
--- a/usr/share/rear/prep/GNU/Linux/220_include_lvm_tools.sh
+++ b/usr/share/rear/prep/GNU/Linux/220_include_lvm_tools.sh
@@ -8,6 +8,13 @@ PROGS+=( lvm dmsetup dmeventd fsadm )
COPY_AS_IS+=( /etc/lvm )
+# Workaround for an LVM segfault when creating a PV with a UUID already present
+# in the device file: omit the device file from the rescue system
+# https://bugzilla.redhat.com/show_bug.cgi?id=2117937
+# proper fix:
+# https://sourceware.org/git/?p=lvm2.git;a=commit;h=8c3cfc75c72696ae8b620555fcc4f815b0c1d6b6
+COPY_AS_IS_EXCLUDE+=( /etc/lvm/devices )
+
if lvs --noheadings -o thin_count | grep -q -v "^\s*$" ; then
# There are Thin Pools on the system, include required binaries
PROGS+=( thin_check )

@ -0,0 +1,39 @@
diff --git a/usr/share/rear/build/default/490_fix_broken_links.sh b/usr/share/rear/build/default/490_fix_broken_links.sh
index 5bace664..cf960be8 100644
--- a/usr/share/rear/build/default/490_fix_broken_links.sh
+++ b/usr/share/rear/build/default/490_fix_broken_links.sh
@@ -7,6 +7,23 @@
# see https://github.com/rear/rear/issues/1638
# and https://github.com/rear/rear/pull/1734
+# Some broken symlinks are expected. The 'build' and 'source' symlinks in kernel modules point to kernel sources
+# and are broken until one installs the kernel-debug-devel or kernel-devel packages (on Fedora) and even then
+# the targets are not included in the rescue system by default.
+# Do not warn about those, it is just noise.
+local irrelevant_symlinks=( '*/lib/modules/*/build' '*/lib/modules/*/source' )
+function symlink_is_irrelevant () {
+ for i in "${irrelevant_symlinks[@]}"; do
+ # do not quote $i, it is a glob pattern, matching will be performed by [[ ... == ... ]]
+ # quoting inside [[ ]] prevents pattern matching
+ if [[ "$1" == $i ]]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+
# FIXME: The following code fails if symlinks or their targets contain characters from IFS (e.g. blanks),
# cf. the same kind of comments in build/default/990_verify_rootfs.sh
# and layout/prepare/GNU/Linux/130_include_mount_subvolumes_code.sh
@@ -38,6 +55,10 @@ pushd $ROOTFS_DIR
local broken_symlink=''
local link_target=''
for broken_symlink in $broken_symlinks ; do
+ if symlink_is_irrelevant "$broken_symlink" ; then
+ DebugPrint "Ignoring irrelevant broken symlink $broken_symlink"
+ continue
+ fi
# For each broken symlink absolute path inside ROOTFS_DIR
# we call "readlink -e" in the original system to get its link target there.
# If in the original system there was a chain of symbolic links like

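Side note on the pattern matching used by symlink_is_irrelevant above (illustrative sketch, not from the patch): an unquoted right-hand side inside [[ ... ]] is treated as a glob pattern, which is exactly why the entries of irrelevant_symlinks must stay unquoted, e.g.:
pattern='*/lib/modules/*/build'
[[ /tmp/rootfs/lib/modules/5.14.0/build == $pattern ]] && echo "glob match"       # matches
[[ /tmp/rootfs/lib/modules/5.14.0/build == "$pattern" ]] || echo "literal only"   # quoting disables the glob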
@ -0,0 +1,18 @@
diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf
index b14525da..23a83b71 100644
--- a/usr/share/rear/conf/default.conf
+++ b/usr/share/rear/conf/default.conf
@@ -1841,10 +1841,10 @@ OBDR_BLOCKSIZE=2048
# BACKUP=NBU stuff (Symantec/Veritas NetBackup)
##
#
-COPY_AS_IS_NBU=( /usr/openv/bin/vnetd /usr/openv/bin/vopied /usr/openv/lib /usr/openv/netbackup /usr/openv/var/auth/[mn]*.txt /opt/VRTSpbx /etc/vx/VxICS /etc/vx/vrtslog.conf )
-COPY_AS_IS_EXCLUDE_NBU=( /usr/openv/netbackup/logs "/usr/openv/netbackup/bin/bpjava*" /usr/openv/netbackup/bin/xbp /usr/openv/netbackup/bin/private /usr/openv/lib/java /usr/openv/lib/shared/vddk /usr/openv/netbackup/baremetal )
+COPY_AS_IS_NBU=( /usr/openv/bin/vnetd /usr/openv/bin/vopied /usr/openv/lib /usr/openv/netbackup /usr/openv/var/auth/[mn]*.txt /usr/openv/var/vxss /usr/openv/var/webtruststore /usr/openv/resources/nbpxyhelper /opt/VRTSpbx /etc/vx/VxICS /etc/vx/vrtslog.conf /var/log/VRTSpbx )
+COPY_AS_IS_EXCLUDE_NBU=( "/usr/openv/netbackup/logs/*" "/usr/openv/netbackup/bin/bpjava*" /usr/openv/netbackup/bin/xbp /usr/openv/netbackup/bin/private /usr/openv/lib/java "/usr/openv/lib/*-plugins" /usr/openv/lib/shared/vddk /usr/openv/netbackup/baremetal "/var/log/VRTSpbx/*" )
# See https://github.com/rear/rear/issues/2105 why /usr/openv/netbackup/sec/at/lib/ is needed:
-NBU_LD_LIBRARY_PATH="/usr/openv/lib:/usr/openv/netbackup/sec/at/lib/"
+NBU_LD_LIBRARY_PATH="/usr/openv/lib:/usr/openv/netbackup/sec/at/lib/:/usr/openv/lib/boost"
PROGS_NBU=( )
##

@ -0,0 +1,20 @@
diff --git a/usr/share/rear/finalize/Fedora/i386/550_rebuild_initramfs.sh b/usr/share/rear/finalize/Fedora/550_rebuild_initramfs.sh
similarity index 100%
rename from usr/share/rear/finalize/Fedora/i386/550_rebuild_initramfs.sh
rename to usr/share/rear/finalize/Fedora/550_rebuild_initramfs.sh
diff --git a/usr/share/rear/finalize/Fedora/ppc64/550_rebuild_initramfs.sh b/usr/share/rear/finalize/Fedora/ppc64/550_rebuild_initramfs.sh
deleted file mode 120000
index 22eede59..00000000
--- a/usr/share/rear/finalize/Fedora/ppc64/550_rebuild_initramfs.sh
+++ /dev/null
@@ -1 +0,0 @@
-../i386/550_rebuild_initramfs.sh
\ No newline at end of file
diff --git a/usr/share/rear/finalize/Fedora/ppc64le/550_rebuild_initramfs.sh b/usr/share/rear/finalize/Fedora/ppc64le/550_rebuild_initramfs.sh
deleted file mode 120000
index 22eede59..00000000
--- a/usr/share/rear/finalize/Fedora/ppc64le/550_rebuild_initramfs.sh
+++ /dev/null
@@ -1 +0,0 @@
-../i386/550_rebuild_initramfs.sh
\ No newline at end of file

@ -0,0 +1,129 @@
diff --git a/usr/share/rear/layout/prepare/GNU/Linux/131_include_filesystem_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/131_include_filesystem_code.sh
index 172ac032..9cff63a0 100644
--- a/usr/share/rear/layout/prepare/GNU/Linux/131_include_filesystem_code.sh
+++ b/usr/share/rear/layout/prepare/GNU/Linux/131_include_filesystem_code.sh
@@ -143,9 +143,9 @@ function create_fs () {
# unless the user has explicitly specified XFS filesystem options:
local xfs_opts
local xfs_device_basename="$( basename $device )"
- local xfs_info_filename="$LAYOUT_XFS_OPT_DIR/$xfs_device_basename.xfs"
+ local xfs_info_filename="$LAYOUT_XFS_OPT_DIR_RESTORE/$xfs_device_basename.xfs"
# Only uppercase letters and digits are used to ensure mkfs_xfs_options_variable_name is a valid bash variable name
- # even in case of complicated device nodes e.g. things like /dev/mapper/SIBM_2810XIV_78033E7012F-part3
+ # even in case of complicated device nodes e.g. things like /dev/mapper/SIBM_2810XIV_78033E7012F-part3
# cf. current_orig_device_basename_alnum_uppercase in layout/prepare/default/300_map_disks.sh
local xfs_device_basename_alnum_uppercase="$( echo $xfs_device_basename | tr -d -c '[:alnum:]' | tr '[:lower:]' '[:upper:]' )"
# cf. predefined_input_variable_name in the function UserInput in lib/_input-output-functions.sh
diff --git a/usr/share/rear/layout/prepare/default/010_prepare_files.sh b/usr/share/rear/layout/prepare/default/010_prepare_files.sh
index 85964712..7a980e63 100644
--- a/usr/share/rear/layout/prepare/default/010_prepare_files.sh
+++ b/usr/share/rear/layout/prepare/default/010_prepare_files.sh
@@ -5,6 +5,7 @@ LAYOUT_DEPS="$VAR_DIR/layout/diskdeps.conf"
LAYOUT_TODO="$VAR_DIR/layout/disktodo.conf"
LAYOUT_CODE="$VAR_DIR/layout/diskrestore.sh"
LAYOUT_XFS_OPT_DIR="$VAR_DIR/layout/xfs"
+LAYOUT_XFS_OPT_DIR_RESTORE="$LAYOUT_XFS_OPT_DIR/restore"
FS_UUID_MAP="$VAR_DIR/layout/fs_uuid_mapping"
LUN_WWID_MAP="$VAR_DIR/layout/lun_wwid_mapping"
diff --git a/usr/share/rear/layout/prepare/default/319_rename_xfs_configs.sh b/usr/share/rear/layout/prepare/default/319_rename_xfs_configs.sh
new file mode 100644
index 00000000..406afa61
--- /dev/null
+++ b/usr/share/rear/layout/prepare/default/319_rename_xfs_configs.sh
@@ -0,0 +1,83 @@
+# Clean up the directory which holds the XFS configuration files for `rear recover'.
+# This avoids a possible mess in LAYOUT_XFS_OPT_DIR_RESTORE if `rear recover'
+# is launched multiple times and the user chooses a different disk
+# mapping each time.
+# Removing and recreating LAYOUT_XFS_OPT_DIR_RESTORE ensures that ReaR
+# has only the current files available during the current session.
+rm -rf "$LAYOUT_XFS_OPT_DIR_RESTORE"
+mkdir -p "$LAYOUT_XFS_OPT_DIR_RESTORE"
+
+local excluded_configs=()
+
+# Read $MAPPING_FILE (disk_mappings) to discover the final disk mapping.
+# Once the mapping is known, configuration files can be renamed.
+# (e.g. sda2.xfs to sdb2.xfs, ...)
+while read source target junk ; do
+ # Disks in MAPPING_FILE are listed with their full device path. Since XFS config
+ # files are created in the format e.g. sda2.xfs, strip the prefixed path to have
+ # only the short device name available.
+ base_source=$(basename "$source")
+ base_target=$(basename "$target")
+
+ # Check if XFS configuration file for whole device (unpartitioned)
+ # is available (sda, sdb, ...). If so, rename and copy it to
+ # LAYOUT_XFS_OPT_DIR_RESTORE.
+ if [ -e "$LAYOUT_XFS_OPT_DIR/$base_source.xfs" ]; then
+ Log "Migrating XFS configuration file $base_source.xfs to $base_target.xfs"
+ cp "$v" "$LAYOUT_XFS_OPT_DIR/$base_source.xfs" \
+ "$LAYOUT_XFS_OPT_DIR_RESTORE/$base_target.xfs"
+
+ # Replace old device name in meta-data= option in XFS
+ # configuration file as well.
+ sed -i s#"meta-data=${source}\(\s\)"#"meta-data=${target}\1"# \
+ "$LAYOUT_XFS_OPT_DIR_RESTORE/$base_target.xfs"
+
+ # Mark XFS config file as processed to avoid copying it again later.
+ # More details on why configs are excluded can be found near the
+ # end of this script (near the `tar' command).
+ excluded_configs+=("--exclude=$base_source.xfs")
+ fi
+
+ # Find corresponding partitions to source disk in LAYOUT_FILE
+ # and migrate/rename them too if necessary.
+ while read _ layout_device _ _ _ _ layout_partition; do
+ if [[ "$source" = "$layout_device" ]]; then
+ base_src_layout_partition=$(basename "$layout_partition")
+ base_dst_layout_partition=${base_src_layout_partition//$base_source/$base_target}
+ dst_layout_partition=${layout_partition//$base_source/$base_target}
+
+ if [ -e "$LAYOUT_XFS_OPT_DIR/$base_src_layout_partition.xfs" ]; then
+ Log "Migrating XFS configuration $base_src_layout_partition.xfs to $base_dst_layout_partition.xfs"
+ cp "$v" "$LAYOUT_XFS_OPT_DIR/$base_src_layout_partition.xfs" \
+ "$LAYOUT_XFS_OPT_DIR_RESTORE/$base_dst_layout_partition.xfs"
+
+ # Replace old device name in meta-data= option in XFS
+ # configuration file as well.
+ sed -i s#"meta-data=${layout_partition}\(\s\)"#"meta-data=${dst_layout_partition}\1"# \
+ "$LAYOUT_XFS_OPT_DIR_RESTORE/$base_dst_layout_partition.xfs"
+
+ # Mark XFS config file as processed to avoid copying it again later.
+ # More details on why configs are excluded can be found near the
+ # end of this script (near the `tar' command).
+ excluded_configs+=("--exclude=$base_src_layout_partition.xfs")
+ fi
+ fi
+ done < <( grep -E "^part " "$LAYOUT_FILE" )
+done < <( grep -v '^#' "$MAPPING_FILE" )
+
+pushd "$LAYOUT_XFS_OPT_DIR" >/dev/null
+# Copy remaining files
+# We need to copy the remaining files into LAYOUT_XFS_OPT_DIR_RESTORE which will
+# serve as the base directory where ReaR will look for XFS config files.
+# It is necessary to copy only files that were not previously processed,
+# because in LAYOUT_XFS_OPT_DIR they are still listed with their
+# original names and copying them to LAYOUT_XFS_OPT_DIR_RESTORE could overwrite
+# XFS configs already migrated.
+# e.g. with the following disk mapping situation:
+# /dev/sda2 => /dev/sdb2
+# /dev/sdb2 => /dev/sda2
+# Files in LAYOUT_XFS_OPT_DIR_RESTORE would be overwritten by XFS configs with
+# wrong names.
+# tar is used to take advantage of its exclude feature.
+tar cf - --exclude=restore "${excluded_configs[@]}" . | tar xfp - -C "$LAYOUT_XFS_OPT_DIR_RESTORE"
+popd >/dev/null
diff --git a/usr/share/rear/layout/save/GNU/Linux/100_create_layout_file.sh b/usr/share/rear/layout/save/GNU/Linux/100_create_layout_file.sh
index 7895e4ee..fc0fa8fc 100644
--- a/usr/share/rear/layout/save/GNU/Linux/100_create_layout_file.sh
+++ b/usr/share/rear/layout/save/GNU/Linux/100_create_layout_file.sh
@@ -10,6 +10,7 @@ mkdir -p $v $VAR_DIR/layout/config
# We need directory for XFS options only if XFS is in use:
if test "$( mount -t xfs )" ; then
LAYOUT_XFS_OPT_DIR="$VAR_DIR/layout/xfs"
+ rm -rf $LAYOUT_XFS_OPT_DIR
mkdir -p $v $LAYOUT_XFS_OPT_DIR
fi

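A hypothetical walk-through of the renaming done by 319_rename_xfs_configs.sh above (device names invented, assuming the usual VAR_DIR=/var/lib/rear): with a disk_mappings entry that maps /dev/sda to /dev/sdb,
# before: $VAR_DIR/layout/xfs/sda2.xfs            contains  meta-data=/dev/sda2 ...
# after:  $VAR_DIR/layout/xfs/restore/sdb2.xfs    contains  meta-data=/dev/sdb2 ...
so the XFS options recorded for sda2 follow the disk mapping and are applied when the mapped partition sdb2 is recreated.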
@ -0,0 +1,60 @@
diff --git a/usr/share/rear/lib/framework-functions.sh b/usr/share/rear/lib/framework-functions.sh
index 4878216b..e919bdbf 100644
--- a/usr/share/rear/lib/framework-functions.sh
+++ b/usr/share/rear/lib/framework-functions.sh
@@ -121,7 +121,7 @@ function cleanup_build_area_and_end_program () {
sleep 2
umount_mountpoint_lazy $BUILD_DIR/outputfs
fi
- remove_temporary_mountpoint '$BUILD_DIR/outputfs' || BugError "Directory $BUILD_DIR/outputfs not empty, can not remove"
+ remove_temporary_mountpoint "$BUILD_DIR/outputfs" || BugError "Directory $BUILD_DIR/outputfs not empty, can not remove"
rmdir $v $BUILD_DIR >&2
fi
Log "End of program reached"
diff --git a/usr/share/rear/lib/global-functions.sh b/usr/share/rear/lib/global-functions.sh
index c1a11615..0f8f362d 100644
--- a/usr/share/rear/lib/global-functions.sh
+++ b/usr/share/rear/lib/global-functions.sh
@@ -317,7 +317,20 @@ function url_path() {
### Returns true if one can upload files to the URL
function scheme_accepts_files() {
- local scheme=$1
+ # Be safe against 'set -eu' which would exit 'rear' with "bash: $1: unbound variable"
+ # when scheme_accepts_files is called without an argument
+ # by bash parameter expansion with using an empty default value if $1 is unset or null.
+ # Bash parameter expansion with assigning a default value ${1:=} does not work
+ # (then it would still exit with "bash: $1: cannot assign in this way")
+ # but using a default value is practicable here because $1 is used only once
+ # cf. https://github.com/rear/rear/pull/2675#discussion_r705018956
+ local scheme=${1:-}
+ # Return false if scheme is empty or blank (e.g. when OUTPUT_URL is unset or empty or blank)
+ # cf. https://github.com/rear/rear/issues/2676
+ # and https://github.com/rear/rear/issues/2667#issuecomment-914447326
+ # also return false if scheme is more than one word (so no quoted "$scheme" here)
+ # cf. https://github.com/rear/rear/pull/2675#discussion_r704401462
+ test $scheme || return 1
case $scheme in
(null|tape|obdr)
# tapes do not support uploading arbitrary files, one has to handle them
@@ -341,7 +354,10 @@ function scheme_accepts_files() {
### Returning true does not imply that the URL is currently mounted at a filesystem and usable,
### only that it can be mounted (use mount_url() first)
function scheme_supports_filesystem() {
- local scheme=$1
+ # Be safe against 'set -eu' exit if scheme_supports_filesystem is called without argument
+ local scheme=${1:-}
+ # Return false if scheme is empty or blank or more than one word, cf. scheme_accepts_files() above
+ test $scheme || return 1
case $scheme in
(null|tape|obdr|rsync|fish|ftp|ftps|hftp|http|https|sftp)
return 1
@@ -560,7 +576,7 @@ function umount_url() {
RemoveExitTask "perform_umount_url '$url' '$mountpoint' lazy"
- remove_temporary_mountpoint '$mountpoint' && RemoveExitTask "remove_temporary_mountpoint '$mountpoint'"
+ remove_temporary_mountpoint "$mountpoint" && RemoveExitTask "remove_temporary_mountpoint '$mountpoint'"
return 0
}

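A small sketch (hypothetical function names, not from the patch) of why the ${1:-} guard above matters when bash runs with 'set -eu':
unset_arg_unsafe() { local scheme=$1 ; echo "$scheme" ; }     # called without arguments under 'set -u' this aborts with "bash: $1: unbound variable"
unset_arg_safe()   { local scheme=${1:-} ; test $scheme || return 1 ; echo "$scheme" ; }
unset_arg_safe || echo "no scheme given"                      # prints "no scheme given" instead of killing the shell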
@ -0,0 +1,38 @@
diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf
index 9ada92c3..455aa3ce 100644
--- a/usr/share/rear/conf/default.conf
+++ b/usr/share/rear/conf/default.conf
@@ -1813,7 +1813,7 @@ OBDR_BLOCKSIZE=2048
# BACKUP=NBU stuff (Symantec/Veritas NetBackup)
##
#
-COPY_AS_IS_NBU=( /usr/openv/bin/vnetd /usr/openv/bin/vopied /usr/openv/lib /usr/openv/netbackup /usr/openv/var/auth/[mn]*.txt )
+COPY_AS_IS_NBU=( /usr/openv/bin/vnetd /usr/openv/bin/vopied /usr/openv/lib /usr/openv/netbackup /usr/openv/var/auth/[mn]*.txt /opt/VRTSpbx /etc/vx/VxICS /etc/vx/vrtslog.conf )
COPY_AS_IS_EXCLUDE_NBU=( /usr/openv/netbackup/logs "/usr/openv/netbackup/bin/bpjava*" /usr/openv/netbackup/bin/xbp /usr/openv/netbackup/bin/private /usr/openv/lib/java /usr/openv/lib/shared/vddk /usr/openv/netbackup/baremetal )
# See https://github.com/rear/rear/issues/2105 why /usr/openv/netbackup/sec/at/lib/ is needed:
NBU_LD_LIBRARY_PATH="/usr/openv/lib:/usr/openv/netbackup/sec/at/lib/"
diff --git a/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh b/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh
index cd48b8d9..ae5a3ccc 100644
--- a/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh
+++ b/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh
@@ -7,6 +7,12 @@
[[ $NBU_version -lt 7 ]] && return # NBU is using xinetd when version <7.x
+if [ -e "/etc/init.d/vxpbx_exchanged" ]; then
+ cp $v /etc/init.d/vxpbx_exchanged $ROOTFS_DIR/etc/scripts/system-setup.d/vxpbx_exchanged.real
+ chmod $v +x $ROOTFS_DIR/etc/scripts/system-setup.d/vxpbx_exchanged.real
+ echo "( /etc/scripts/system-setup.d/vxpbx_exchanged.real start )" > $ROOTFS_DIR/etc/scripts/system-setup.d/89-vxpbx_exchanged.sh
+fi
+
if [ -e "/etc/init.d/netbackup" ]; then
cp $v /etc/init.d/netbackup $ROOTFS_DIR/etc/scripts/system-setup.d/netbackup.real
chmod $v +x $ROOTFS_DIR/etc/scripts/system-setup.d/netbackup.real
diff --git a/usr/share/rear/skel/NBU/usr/openv/tmp/.gitignore b/usr/share/rear/skel/NBU/usr/openv/tmp/.gitignore
new file mode 100644
index 00000000..d6b7ef32
--- /dev/null
+++ b/usr/share/rear/skel/NBU/usr/openv/tmp/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore

@ -0,0 +1,37 @@
diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf
index 9ada92c3..3bdb5497 100644
--- a/usr/share/rear/conf/default.conf
+++ b/usr/share/rear/conf/default.conf
@@ -57,10 +57,16 @@
#
# where /prefix/for/rear/working/directory must already exist.
# This is useful for example when there is not sufficient free space
-# in /tmp or $TMPDIR for the ISO image or even the backup archive.
-# TMPDIR cannot be set to a default value here, otherwise /usr/sbin/rear
+# in /var/tmp or $TMPDIR for the ISO image or even the backup archive.
+# TMPDIR cannot be set to a default value here unconditionally but only
+# if it is not set before calling the program, otherwise /usr/sbin/rear
# would not work in compliance with the Linux/Unix standards regarding TMPDIR
# see https://github.com/rear/rear/issues/968
+# The default is /var/tmp instead of the more usual /tmp (the system default),
+# because /tmp is not intended for the large amounts of data that ReaR usually
+# produces when creating the image (see file-hierarchy(7)). In particular,
+# /tmp can be a tmpfs, and thus restricted by the available RAM/swap.
+export TMPDIR="${TMPDIR-/var/tmp}"
##
# ROOT_HOME_DIR
diff --git a/usr/share/rear/rescue/GNU/Linux/600_unset_TMPDIR_in_rescue_conf.sh b/usr/share/rear/rescue/GNU/Linux/600_unset_TMPDIR_in_rescue_conf.sh
deleted file mode 100644
index 84d0cabb..00000000
--- a/usr/share/rear/rescue/GNU/Linux/600_unset_TMPDIR_in_rescue_conf.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-cat - <<EOF >> "$ROOTFS_DIR/etc/rear/rescue.conf"
-# TMPDIR variable may be defined in local.conf file as prefix dir for mktemp command
-# e.g. by defining TMPDIR=/var we would get our BUILD_DIR=/var/tmp/rear.XXXXXXXXXXXX
-# However, in rescue we want our BUILD_DIR=/tmp/rear.XXXXXXX as we are not sure that
-# the user defined TMPDIR would exist in our rescue image
-# by 'unset TMPDIR' we achieve above goal (as rescue.conf is read after local.conf)!
-unset TMPDIR
-EOF

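For illustration (not part of the patch): because the new default only applies when TMPDIR is not already set, a user can still override it in /etc/rear/local.conf, for example with a made-up path that must already exist:
export TMPDIR=/srv/rear-scratch    # example prefix; ReaR then keeps its working files under this directory instead of /var/tmp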
@ -0,0 +1,4 @@
# cronjob for ReaR
# periodically check if disk layout has changed and update
# the rescue image
30 1 * * * root test -f /var/lib/rear/layout/disklayout.conf && /usr/sbin/rear checklayout || /usr/sbin/rear mkrescue

@ -0,0 +1,6 @@
[Unit]
Description=Update ReaR rescue image
[Service]
Type=oneshot
# systemd does not interpret shell operators such as '||' in ExecStart, so run the command chain through a shell
ExecStart=/bin/sh -c '/usr/sbin/rear checklayout || /usr/sbin/rear mkrescue'

@ -0,0 +1,10 @@
[Unit]
Description=Update ReaR rescue image
[Timer]
OnCalendar=daily
RandomizedDelaySec=1h
Persistent=true
[Install]
WantedBy=timers.target
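Assuming the two units above get installed as rear.service and rear.timer (the unit file names are an assumption, they are not stated here), the daily check would typically be activated with:
systemctl enable --now rear.timer     # start the timer now and enable it at boot
systemctl list-timers 'rear*'         # check when the next run is scheduled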

@ -0,0 +1,864 @@
commit e6a9c973dbb7be6e46ed9a7fe34df0635635fed6
Author: Johannes Meixner <jsmeix@suse.com>
Date: Tue Jul 12 13:59:28 2022 +0200
Merge pull request #2831 from pcahyna/rsync-url-fix-refactor
Refactor rsync URL support, fixes rsync OUTPUT_URL:
The code to parse rsync:// URLs was BACKUP_URL specific.
If one specified BACKUP=RSYNC and an OUTPUT_URL different from BACKUP_URL,
the OUTPUT_URL was ignored and the output files went to BACKUP_URL.
Fix by introducing generic functions for rsync URL parsing and
use them for both BACKUP_URL and OUTPUT_URL, as appropriate.
Replace all uses of global RSYNC_* variables derived
from BACKUP_URL by those functions.
There also was inconsistent special handling for OUTPUT=PXE which is now removed:
An rsync OUTPUT_URL with OUTPUT=PXE now creates the RSYNC_PREFIX directory
at the destination and the URL is interpreted as in all other cases.
See https://github.com/rear/rear/pull/2831
and https://github.com/rear/rear/issues/2781
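As an illustration of the helper functions introduced below (user, host and paths are made up; the expected results follow the function definitions in this patch and assume the default RSYNC_PREFIX=$HOSTNAME):
# new-style URL without '::' selects the ssh transport
BACKUP_URL=rsync://backupuser@backupsrv/exports/rear
rsync_proto       "$BACKUP_URL"    # -> ssh
rsync_remote_ssh  "$BACKUP_URL"    # -> backupuser@backupsrv
rsync_remote_full "$BACKUP_URL"    # -> backupuser@backupsrv:/exports/rear/$RSYNC_PREFIX
# new-style URL with '::' selects the rsync daemon protocol
OUTPUT_URL=rsync://backupuser@backupsrv::/backup
rsync_remote_full "$OUTPUT_URL"    # -> rsync://backupuser@backupsrv:873/backup/$RSYNC_PREFIX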
diff --git a/usr/share/rear/backup/NETFS/default/200_check_rsync_relative_option.sh b/usr/share/rear/backup/NETFS/default/200_check_rsync_relative_option.sh
deleted file mode 120000
index 336b83f5..00000000
--- a/usr/share/rear/backup/NETFS/default/200_check_rsync_relative_option.sh
+++ /dev/null
@@ -1 +0,0 @@
-../../RSYNC/default/200_check_rsync_relative_option.sh
\ No newline at end of file
diff --git a/usr/share/rear/backup/NETFS/default/210_check_rsync_relative_option.sh b/usr/share/rear/backup/NETFS/default/210_check_rsync_relative_option.sh
new file mode 120000
index 00000000..0570eb44
--- /dev/null
+++ b/usr/share/rear/backup/NETFS/default/210_check_rsync_relative_option.sh
@@ -0,0 +1 @@
+../../RSYNC/default/210_check_rsync_relative_option.sh
\ No newline at end of file
diff --git a/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh b/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh
index 1692ba4c..dd198ede 100644
--- a/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh
+++ b/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh
@@ -6,29 +6,29 @@ local backup_prog_rc
touch "${TMP_DIR}/selinux.autorelabel"
cat $TMP_DIR/selinux.mode > $SELINUX_ENFORCE
Log "Restored original SELinux mode"
- case $RSYNC_PROTO in
+ case $(rsync_proto "$BACKUP_URL") in
(ssh)
# for some reason rsync changes the mode of backup after each run to 666
# FIXME: Add an explanatory comment why "2>/dev/null" is useful here
# or remove it according to https://github.com/rear/rear/issues/1395
- ssh $RSYNC_USER@$RSYNC_HOST "chmod $v 755 ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" 2>/dev/null
+ ssh $(rsync_remote_ssh "$BACKUP_URL") "chmod $v 755 $(rsync_path_full "$BACKUP_URL")/backup" 2>/dev/null
$BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" \
- "$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" 2>/dev/null
+ "$(rsync_remote_full "$BACKUP_URL")/backup/.autorelabel" 2>/dev/null
backup_prog_rc=$?
if [ $backup_prog_rc -ne 0 ]; then
- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]"
- #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup"
+ LogPrint "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup [${rsync_err_msg[$backup_prog_rc]}]"
+ #StopIfError "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup"
fi
;;
(rsync)
$BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" "${BACKUP_RSYNC_OPTIONS[@]}" \
- "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel"
+ "$(rsync_remote_full "$BACKUP_URL")/backup/.autorelabel"
backup_prog_rc=$?
if [ $backup_prog_rc -ne 0 ]; then
- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]"
- #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup"
+ LogPrint "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup [${rsync_err_msg[$backup_prog_rc]}]"
+ #StopIfError "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup"
fi
;;
diff --git a/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh b/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh
index 9a17d6bb..de57d571 100644
--- a/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh
+++ b/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh
@@ -4,29 +4,29 @@ local backup_prog_rc
> "${TMP_DIR}/selinux.autorelabel"
- case $RSYNC_PROTO in
+ case $(rsync_proto "$BACKUP_URL") in
(ssh)
# for some reason rsync changes the mode of backup after each run to 666
# FIXME: Add an explanatory comment why "2>/dev/null" is useful here
# or remove it according to https://github.com/rear/rear/issues/1395
- ssh $RSYNC_USER@$RSYNC_HOST "chmod $v 755 ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" 2>/dev/null
+ ssh $(rsync_remote_ssh "$BACKUP_URL") "chmod $v 755 $(rsync_path_full "$BACKUP_URL")/backup" 2>/dev/null
$BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" \
- "$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" 2>/dev/null
+ "$(rsync_remote_full "$BACKUP_URL")/backup/.autorelabel" 2>/dev/null
backup_prog_rc=$?
if [ $backup_prog_rc -ne 0 ]; then
- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]"
- #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup"
+ LogPrint "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup [${rsync_err_msg[$backup_prog_rc]}]"
+ #StopIfError "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup"
fi
;;
(rsync)
$BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" "${BACKUP_RSYNC_OPTIONS[@]}" \
- "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel"
+ "$(rsync_remote_full "$BACKUP_URL")/backup/.autorelabel"
backup_prog_rc=$?
if [ $backup_prog_rc -ne 0 ]; then
- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]"
- #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup"
+ LogPrint "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup [${rsync_err_msg[$backup_prog_rc]}]"
+ #StopIfError "Failed to create .autorelabel on $(rsync_path_full "$BACKUP_URL")/backup"
fi
;;
diff --git a/usr/share/rear/backup/RSYNC/default/200_make_prefix_dir.sh b/usr/share/rear/backup/RSYNC/default/200_make_prefix_dir.sh
new file mode 100644
index 00000000..81aa6879
--- /dev/null
+++ b/usr/share/rear/backup/RSYNC/default/200_make_prefix_dir.sh
@@ -0,0 +1,28 @@
+# Create RSYNC_PREFIX/backup on remote rsync server
+# RSYNC_PREFIX=$HOSTNAME as set in default.conf
+
+local proto host
+
+proto="$(rsync_proto "$BACKUP_URL")"
+host="$(rsync_host "$BACKUP_URL")"
+
+mkdir -p $v -m0750 "${TMP_DIR}/rsync/${RSYNC_PREFIX}" >&2 || Error "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}'"
+mkdir -p $v -m0755 "${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup" >&2 || Error "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup'"
+
+case $proto in
+
+ (ssh)
+ $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "$(rsync_remote "$BACKUP_URL")" >/dev/null 2>&1 \
+ || Error "Could not create '$(rsync_path_full "$BACKUP_URL")' on remote ${host}"
+ ;;
+
+ (rsync)
+ $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${BACKUP_RSYNC_OPTIONS[@]}" "$(rsync_remote "$BACKUP_URL")/" >/dev/null \
+ || Error "Could not create '$(rsync_path_full "$BACKUP_URL")' on remote ${host}"
+ ;;
+
+esac
+
+# We don't need it anymore, from now on we operate on the remote copy
+rmdir $v "${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup"
+rmdir $v "${TMP_DIR}/rsync/${RSYNC_PREFIX}"
diff --git a/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh b/usr/share/rear/backup/RSYNC/default/210_check_rsync_relative_option.sh
similarity index 91%
rename from usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh
rename to usr/share/rear/backup/RSYNC/default/210_check_rsync_relative_option.sh
index cedee9ce..692616b7 100644
--- a/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh
+++ b/usr/share/rear/backup/RSYNC/default/210_check_rsync_relative_option.sh
@@ -1,4 +1,4 @@
-# 200_check_rsync_relative_option.sh
+# 210_check_rsync_relative_option.sh
# See issue #871 for details
# check for the --relative option in BACKUP_RSYNC_OPTIONS array
diff --git a/usr/share/rear/backup/RSYNC/default/450_calculate_req_space.sh b/usr/share/rear/backup/RSYNC/default/450_calculate_req_space.sh
index eb99dbf6..037e49c0 100644
--- a/usr/share/rear/backup/RSYNC/default/450_calculate_req_space.sh
+++ b/usr/share/rear/backup/RSYNC/default/450_calculate_req_space.sh
@@ -1,6 +1,12 @@
# here we will calculate the space required to hold the backup archive on the remote rsync system
# This file is part of Relax-and-Recover, licensed under the GNU General
# Public License. Refer to the included COPYING for full text of license.
+local proto host path
+
+proto="$(rsync_proto "$BACKUP_URL")"
+host="$(rsync_host "$BACKUP_URL")"
+path="$(rsync_path "$BACKUP_URL")"
+
_local_size=0
_remote_size=0
while read -r ; do
@@ -13,17 +19,17 @@ while read -r ; do
done < $TMP_DIR/backup-include.txt
LogPrint "Estimated size of local file systems is $(( _local_size / 1024 )) MB"
-case $RSYNC_PROTO in
+case $proto in
(ssh)
- LogPrint "Calculating size of $RSYNC_HOST:$RSYNC_PATH"
- ssh -l $RSYNC_USER $RSYNC_HOST "df -P $RSYNC_PATH" >$TMP_DIR/rs_size
- StopIfError "Failed to determine size of $RSYNC_PATH"
+ LogPrint "Calculating size of ${host}:${path}"
+ ssh $(rsync_remote_ssh "$BACKUP_URL") "df -P ${path}" >$TMP_DIR/rs_size
+ StopIfError "Failed to determine size of ${path}"
_div=1 # 1024-blocks
grep -q "512-blocks" $TMP_DIR/rs_size && _div=2 # HPUX: divide with 2 to get kB size
_remote_size=$( tail -n 1 $TMP_DIR/rs_size | awk '{print $2}' )
_remote_size=$(( _remote_size / _div ))
[[ $_remote_size -gt $_local_size ]]
- StopIfError "Not enough disk space available on $RSYNC_HOST:$RSYNC_PATH ($_remote_size < $_local_size)"
+ StopIfError "Not enough disk space available on ${host}:${path} ($_remote_size < $_local_size)"
;;
(rsync)
# TODO: how can we calculate the free size on remote system via rsync protocol??
diff --git a/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh b/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh
index 750a04ca..aa8192c0 100644
--- a/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh
+++ b/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh
@@ -5,6 +5,11 @@
local backup_prog_rc
local backup_log_message
+local host path
+
+host="$(rsync_host "$BACKUP_URL")"
+path="$(rsync_path "$BACKUP_URL")"
+
Log "Include list:"
while read -r ; do
Log " $REPLY"
@@ -14,26 +19,27 @@ while read -r ; do
Log " $REPLY"
done < $TMP_DIR/backup-exclude.txt
-LogPrint "Creating $BACKUP_PROG backup on '${RSYNC_HOST}:${RSYNC_PATH}'"
+LogPrint "Creating $BACKUP_PROG backup on '${host}:${path}'"
ProgressStart "Running backup operation"
(
case "$(basename $BACKUP_PROG)" in
(rsync)
+ # We are in a subshell, so this change will not propagate to later scripts
BACKUP_RSYNC_OPTIONS+=( --one-file-system --delete --exclude-from=$TMP_DIR/backup-exclude.txt --delete-excluded )
- case $RSYNC_PROTO in
+ case $(rsync_proto "$BACKUP_URL") in
(ssh)
- Log $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" $(cat $TMP_DIR/backup-include.txt) "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/backup"
+ Log $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" $(cat $TMP_DIR/backup-include.txt) "$(rsync_remote_full "$BACKUP_URL")/backup"
$BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" $(cat $TMP_DIR/backup-include.txt) \
- "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/backup"
+ "$(rsync_remote_full "$BACKUP_URL")/backup"
;;
(rsync)
$BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" $(cat $TMP_DIR/backup-include.txt) \
- "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup"
+ "$(rsync_remote_full "$BACKUP_URL")/backup"
;;
esac
@@ -57,11 +63,11 @@ get_size() {
}
check_remote_df() {
- echo $(ssh ${RSYNC_USER}@${RSYNC_HOST} df -P ${RSYNC_PATH} 2>/dev/null | tail -1 | awk '{print $5}' | sed -e 's/%//')
+ echo $(ssh $(rsync_remote_ssh "$BACKUP_URL") df -P ${path} 2>/dev/null | tail -1 | awk '{print $5}' | sed -e 's/%//')
}
check_remote_du() {
- x=$(ssh ${RSYNC_USER}@${RSYNC_HOST} du -sb ${RSYNC_PATH}/${RSYNC_PREFIX}/backup 2>/dev/null | awk '{print $1}')
+ x=$(ssh $(rsync_remote_ssh "$BACKUP_URL") du -sb $(rsync_path_full "$BACKUP_URL")/backup 2>/dev/null | awk '{print $1}')
[[ -z "${x}" ]] && x=0
echo $x
}
@@ -81,7 +87,7 @@ case "$(basename $BACKUP_PROG)" in
case $i in
300)
- [[ $(check_remote_df) -eq 100 ]] && Error "Disk is full on system ${RSYNC_HOST}"
+ [[ $(check_remote_df) -eq 100 ]] && Error "Disk is full on system ${host}"
;;
15|30|45|60|75|90|105|120|135|150|165|180|195|210|225|240|255|270|285)
diff --git a/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh b/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh
index b90d459b..76b9f971 100644
--- a/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh
+++ b/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh
@@ -1,26 +1,27 @@
# copy the backup.log & rear.log file to remote destination with timestamp added
-local timestamp
+local timestamp proto
timestamp=$( date +%Y%m%d.%H%M )
+proto="$(rsync_proto "$BACKUP_URL")"
# compress the log file first
gzip "$TMP_DIR/$BACKUP_PROG_ARCHIVE.log" || Error "Failed to 'gzip $TMP_DIR/$BACKUP_PROG_ARCHIVE.log'"
-case $RSYNC_PROTO in
+case $proto in
(ssh)
# FIXME: Add an explanatory comment why "2>/dev/null" is useful here
# or remove it according to https://github.com/rear/rear/issues/1395
$BACKUP_PROG -a "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log.gz" \
- "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${timestamp}.log.gz" 2>/dev/null
+ "$(rsync_remote_full "$BACKUP_URL")/${BACKUP_PROG_ARCHIVE}-${timestamp}.log.gz" 2>/dev/null
- $BACKUP_PROG -a "$RUNTIME_LOGFILE" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/rear-${timestamp}.log" 2>/dev/null
+ $BACKUP_PROG -a "$RUNTIME_LOGFILE" "$(rsync_remote_full "$BACKUP_URL")/rear-${timestamp}.log" 2>/dev/null
;;
(rsync)
$BACKUP_PROG -a "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log.gz" "${BACKUP_RSYNC_OPTIONS[@]}" \
- "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${timestamp}.log.gz"
+ "$(rsync_remote_full "$BACKUP_URL")/${BACKUP_PROG_ARCHIVE}-${timestamp}.log.gz"
- $BACKUP_PROG -a "$RUNTIME_LOGFILE" "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}//rear-${timestamp}.log"
+ $BACKUP_PROG -a "$RUNTIME_LOGFILE" "${BACKUP_RSYNC_OPTIONS[@]}" "$(rsync_remote_full "$BACKUP_URL")//rear-${timestamp}.log"
;;
esac
diff --git a/usr/share/rear/lib/global-functions.sh b/usr/share/rear/lib/global-functions.sh
index 32aeb8ca..2edb64a6 100644
--- a/usr/share/rear/lib/global-functions.sh
+++ b/usr/share/rear/lib/global-functions.sh
@@ -259,7 +259,7 @@ function url_scheme() {
# the scheme is the leading part up to '://'
local scheme=${url%%://*}
# rsync scheme does not have to start with rsync:// it can also be scp style
- # see the comments in usr/share/rear/prep/RSYNC/default/100_check_rsync.sh
+ # see the comments in usr/share/rear/lib/rsync-functions.sh
echo $scheme | grep -q ":" && echo rsync || echo $scheme
}
diff --git a/usr/share/rear/lib/rsync-functions.sh b/usr/share/rear/lib/rsync-functions.sh
new file mode 100644
index 00000000..443a9625
--- /dev/null
+++ b/usr/share/rear/lib/rsync-functions.sh
@@ -0,0 +1,178 @@
+# Functions for manipulation of rsync URLs (both OUTPUT_URL and BACKUP_URL)
+
+#### OLD STYLE:
+# BACKUP_URL=[USER@]HOST:PATH # using ssh (no rsh)
+#
+# with rsync protocol PATH is a MODULE name defined in remote /etc/rsyncd.conf file
+# BACKUP_URL=[USER@]HOST::PATH # using rsync
+# BACKUP_URL=rsync://[USER@]HOST[:PORT]/PATH # using rsync (is not compatible with new style!!!)
+
+#### NEW STYLE:
+# BACKUP_URL=rsync://[USER@]HOST[:PORT]/PATH # using ssh
+# BACKUP_URL=rsync://[USER@]HOST[:PORT]::/PATH # using rsync
+
+function rsync_validate () {
+ local url="$1"
+
+ if [[ "$(url_scheme "$url")" != "rsync" ]]; then # url_scheme still recognizes old style
+ BugError "Non-rsync URL $url !"
+ fi
+}
+
+# Determine whether the URL specifies the use of the rsync protocol (rsyncd) or ssh
+# Do not call on non-rsync URLs (use url_scheme first)
+function rsync_proto () {
+ local url="$1"
+
+ rsync_validate "$url"
+ if egrep -q '(::)' <<< $url ; then # new style '::' means rsync protocol
+ echo rsync
+ else
+ echo ssh
+ fi
+}
+
+# Functions to parse the URL into its components:
+# USER, HOST, PORT, PATH
+
+function rsync_user () {
+ local url="$1"
+ local host
+
+ host=$(url_host "$url")
+
+ if grep -q '@' <<< $host ; then
+ echo "${host%%@*}" # grab user name
+ else
+ echo root
+ fi
+}
+
+function rsync_host () {
+ local url="$1"
+ local host
+ local path
+
+ host=$(url_host "$url")
+ path=$(url_path "$url")
+ # remove USER@ if present
+ local tmp2="${host#*@}"
+
+ case "$(rsync_proto "$url")" in
+ (rsync)
+ # tmp2=witsbebelnx02::backup or tmp2=witsbebelnx02::
+ echo "${tmp2%%::*}"
+ ;;
+ (ssh)
+ # tmp2=host or tmp2=host:
+ echo "${tmp2%%:*}"
+ ;;
+ esac
+}
+
+function rsync_path () {
+ local url="$1"
+ local host
+ local path
+ local url_without_scheme
+ local url_without_scheme_user
+
+ host=$(url_host "$url")
+ path=$(url_path "$url")
+ local tmp2="${host#*@}"
+
+ url_without_scheme="${url#*//}"
+ url_without_scheme_user="${url_without_scheme#$(rsync_user "$url")@}"
+
+ case "$(rsync_proto "$url")" in
+
+ (rsync)
+ if grep -q '::' <<< $url_without_scheme_user ; then
+ # we can not use url_path here, it uses / as separator, not ::
+ local url_after_separator="${url_without_scheme_user##*::}"
+ # remove leading / - this is a module name
+ echo "${url_after_separator#/}"
+ else
+ echo "${path#*/}"
+ fi
+ ;;
+ (ssh)
+ if [ "$url_without_scheme" == "$url" ]; then
+ # no scheme - old-style URL
+ if grep -q ':' <<< $url_without_scheme_user ; then
+ echo "${url_without_scheme_user##*:}"
+ else
+ BugError "Old-style rsync URL $url without : !"
+ fi
+ else
+ echo "$path"
+ fi
+ ;;
+
+ esac
+}
+
+function rsync_port () {
+ # XXX changing port not implemented yet
+ echo 873
+}
+
+# Full path to the destination directory on the remote server,
+# includes RSYNC_PREFIX. RSYNC_PREFIX is not given by the URL,
+# it is a global parameter (by default derived from hostname).
+function rsync_path_full () {
+ local url="$1"
+
+ echo "$(rsync_path "$url")/${RSYNC_PREFIX}"
+}
+
+# Argument for the ssh command to log in to the remote host ("user@host")
+function rsync_remote_ssh () {
+ local url="$1"
+
+ local user host
+
+ user="$(rsync_user "$url")"
+ host="$(rsync_host "$url")"
+
+ echo "${user}@${host}"
+}
+
+# Argument for the rsync command to reach the remote host, without path.
+function rsync_remote_base () {
+ local url="$1"
+
+ local user host port
+
+ user="$(rsync_user "$url")"
+ host="$(rsync_host "$url")"
+ port="$(rsync_port "$url")"
+
+ case "$(rsync_proto "$url")" in
+
+ (rsync)
+ echo "rsync://${user}@${host}:${port}/"
+ ;;
+ (ssh)
+ echo "$(rsync_remote_ssh "$url"):"
+ ;;
+
+ esac
+}
+
+# Complete argument to rsync to reach the remote location identified by URL,
+# but without the added RSYNC_PREFIX.
+# This essentially converts our rsync:// URLs into a form accepted by the rsync command.
+function rsync_remote () {
+ local url="$1"
+
+ echo "$(rsync_remote_base "$url")$(rsync_path "$url")"
+}
+
+# Complete argument to rsync including even RSYNC_PREFIX.
+# Determined from the URL and RSYNC_PREFIX.
+function rsync_remote_full () {
+ local url="$1"
+
+ echo "$(rsync_remote_base "$url")$(rsync_path_full "$url")"
+}
diff --git a/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh b/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh
index 519febf5..d00d15e4 100644
--- a/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh
+++ b/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh
@@ -1,20 +1,32 @@
# Create RSYNC_PREFIX under the local TMP_DIR and also on remote rsync server
# RSYNC_PREFIX=$HOSTNAME as set in default.conf
-# create temporary local work-spaces to collect files (we already make the remote backup dir with the correct mode!!)
+local proto host scheme
+
+scheme="$(url_scheme "$OUTPUT_URL")"
+
+# We handle only rsync:// output schemes.
+# ToDo: why does handling of the output URL scheme belong under RSYNC (which is a backup method)?
+# OUTPUT_URL is independent of the chosen backup method, so this code should be moved to be backup-independent.
+test "rsync" = "$scheme" || return 0
+
+proto="$(rsync_proto "$OUTPUT_URL")"
+host="$(rsync_host "$OUTPUT_URL")"
+
+# create temporary local work-spaces to collect files
mkdir -p $v -m0750 "${TMP_DIR}/rsync/${RSYNC_PREFIX}" >&2 || Error "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}'"
-mkdir -p $v -m0755 "${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup" >&2 || Error "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup'"
-case $RSYNC_PROTO in
+case $proto in
(ssh)
- $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}" >/dev/null 2>&1 \
- || Error "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}"
+ $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "$(rsync_remote "$OUTPUT_URL")" >/dev/null 2>&1 \
+ || Error "Could not create '$(rsync_path_full "$OUTPUT_URL")' on remote ${host}"
;;
(rsync)
- $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/" >/dev/null \
- || Error "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}"
+ # This must run before the backup stage. Otherwise --relative gets added to BACKUP_RSYNC_OPTIONS
+ $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${BACKUP_RSYNC_OPTIONS[@]}" "$(rsync_remote "$OUTPUT_URL")/" >/dev/null \
+ || Error "Could not create '$(rsync_path_full "$OUTPUT_URL")' on remote ${host}"
;;
esac
diff --git a/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh b/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh
index 96b62da1..4ddf3cb4 100644
--- a/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh
+++ b/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh
@@ -1,6 +1,17 @@
#
# copy resulting files to remote network (backup) location
+local proto scheme
+
+scheme="$(url_scheme "$OUTPUT_URL")"
+
+# We handle only rsync:// output schemes.
+# ToDo: why does handling of the output URL scheme belong under RSYNC (which is a backup method)?
+# OUTPUT_URL is independent of the chosen backup method, so this code should be moved to be backup-independent.
+test "rsync" = "$scheme" || return 0
+
+proto="$(rsync_proto "$OUTPUT_URL")"
+
LogPrint "Copying resulting files to $OUTPUT_URL location"
# if called as mkbackuponly then we just don't have any result files.
@@ -19,21 +30,21 @@ cp $v $(get_template "RESULT_usage_$OUTPUT.txt") "${TMP_DIR}/rsync/${RSYNC_PREFI
cat "$RUNTIME_LOGFILE" >"${TMP_DIR}/rsync/${RSYNC_PREFIX}/rear.log" \
|| Error "Could not copy $RUNTIME_LOGFILE to local rsync location"
-case $RSYNC_PROTO in
+case $proto in
(ssh)
- Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ ${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/"
+ Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ $(rsync_remote_full "$OUTPUT_URL")/"
# FIXME: Add an explanatory comment why "2>/dev/null" is useful here
# or remove it according to https://github.com/rear/rear/issues/1395
- $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null \
+ $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "$(rsync_remote_full "$OUTPUT_URL")/" 2>/dev/null \
|| Error "Could not copy '${RESULT_FILES[*]}' to $OUTPUT_URL location"
;;
(rsync)
- Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ ${BACKUP_RSYNC_OPTIONS[*]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/"
+ Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ ${BACKUP_RSYNC_OPTIONS[*]} $(rsync_remote_full "$OUTPUT_URL")/"
# FIXME: Add an explanatory comment why "2>/dev/null" is useful here
# or remove it according to https://github.com/rear/rear/issues/1395
- $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null \
+ $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "${BACKUP_RSYNC_OPTIONS[@]}" "$(rsync_remote_full "$OUTPUT_URL")/" 2>/dev/null \
|| Error "Could not copy '${RESULT_FILES[*]}' to $OUTPUT_URL location"
;;
diff --git a/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh b/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh
index eb7df29e..84500039 100644
--- a/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh
+++ b/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh
@@ -25,8 +25,10 @@ case $(basename $BACKUP_PROG) in
(rsync)
if grep -q "no xattrs" "$TMP_DIR/rsync_protocol"; then
+ local host
+ host="$(rsync_host "$BACKUP_URL")"
# no xattrs compiled in remote rsync, so saving SELinux attributes are not possible
- Log "WARNING: --xattrs not possible on system ($RSYNC_HOST) (no xattrs compiled in rsync)"
+ Log "WARNING: --xattrs not possible on system ($host) (no xattrs compiled in rsync)"
# $TMP_DIR/selinux.mode is a trigger during backup to disable SELinux
cat $SELINUX_ENFORCE > $TMP_DIR/selinux.mode
RSYNC_SELINUX= # internal variable used in recover mode (empty means disable SELinux)
diff --git a/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh b/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh
index c964a148..448a1b1a 100644
--- a/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh
+++ b/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh
@@ -3,97 +3,40 @@
# This file is part of Relax-and-Recover, licensed under the GNU General
# Public License. Refer to the included COPYING for full text of license.
-#### OLD STYLE:
-# BACKUP_URL=[USER@]HOST:PATH # using ssh (no rsh)
-#
-# with rsync protocol PATH is a MODULE name defined in remote /etc/rsyncd.conf file
-# BACKUP_URL=[USER@]HOST::PATH # using rsync
-# BACKUP_URL=rsync://[USER@]HOST[:PORT]/PATH # using rsync (is not compatible with new style!!!)
-
-#### NEW STYLE:
-# BACKUP_URL=rsync://[USER@]HOST[:PORT]/PATH # using ssh
-# BACKUP_URL=rsync://[USER@]HOST[:PORT]::/PATH # using rsync
-
if test -z "$BACKUP_URL" ; then
Error "Missing BACKUP_URL=rsync://[USER@]HOST[:PORT][::]/PATH !"
fi
-local host=$(url_host $BACKUP_URL)
local scheme=$(url_scheme $BACKUP_URL) # url_scheme still recognizes old style
-local path=$(url_path $BACKUP_URL)
if [[ "$scheme" != "rsync" ]]; then
Error "Missing BACKUP_URL=rsync://[USER@]HOST[:PORT][::]/PATH !"
fi
-RSYNC_PROTO= # ssh or rsync
-RSYNC_USER=
-RSYNC_HOST=
-RSYNC_PORT=873 # default port (of rsync server)
-RSYNC_PATH=
-
-
-if egrep -q '(::)' <<< $BACKUP_URL ; then # new style '::' means rsync protocol
- RSYNC_PROTO=rsync
-else
- RSYNC_PROTO=ssh
-fi
-
-if grep -q '@' <<< $host ; then
- RSYNC_USER="${host%%@*}" # grab user name
-else
- RSYNC_USER=root
-fi
-
-# remove USER@ if present (we don't need it anymore)
-local tmp2="${host#*@}"
-
-case "$RSYNC_PROTO" in
-
- (rsync)
- # tmp2=witsbebelnx02::backup or tmp2=witsbebelnx02::
- RSYNC_HOST="${tmp2%%::*}"
- # path=/gdhaese1@witsbebelnx02::backup or path=/backup
- if grep -q '::' <<< $path ; then
- RSYNC_PATH="${path##*::}"
- else
- RSYNC_PATH="${path##*/}"
- fi
- ;;
- (ssh)
- # tmp2=host or tmp2=host:
- RSYNC_HOST="${tmp2%%:*}"
- RSYNC_PATH=$path
- ;;
-
-esac
-
-#echo RSYNC_PROTO=$RSYNC_PROTO
-#echo RSYNC_USER=$RSYNC_USER
-#echo RSYNC_HOST=$RSYNC_HOST
-#echo RSYNC_PORT=$RSYNC_PORT
-#echo RSYNC_PATH=$RSYNC_PATH
+local host proto
+host="$(rsync_host "$BACKUP_URL")"
+proto="$(rsync_proto "$BACKUP_URL")"
# check if host is reachable
if test "$PING" ; then
- ping -c 2 "$RSYNC_HOST" >/dev/null || Error "Backup host [$RSYNC_HOST] not reachable."
+ ping -c 2 "$host" >/dev/null || Error "Backup host [$host] not reachable."
else
Log "Skipping ping test"
fi
# check protocol connectivity
-case "$RSYNC_PROTO" in
+case "$proto" in
(rsync)
- Log "Test: $BACKUP_PROG ${BACKUP_RSYNC_OPTIONS[*]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/"
- $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/ >/dev/null \
- || Error "Rsync daemon not running on $RSYNC_HOST"
+ Log "Test: $BACKUP_PROG ${BACKUP_RSYNC_OPTIONS[*]} $(rsync_remote_base "$BACKUP_URL")"
+ $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" $(rsync_remote_base "$BACKUP_URL") >/dev/null \
+ || Error "Rsync daemon not running on $host"
;;
(ssh)
- Log "Test: ssh ${RSYNC_USER}@${RSYNC_HOST} /bin/true"
- ssh ${RSYNC_USER}@${RSYNC_HOST} /bin/true >/dev/null 2>&1 \
- || Error "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]"
+ Log "Test: ssh $(rsync_remote_ssh "$BACKUP_URL") /bin/true"
+ ssh $(rsync_remote_ssh "$BACKUP_URL") /bin/true >/dev/null 2>&1 \
+ || Error "Secure shell connection not setup properly [$(rsync_remote_ssh "$BACKUP_URL")]"
;;
esac
diff --git a/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh b/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh
index e9103531..becf35a0 100644
--- a/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh
+++ b/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh
@@ -3,15 +3,18 @@
# Public License. Refer to the included COPYING for full text of license.
# try to grab the rsync protocol version of rsync on the remote server
-local remote_mountpoint
+local remote_mountpoint host path proto
+host="$(rsync_host "$BACKUP_URL")"
+path="$(rsync_path "$BACKUP_URL")"
+proto="$(rsync_proto "$BACKUP_URL")"
if [ -z "$RSYNC_PROTOCOL_VERSION" ]; then
- case $RSYNC_PROTO in
+ case $proto in
(ssh)
- ssh ${RSYNC_USER}@${RSYNC_HOST} rsync --version >"$TMP_DIR/rsync_protocol" 2>&1 \
- || Error "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]"
+ ssh $(rsync_remote_ssh "$BACKUP_URL") rsync --version >"$TMP_DIR/rsync_protocol" 2>&1 \
+ || Error "Secure shell connection not setup properly [$(rsync_remote_ssh "$BACKUP_URL")]"
if grep -q "protocol version" "$TMP_DIR/rsync_protocol" ; then
RSYNC_PROTOCOL_VERSION=$(grep 'protocol version' "$TMP_DIR/rsync_protocol" | awk '{print $6}')
else
@@ -24,29 +27,29 @@ if [ -z "$RSYNC_PROTOCOL_VERSION" ]; then
RSYNC_PROTOCOL_VERSION=29 # being conservative (old rsync)
;;
esac
- Log "Remote rsync system ($RSYNC_HOST) uses rsync protocol version $RSYNC_PROTOCOL_VERSION"
+ Log "Remote rsync system ($host) uses rsync protocol version $RSYNC_PROTOCOL_VERSION"
else
- Log "Remote rsync system ($RSYNC_HOST) uses rsync protocol version $RSYNC_PROTOCOL_VERSION (overruled by user)"
+ Log "Remote rsync system ($host) uses rsync protocol version $RSYNC_PROTOCOL_VERSION (overruled by user)"
fi
-if [ "${RSYNC_USER}" != "root" -a $RSYNC_PROTO = "ssh" ]; then
+if [ "$(rsync_user "$BACKUP_URL")" != "root" -a $proto = "ssh" ]; then
if [ $RSYNC_PROTOCOL_VERSION -gt 29 ]; then
if grep -q "no xattrs" "$TMP_DIR/rsync_protocol"; then
# no xattrs available in remote rsync, so --fake-super is not possible
- Error "rsync --fake-super not possible on system ($RSYNC_HOST) (no xattrs compiled in rsync)"
+ Error "rsync --fake-super not possible on system ($host) (no xattrs compiled in rsync)"
else
# when using --fake-super we must have user_xattr mount options on the remote mntpt
- remote_mountpoint=$(ssh ${RSYNC_USER}@${RSYNC_HOST} 'cd ${RSYNC_PATH}; df -P .' 2>/dev/null | tail -1 | awk '{print $6}')
- ssh ${RSYNC_USER}@${RSYNC_HOST} "cd ${RSYNC_PATH} && touch .is_xattr_supported && setfattr -n user.comment -v 'File created by ReaR to test if this filesystems supports extended attributes.' .is_xattr_supported && getfattr -n user.comment .is_xattr_supported 1>/dev/null; find .is_xattr_supported -empty -delete" \
+ remote_mountpoint=$(ssh $(rsync_remote_ssh "$BACKUP_URL") 'cd ${path}; df -P .' 2>/dev/null | tail -1 | awk '{print $6}')
+ ssh $(rsync_remote_ssh "$BACKUP_URL") "cd ${path} && touch .is_xattr_supported && setfattr -n user.comment -v 'File created by ReaR to test if this filesystems supports extended attributes.' .is_xattr_supported && getfattr -n user.comment .is_xattr_supported 1>/dev/null; find .is_xattr_supported -empty -delete" \
|| Error "Remote file system $remote_mountpoint does not have user_xattr mount option set!"
#BACKUP_RSYNC_OPTIONS+=( --xattrs --rsync-path="rsync --fake-super" )
# see issue #366 for explanation of removing --xattrs
BACKUP_RSYNC_OPTIONS+=( --rsync-path="rsync --fake-super" )
fi
else
- Error "rsync --fake-super not possible on system ($RSYNC_HOST) (please upgrade rsync to 3.x)"
+ Error "rsync --fake-super not possible on system ($host) (please upgrade rsync to 3.x)"
fi
fi
diff --git a/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh b/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh
index 993088be..0fa08587 100644
--- a/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh
+++ b/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh
@@ -7,7 +7,11 @@ get_size() {
local backup_prog_rc
local restore_log_message
-LogPrint "Restoring $BACKUP_PROG backup from '${RSYNC_HOST}:${RSYNC_PATH}'"
+local host path
+host="$(rsync_host "$BACKUP_URL")"
+path="$(rsync_path "$BACKUP_URL")"
+
+LogPrint "Restoring $BACKUP_PROG backup from '${host}:${path}'"
ProgressStart "Restore operation"
(
@@ -15,18 +19,18 @@ ProgressStart "Restore operation"
(rsync)
- case $RSYNC_PROTO in
+ case $(rsync_proto "$BACKUP_URL") in
(ssh)
- Log $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/backup"/ $TARGET_FS_ROOT/
+ Log $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" "$(rsync_remote_full "$BACKUP_URL")/backup"/ $TARGET_FS_ROOT/
$BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" \
- "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/backup"/ \
+ "$(rsync_remote_full "$BACKUP_URL")/backup"/ \
$TARGET_FS_ROOT/
;;
(rsync)
$BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" \
- "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup"/ $TARGET_FS_ROOT/
+ "$(rsync_remote_full "$BACKUP_URL")/backup"/ $TARGET_FS_ROOT/
;;
esac
diff --git a/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh b/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh
index b2fb72f5..76132794 100644
--- a/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh
+++ b/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh
@@ -1,14 +1,14 @@
# check the backup archive on remote rsync server
-case $RSYNC_PROTO in
+case $(rsync_proto "$BACKUP_URL") in
(ssh)
- ssh ${RSYNC_USER}@${RSYNC_HOST} "ls -ld ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1 \
- || Error "Archive not found on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]"
+ ssh $(rsync_remote_ssh "$BACKUP_URL") "ls -ld $(rsync_path_full "$BACKUP_URL")/backup" >/dev/null 2>&1 \
+ || Error "Archive not found on [$(rsync_remote_full "$BACKUP_URL")]"
;;
(rsync)
- $BACKUP_PROG "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1 \
- || Error "Archive not found on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]"
+ $BACKUP_PROG "$(rsync_remote_full "$BACKUP_URL")/backup" >/dev/null 2>&1 \
+ || Error "Archive not found on [$(rsync_remote_full "$BACKUP_URL")]"
;;
esac
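The hunks above all replace direct use of the old RSYNC_USER/RSYNC_HOST/RSYNC_PATH/RSYNC_PREFIX variables with helper functions that derive the same pieces from BACKUP_URL. A small illustrative sketch of that decomposition with an invented URL; the exact strings returned by the helpers are not shown in this patch and are only assumptions inferred from the substitutions above:

# Hypothetical BACKUP_URL as it might be set in /etc/rear/local.conf (illustrative value only):
BACKUP_URL=rsync://backupuser@backupserver/rear
# Inferred from the substitutions in this patch (assumed, not authoritative):
#   rsync_proto "$BACKUP_URL"        # ssh or rsync, replacing the old $RSYNC_PROTO
#   rsync_host "$BACKUP_URL"         # e.g. backupserver, replacing the old $RSYNC_HOST
#   rsync_path "$BACKUP_URL"         # e.g. /rear, replacing the old $RSYNC_PATH
#   rsync_remote_ssh "$BACKUP_URL"   # e.g. backupuser@backupserver, replacing ${RSYNC_USER}@${RSYNC_HOST}
#   rsync_remote_full "$BACKUP_URL"  # the full remote spec, replacing ...${RSYNC_PATH}/${RSYNC_PREFIX}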

@ -0,0 +1,751 @@
diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf
index 23a83b71..0d13b487 100644
--- a/usr/share/rear/conf/default.conf
+++ b/usr/share/rear/conf/default.conf
@@ -416,6 +416,18 @@ test "$RECOVERY_UPDATE_URL" || RECOVERY_UPDATE_URL=""
# export MIGRATION_MODE='true'
# directly before he calls "rear recover":
test "$MIGRATION_MODE" || MIGRATION_MODE=''
+####
+
+####
+# Formatting DASDs (S/390 specific)
+# DASD (Direct Access Storage Device) denotes a disk drive on the S/390 architecture.
+# DASDs need to be formatted before use (even before creating a partition table on them).
+# By default ReaR will format the DASDs that are going to be used to recreate the system
+# (those referenced in disklayout.conf) before recreating the disk layout.
+# This can be suppressed by setting FORMAT_DASDS="false", which can be useful when one intends
+# to use already formatted DASDs as the recovery target.
+FORMAT_DASDS=""
+####
##
# Resizing partitions in MIGRATION_MODE during "rear recover"
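A minimal example of how this new option could be used, assuming the usual /etc/rear/local.conf configuration file (the setting shown is the only non-default):

# /etc/rear/local.conf: keep the already formatted DASDs as they are during "rear recover"
FORMAT_DASDS="false"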
diff --git a/usr/share/rear/layout/prep-for-mount/Linux-s390/205_s390_enable_disk.sh b/usr/share/rear/layout/prep-for-mount/Linux-s390/205_s390_enable_disk.sh
new file mode 120000
index 00000000..5f7a2ac0
--- /dev/null
+++ b/usr/share/rear/layout/prep-for-mount/Linux-s390/205_s390_enable_disk.sh
@@ -0,0 +1 @@
+../../prepare/Linux-s390/205_s390_enable_disk.sh
\ No newline at end of file
diff --git a/usr/share/rear/layout/prepare/GNU/Linux/100_include_partition_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/100_include_partition_code.sh
index 13c69ce8..2a2bc33f 100644
--- a/usr/share/rear/layout/prepare/GNU/Linux/100_include_partition_code.sh
+++ b/usr/share/rear/layout/prepare/GNU/Linux/100_include_partition_code.sh
@@ -24,6 +24,7 @@ fi
### Prepare a disk for partitioning/general usage.
create_disk() {
local component disk size label junk
+ local blocksize layout dasdtype dasdcyls junk2
read component disk size label junk < <(grep "^disk $1 " "$LAYOUT_FILE")
### Disks should be block devices.
@@ -67,7 +68,8 @@ sync
EOF
- create_partitions "$disk" "$label"
+ # $junk can contain useful DASD-specific fields
+ create_partitions "$disk" "$label" "$junk"
cat >> "$LAYOUT_CODE" <<EOF
# Make sure device nodes are visible (eg. in RHEL4)
@@ -93,6 +95,11 @@ create_partitions() {
### List partition types/names to detect disk label type.
local -a names=()
local part disk size pstart name junk
+ local orig_block_size layout dasdtype dasdcyls junk2
+ if [ "$label" == dasd ]; then
+ # dasd has more fields - junk is not junk anymore
+ read orig_block_size layout dasdtype dasdcyls junk2 <<<$3
+ fi
while read part disk size pstart name junk ; do
names+=( $name )
case $name in
@@ -227,10 +234,12 @@ EOF
if [[ "$end" ]] ; then
end=$( mathlib_calculate "$end - 1" )
fi
- if [[ "$ARCH" == "Linux-s390" ]] ; then
- # if dasd disk is LDL formated, then do not partition it, because it is partitioned and can take only partition
- if [[ ! "${listDasdLdl[@]}" =~ "$device" ]] ; then
- echo "not LDL dasd formated disk, create a partition"
+ if [[ "$ARCH" == "Linux-s390" && "$label" == dasd ]] ; then
+ # LDL formatted disks are already partitioned and must not be partitioned with parted or fdasd; it would fail
+ if [ "$layout" == LDL ]; then
+ Debug "$device: LDL formatted DASD, do not create a partition"
+ else
+ Debug "$device: ${layout} formatted DASD, create a partition"
cat >> "$LAYOUT_CODE" <<EOF
create_disk_partition "$device" "$name" $number $start $end
EOF
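To illustrate how the extra DASD fields travel through $junk into create_partitions, a small sketch with an invented disklayout.conf entry (device name, size, block size and cylinder count are hypothetical; the field order matches what 200_partition_layout.sh writes further below):

# Hypothetical entry: disk /dev/dasda 7385333760 dasd 4096 CDL ECKD 10017
read component disk size label junk <<<"disk /dev/dasda 7385333760 dasd 4096 CDL ECKD 10017"
# junk is now "4096 CDL ECKD 10017"; create_partitions receives it as $3 and splits it further:
read orig_block_size layout dasdtype dasdcyls junk2 <<<"$junk"
echo "$orig_block_size $layout $dasdtype $dasdcyls"   # -> 4096 CDL ECKD 10017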
diff --git a/usr/share/rear/layout/prepare/Linux-s390/090_include_dasd_code.sh b/usr/share/rear/layout/prepare/Linux-s390/090_include_dasd_code.sh
new file mode 100644
index 00000000..fc5be463
--- /dev/null
+++ b/usr/share/rear/layout/prepare/Linux-s390/090_include_dasd_code.sh
@@ -0,0 +1,17 @@
+# Generate code for low-level formatting of a DASD
+
+dasd_format_code() {
+ local device size blocksize layout dasdtype dasdcyls
+
+ device="$1"
+ size="$2"
+ blocksize="$3"
+ layout="$4"
+ dasdtype="$5"
+ dasdcyls="$6"
+
+ has_binary dasdfmt || Error "Cannot find 'dasdfmt' command"
+
+ LogPrint 'dasdfmt:' $device ', blocksize:' $blocksize ', layout:' $layout
+ echo "dasdfmt -b $blocksize -d $layout -y $device"
+}
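For reference, called with hypothetical arguments this helper emits one dasdfmt line into the format script, matching the example command quoted in the old code of 205_s390_enable_disk.sh below:

# dasd_format_code /dev/dasda 7385333760 4096 cdl ECKD 10017   (illustrative values)
# appends to the generated script:
dasdfmt -b 4096 -d cdl -y /dev/dasda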
diff --git a/usr/share/rear/layout/prepare/Linux-s390/205_s390_enable_disk.sh b/usr/share/rear/layout/prepare/Linux-s390/205_s390_enable_disk.sh
index c4037e02..0f6946a9 100644
--- a/usr/share/rear/layout/prepare/Linux-s390/205_s390_enable_disk.sh
+++ b/usr/share/rear/layout/prepare/Linux-s390/205_s390_enable_disk.sh
@@ -2,45 +2,36 @@
# Before we can compare or map DASD devices we must enable them.
# This operation is only needed during "rear recover".
-format_s390_disk() {
- LogPrint "run dasdfmt"
- while read line ; do
- LogPrint 'dasdfmt:' "$line"
- # example format command: dasdfmt -b 4096 -d cdl -y /dev/dasda
- # where
- # b is the block size
- # d is the layout:
- # cdl - compatible disk layout (can be shared with zos and zvm apps)
- # ldl - linux disk layout
- # y - answer yes
- device=$( echo $line | awk '{ print $7 }' )
- blocksize=$( echo $line | awk '{ print $3 }' )
- layout=$( echo $line | awk '{ print tolower($5) }' )
- if [[ "$layout" == "ldl" ]] ; then
- # listDasdLdl contains devices such as /dev/dasdb that are formatted as LDL
- # LDL formatted disks are already partitioned and should not be partitioned with parted or fdasd , it will fail
- # this var, listDasdLdl, is used by 100_include_partition_code.sh to exclude writing partition code to diskrestore.sh for LDL disks
- listDasdLdl+=( $device )
- LogPrint "LDL disk added to listDasdLdl:" ${listDasdLdl[@]}
- fi
- LogPrint 'dasdfmt:' $device ', blocksize:' $blocksize ', layout:' $layout
- # dasd format
- dasdfmt -b $blocksize -d $layout -y $device
- done < <( grep "^dasdfmt " "$LAYOUT_FILE" )
-}
-
+DISK_MAPPING_HINTS=()
enable_s390_disk() {
+ local keyword device bus len newname
+
LogPrint "run chccwdev"
- while read line ; do
- LogPrint 'dasd channel:' "$line"
- device=$( echo $line | awk '{ print $4 }' )
- bus=$( echo $line | awk '{ print $2 }' )
- channel=$( echo $line | awk '{ print $5 }' )
- LogPrint 'chccwdev:' $device ', bus:' $bus ', channel:' $channel
- # dasd channel enable
- chccwdev -e $bus
- done < <( grep "^dasd_channel " "$LAYOUT_FILE" )
+ while read len device bus ; do
+ # this while loop must be outside the pipeline so that variables propagate outside
+ # (pipelines run in subshells)
+ LogPrint "Enabling DASD $device with virtual device number $bus"
+ if chccwdev -e $bus ; then
+ newname=$(lsdasd $bus | awk "/$bus/ { print \$3}" )
+ if ! test $newname ; then
+ LogPrintError "New device with virtual device number $bus not found among online DASDs"
+ continue
+ fi
+ if [ "$newname" != "$device" ]; then
+ LogPrint "original DASD '$device' changed name to '$newname'"
+ test "$MIGRATION_MODE" || MIGRATION_MODE='true'
+ fi
+ DISK_MAPPING_HINTS+=( "/dev/$device /dev/$newname" )
+ else
+ LogPrintError "Failed to enable $bus"
+ fi
+ done < <( grep "^dasd_channel " "$LAYOUT_FILE" | while read keyword bus device; do
+ # add device name length, so that "dasdb" sorts properly before "dasdaa"
+ # we need to create devices in the same order as the kernel orders them (by minor number)
+ # - this increases the chance that they will get identical names
+ echo ${#device} $device $bus
+ done | sort -k1n -k2 )
}
# May need to look at $OS_VENDOR also as DASD disk layout is distro specific:
@@ -49,7 +40,6 @@ case $OS_MASTER_VENDOR in
# "Fedora" also handles Red Hat
# "Debian" also handles Ubuntu
enable_s390_disk
- format_s390_disk
;;
(*)
LogPrintError "No code for DASD disk device enablement on $OS_MASTER_VENDOR"
diff --git a/usr/share/rear/layout/prepare/Linux-s390/360_generate_dasd_format_code.sh b/usr/share/rear/layout/prepare/Linux-s390/360_generate_dasd_format_code.sh
new file mode 100644
index 00000000..14bb942d
--- /dev/null
+++ b/usr/share/rear/layout/prepare/Linux-s390/360_generate_dasd_format_code.sh
@@ -0,0 +1,51 @@
+# DASD_FORMAT_CODE is the script to recreate the dasd formatting (dasdformat.sh).
+
+local component disk size label junk
+local blocksize layout dasdtype dasdcyls junk2
+
+
+save_original_file "$DASD_FORMAT_CODE"
+
+# Initialize
+
+echo '#!/bin/bash' >"$DASD_FORMAT_CODE"
+
+# Show the current output of lsdasd, as it can be useful for identifying disks
+# (in particular it shows the Linux device name <-> virtual device number mapping,
+# formatted / unformatted status and the number/size of blocks when formatted)
+echo "# Current output of 'lsdasd':" >>"$DASD_FORMAT_CODE"
+lsdasd | sed -e 's/^/# /' >>"$DASD_FORMAT_CODE"
+
+cat <<EOF >>"$DASD_FORMAT_CODE"
+
+LogPrint "Start DASD format restoration."
+
+set -e
+set -x
+
+EOF
+
+while read component disk size label junk; do
+ if [ "$label" == dasd ]; then
+ # Ignore excluded components.
+ # Normally they are removed in 520_exclude_components.sh,
+ # but we run before it, so we must skip them here as well.
+ if IsInArray "$disk" "${EXCLUDE_RECREATE[@]}" ; then
+ Log "Excluding $disk from DASD reformatting."
+ continue
+ fi
+ # dasd has more fields - junk is not junk anymore
+ read blocksize layout dasdtype dasdcyls junk2 <<<$junk
+ dasd_format_code "$disk" "$size" "$blocksize" "$layout" "$dasdtype" "$dasdcyls" >> "$DASD_FORMAT_CODE" || \
+ LogPrintError "Error producing DASD format code for $disk"
+ fi
+done < <(grep "^disk " "$LAYOUT_FILE")
+
+cat <<EOF >>"$DASD_FORMAT_CODE"
+
+set +x
+set +e
+
+LogPrint "DASD(s) formatted."
+
+EOF
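Put together, the generated dasdformat.sh would look roughly like this for a single hypothetical CDL-formatted DASD (device name, block size and the commented lsdasd listing are illustrative):

#!/bin/bash
# Current output of 'lsdasd':
# ... (commented lsdasd listing of the original system) ...

LogPrint "Start DASD format restoration."

set -e
set -x

dasdfmt -b 4096 -d CDL -y /dev/dasda

set +x
set +e

LogPrint "DASD(s) formatted."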
diff --git a/usr/share/rear/layout/prepare/Linux-s390/370_confirm_dasd_format_code.sh b/usr/share/rear/layout/prepare/Linux-s390/370_confirm_dasd_format_code.sh
new file mode 100644
index 00000000..5ba4edd5
--- /dev/null
+++ b/usr/share/rear/layout/prepare/Linux-s390/370_confirm_dasd_format_code.sh
@@ -0,0 +1,69 @@
+# adapted from 100_confirm_layout_code.sh
+#
+# Let the user confirm the
+# DASD format code (dasdformat.sh) script.
+#
+
+is_false "$FORMAT_DASDS" && return 0
+
+# Show the user confirmation dialog in any case, but when not in migration mode
+# automatically proceed with the shorter timeout USER_INPUT_INTERRUPT_TIMEOUT (by default 10 seconds)
+# to avoid longer delays (USER_INPUT_TIMEOUT is by default 300 seconds) in case of unattended recovery:
+# (taken from 120_confirm_wipedisk_disks.sh)
+local timeout="$USER_INPUT_TIMEOUT"
+is_true "$MIGRATION_MODE" || timeout="$USER_INPUT_INTERRUPT_TIMEOUT"
+
+rear_workflow="rear $WORKFLOW"
+original_disk_space_usage_file="$VAR_DIR/layout/config/df.txt"
+rear_shell_history="$( echo -e "cd $VAR_DIR/layout/\nvi $DASD_FORMAT_CODE\nless $DASD_FORMAT_CODE" )"
+unset choices
+choices[0]="Confirm DASD format script and continue '$rear_workflow'"
+choices[1]="Edit DASD format script ($DASD_FORMAT_CODE)"
+choices[2]="View DASD format script ($DASD_FORMAT_CODE)"
+choices[3]="View original disk space usage ($original_disk_space_usage_file)"
+choices[4]="Confirm what is currently on the DASDs, skip formatting them and continue '$rear_workflow'"
+choices[5]="Use Relax-and-Recover shell and return back to here"
+choices[6]="Abort '$rear_workflow'"
+prompt="Confirm or edit the DASD format script"
+choice=""
+wilful_input=""
+# When USER_INPUT_DASD_FORMAT_CODE_CONFIRMATION has any 'true' value be liberal in what you accept and
+# assume choices[0] 'Confirm DASD format' was actually meant:
+is_true "$USER_INPUT_DASD_FORMAT_CODE_CONFIRMATION" && USER_INPUT_DASD_FORMAT_CODE_CONFIRMATION="${choices[0]}"
+while true ; do
+ choice="$( UserInput -I DASD_FORMAT_CODE_CONFIRMATION -t "$timeout" -p "$prompt" -D "${choices[0]}" "${choices[@]}" )" && wilful_input="yes" || wilful_input="no"
+ case "$choice" in
+ (${choices[0]})
+ # Confirm DASD format file and continue:
+ is_true "$wilful_input" && LogPrint "User confirmed DASD format script" || LogPrint "Continuing '$rear_workflow' by default"
+ break
+ ;;
+ (${choices[1]})
+ # Run 'vi' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user:
+ vi $DASD_FORMAT_CODE 0<&6 1>&7 2>&8
+ ;;
+ (${choices[2]})
+ # Run 'less' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user:
+ less $DASD_FORMAT_CODE 0<&6 1>&7 2>&8
+ ;;
+ (${choices[3]})
+ # Run 'less' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user:
+ less $original_disk_space_usage_file 0<&6 1>&7 2>&8
+ ;;
+ (${choices[4]})
+ # Confirm what is on the disks and continue without formatting
+ FORMAT_DASDS="false"
+ ;;
+ (${choices[5]})
+ # rear_shell runs 'bash' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user:
+ rear_shell "" "$rear_shell_history"
+ ;;
+ (${choices[6]})
+ abort_dasd_format
+ Error "User chose to abort '$rear_workflow' in ${BASH_SOURCE[0]}"
+ ;;
+ esac
+done
+
+chmod +x $DASD_FORMAT_CODE
+
diff --git a/usr/share/rear/layout/prepare/Linux-s390/400_run_dasd_format_code.sh b/usr/share/rear/layout/prepare/Linux-s390/400_run_dasd_format_code.sh
new file mode 100644
index 00000000..16451af6
--- /dev/null
+++ b/usr/share/rear/layout/prepare/Linux-s390/400_run_dasd_format_code.sh
@@ -0,0 +1,185 @@
+# adapted from 200_run_layout_code.sh
+#
+# Run the DASD format code (dasdformat.sh)
+# again and again until it succeeds or the user aborts.
+#
+
+# Skip DASD formatting when the user has explicitly specified to not format them
+# or when the user selected "Confirm what is currently on the DASDs, skip formatting them"
+# in 370_confirm_dasd_format_code.sh
+
+is_false "$FORMAT_DASDS" && return 0
+
+function lsdasd_output () {
+ lsdasd 1>> >( tee -a "$RUNTIME_LOGFILE" 1>&7 )
+}
+
+rear_workflow="rear $WORKFLOW"
+original_disk_space_usage_file="$VAR_DIR/layout/config/df.txt"
+rear_shell_history="$( echo -e "cd $VAR_DIR/layout/\nvi $DASD_FORMAT_CODE\nless $RUNTIME_LOGFILE" )"
+wilful_input=""
+
+unset choices
+choices[0]="Rerun DASD format script ($DASD_FORMAT_CODE)"
+choices[1]="View '$rear_workflow' log file ($RUNTIME_LOGFILE)"
+choices[2]="Edit DASD format script ($DASD_FORMAT_CODE)"
+choices[3]="Show what is currently on the disks ('lsdasd' device list)"
+choices[4]="View original disk space usage ($original_disk_space_usage_file)"
+choices[5]="Use Relax-and-Recover shell and return back to here"
+choices[6]="Confirm what is currently on the disks and continue '$rear_workflow'"
+choices[7]="Abort '$rear_workflow'"
+prompt="DASD format choices"
+
+choice=""
+# When USER_INPUT_DASD_FORMAT_CODE_RUN has any 'true' value be liberal in what you accept and
+# assume choices[0] 'Rerun DASD format script' was actually meant
+# regardless that this likely lets 'rear recover' run an endless loop
+# of failed DASD format attempts but ReaR must obey what the user specified
+# (perhaps it is intended to let 'rear recover' loop here until an admin intervenes):
+is_true "$USER_INPUT_DASD_FORMAT_CODE_RUN" && USER_INPUT_DASD_FORMAT_CODE_RUN="${choices[0]}"
+
+unset confirm_choices
+confirm_choices[0]="Confirm recreated DASD format and continue '$rear_workflow'"
+confirm_choices[1]="Go back one step to redo DASD format"
+confirm_choices[2]="Use Relax-and-Recover shell and return back to here"
+confirm_choices[3]="Abort '$rear_workflow'"
+confirm_prompt="Confirm the recreated DASD format or go back one step"
+confirm_choice=""
+# When USER_INPUT_DASD_FORMAT_MIGRATED_CONFIRMATION has any 'true' value be liberal in what you accept and
+# assume confirm_choices[0] 'Confirm recreated DASD format and continue' was actually meant:
+is_true "$USER_INPUT_DASD_FORMAT_MIGRATED_CONFIRMATION" && USER_INPUT_DASD_FORMAT_MIGRATED_CONFIRMATION="${confirm_choices[0]}"
+
+# Run the DASD format code (dasdformat.sh)
+# again and again until it succeeds or the user aborts
+# or the user confirms to continue with what is currently on the disks
+# (the user may have setup manually what he needs via the Relax-and-Recover shell):
+while true ; do
+ prompt="The DASD format had failed"
+ # After switching to recreating with DASD format script
+ # change choices[0] from "Run ..." to "Rerun ...":
+ choices[0]="Rerun DASD format script ($DASD_FORMAT_CODE)"
+ # Run DASD_FORMAT_CODE in a sub-shell because it sets 'set -e'
+ # so that it exits the running shell in case of an error
+ # but that exit must not exit this running bash here:
+ ( source $DASD_FORMAT_CODE )
+ # One must explicitly test whether or not $? is zero in a separated bash command
+ # because with bash 3.x and bash 4.x code like
+ # # ( set -e ; cat qqq ; echo "hello" ) && echo ok || echo failed
+ # cat: qqq: No such file or directory
+ # hello
+ # ok
+ # does not work as one may expect (cf. what "man bash" describes for 'set -e').
+ # There is a subtle behavioural difference between bash 3.x and bash 4.x
+ # when a script that has 'set -e' set gets sourced:
+ # With bash 3.x the 'set -e' inside the sourced script is effective:
+ # # echo 'set -e ; cat qqq ; echo hello' >script.sh
+ # # ( source script.sh ) && echo ok || echo failed
+ # cat: qqq: No such file or directory
+ # failed
+ # With bash 4.x the 'set -e' inside the sourced script gets noneffective:
+ # # echo 'set -e ; cat qqq ; echo hello' >script.sh
+ # # ( source script.sh ) && echo ok || echo failed
+ # cat: qqq: No such file or directory
+ # hello
+ # ok
+ # With bash 3.x and bash 4.x testing $? in a separated bash command
+ # keeps the 'set -e' inside the sourced script effective:
+ # # echo 'set -e ; cat qqq ; echo hello' >script.sh
+ # # ( source script.sh ) ; (( $? == 0 )) && echo ok || echo failed
+ # cat: qqq: No such file or directory
+ # failed
+ # See also https://github.com/rear/rear/pull/1573#issuecomment-344303590
+ if (( $? == 0 )) ; then
+ prompt="DASD format had been successful"
+ # When DASD_FORMAT_CODE succeeded and when not in migration mode
+ # break the outer while loop and continue the "rear recover" workflow
+ # which means continue with restoring the backup:
+ is_true "$MIGRATION_MODE" || break
+ # When DASD_FORMAT_CODE succeeded in migration mode
+ # let the user explicitly confirm the recreated (and usually migrated) format
+ # before continuing the "rear recover" workflow with restoring the backup.
+ # Show the recreated DASD format to the user on his terminal (and also in the log file):
+ LogPrint "Recreated DASD format:"
+ lsdasd_output
+ # Run an inner while loop with a user dialog so that the user can inspect the recreated DASD format
+ # and perhaps even manually fix the recreated DASD format if it is not what the user wants
+ # (e.g. by using the Relax-and-Recover shell and returning back to this user dialog):
+ while true ; do
+ confirm_choice="$( UserInput -I DASD_FORMAT_MIGRATED_CONFIRMATION -p "$confirm_prompt" -D "${confirm_choices[0]}" "${confirm_choices[@]}" )" && wilful_input="yes" || wilful_input="no"
+ case "$confirm_choice" in
+ (${confirm_choices[0]})
+ # Confirm recreated DASD format and continue:
+ is_true "$wilful_input" && LogPrint "User confirmed recreated DASD format" || LogPrint "Continuing with recreated DASD format by default"
+ # Break the outer while loop and continue with restoring the backup:
+ break 2
+ ;;
+ (${confirm_choices[1]})
+ # Go back one step to redo DASD format:
+ # Only break the inner while loop (i.e. this user dialog loop)
+ # and continue with the next user dialog below:
+ break
+ ;;
+ (${confirm_choices[2]})
+ # rear_shell runs 'bash' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user:
+ rear_shell "" "$rear_shell_history"
+ ;;
+ (${confirm_choices[3]})
+ abort_dasd_format
+ Error "User did not confirm the recreated DASD format but aborted '$rear_workflow' in ${BASH_SOURCE[0]}"
+ ;;
+ esac
+ done
+ fi
+ # Run an inner while loop with a user dialog so that the user can fix things
+ # when DASD_FORMAT_CODE failed.
+ # Such a fix does not necessarily mean the user must change
+ # the dasdformat.sh script when DASD_FORMAT_CODE failed.
+ # The user might also fix things by only using the Relax-and-Recover shell and
+ # then confirm what is on the disks and continue with restoring the backup
+ # or abort this "rear recover" run to re-try from scratch.
+ while true ; do
+ choice="$( UserInput -I DASD_FORMAT_CODE_RUN -p "$prompt" -D "${choices[0]}" "${choices[@]}" )" && wilful_input="yes" || wilful_input="no"
+ case "$choice" in
+ (${choices[0]})
+ # Rerun or run (after switching to recreating with DASD format script) DASD format script:
+ is_true "$wilful_input" && LogPrint "User runs DASD format script" || LogPrint "Running DASD format script by default"
+ # Only break the inner while loop (i.e. the user dialog loop):
+ break
+ ;;
+ (${choices[1]})
+ # Run 'less' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user:
+ less $RUNTIME_LOGFILE 0<&6 1>&7 2>&8
+ ;;
+ (${choices[2]})
+ # Run 'vi' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user:
+ vi $DASD_FORMAT_CODE 0<&6 1>&7 2>&8
+ ;;
+ (${choices[3]})
+ LogPrint "This is the current list of DASDs:"
+ lsdasd_output
+ ;;
+ (${choices[4]})
+ # Run 'less' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user:
+ less $original_disk_space_usage_file 0<&6 1>&7 2>&8
+ ;;
+ (${choices[5]})
+ # rear_shell runs 'bash' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user:
+ rear_shell "" "$rear_shell_history"
+ ;;
+ (${choices[6]})
+ # Confirm what is on the disks and continue:
+ # Break the outer while loop and continue with restoring the backup:
+ break 2
+ ;;
+ (${choices[7]})
+ abort_dasd_format
+ Error "User chose to abort '$rear_workflow' in ${BASH_SOURCE[0]}"
+ ;;
+ esac
+ done
+# End of the outer while loop:
+done
+
+# Local functions must be 'unset' because bash does not support 'local function ...'
+# cf. https://unix.stackexchange.com/questions/104755/how-can-i-create-a-local-function-in-my-bashrc
+unset -f lsdasd_output
diff --git a/usr/share/rear/layout/prepare/default/010_prepare_files.sh b/usr/share/rear/layout/prepare/default/010_prepare_files.sh
index 7a980e63..4191be33 100644
--- a/usr/share/rear/layout/prepare/default/010_prepare_files.sh
+++ b/usr/share/rear/layout/prepare/default/010_prepare_files.sh
@@ -7,6 +7,8 @@ LAYOUT_CODE="$VAR_DIR/layout/diskrestore.sh"
LAYOUT_XFS_OPT_DIR="$VAR_DIR/layout/xfs"
LAYOUT_XFS_OPT_DIR_RESTORE="$LAYOUT_XFS_OPT_DIR/restore"
+DASD_FORMAT_CODE="$VAR_DIR/layout/dasdformat.sh"
+
FS_UUID_MAP="$VAR_DIR/layout/fs_uuid_mapping"
LUN_WWID_MAP="$VAR_DIR/layout/lun_wwid_mapping"
diff --git a/usr/share/rear/layout/prepare/default/250_compare_disks.sh b/usr/share/rear/layout/prepare/default/250_compare_disks.sh
index c459b928..751433ba 100644
--- a/usr/share/rear/layout/prepare/default/250_compare_disks.sh
+++ b/usr/share/rear/layout/prepare/default/250_compare_disks.sh
@@ -54,7 +54,9 @@ local more_than_one_same_orig_size=''
# Cf. the "Compare disks one by one" code below:
while read disk dev size junk ; do
if IsInArray "$size" "${original_system_used_disk_sizes[@]}" ; then
- more_than_one_same_orig_size='true'
+ if ! has_mapping_hint "$dev" ; then
+ more_than_one_same_orig_size='true'
+ fi
else
original_system_used_disk_sizes+=( "$size" )
fi
@@ -109,14 +111,17 @@ fi
# No further disk comparisons are needed when MIGRATION_MODE is already set true above:
if ! is_true "$MIGRATION_MODE" ; then
# Compare original disks and their possible target disk one by one:
- while read disk dev size junk ; do
- dev=$( get_sysfs_name $dev )
+ while read disk devnode size junk ; do
+ dev=$( get_sysfs_name $devnode )
Log "Comparing $dev"
if test -e "/sys/block/$dev" ; then
Log "Device /sys/block/$dev exists"
newsize=$( get_disk_size $dev )
if test "$newsize" -eq "$size" ; then
LogPrint "Device $dev has expected (same) size $size bytes (will be used for '$WORKFLOW')"
+ elif test "$( get_mapping_hint $devnode )" == "$devnode" ; then
+ Debug "Found identical mapping hint ${devnode} -> ${devnode}"
+ LogPrint "Device $dev found according to mapping hints (will be used for '$WORKFLOW')"
else
LogPrint "Device $dev has size $newsize bytes but $size bytes is expected (needs manual configuration)"
MIGRATION_MODE='true'
diff --git a/usr/share/rear/layout/prepare/default/300_map_disks.sh b/usr/share/rear/layout/prepare/default/300_map_disks.sh
index 2e90768c..468aa35c 100644
--- a/usr/share/rear/layout/prepare/default/300_map_disks.sh
+++ b/usr/share/rear/layout/prepare/default/300_map_disks.sh
@@ -112,7 +112,14 @@ while read keyword orig_device orig_size junk ; do
# Continue with next original device when it is already used as source in the mapping file:
is_mapping_source "$orig_device" && continue
# First, try to find if there is a current disk with same name and same size as the original:
- sysfs_device_name="$( get_sysfs_name "$orig_device" )"
+ # (possibly influenced by mapping hints if known)
+ if has_mapping_hint "$orig_device" ; then
+ candidate_target_device_name="$( get_mapping_hint "$orig_device" )"
+ Debug "Using mapping hint ${candidate_target_device_name} as candidate for $orig_device mapping"
+ else
+ candidate_target_device_name="$orig_device"
+ fi
+ sysfs_device_name="$( get_sysfs_name "$candidate_target_device_name" )"
current_device="/sys/block/$sysfs_device_name"
if test -e $current_device ; then
current_size=$( get_disk_size $sysfs_device_name )
@@ -122,11 +129,16 @@ while read keyword orig_device orig_size junk ; do
# Continue with next one if the current one is already used as target in the mapping file:
is_mapping_target "$preferred_target_device_name" && continue
# Use the current one if it is of same size as the old one:
- if test "$orig_size" -eq "$current_size" ; then
+ if has_mapping_hint "$orig_device" || test "$orig_size" -eq "$current_size" ; then
# Ensure the determined target device is really a block device:
if test -b "$preferred_target_device_name" ; then
+ if has_mapping_hint "$orig_device" ; then
+ mapping_reason="determined by mapping hint"
+ else
+ mapping_reason="same name and same size $current_size"
+ fi
add_mapping "$orig_device" "$preferred_target_device_name"
- LogPrint "Using $preferred_target_device_name (same name and same size) for recreating $orig_device"
+ LogPrint "Using $preferred_target_device_name ($mapping_reason) for recreating $orig_device"
# Continue with next original device in the LAYOUT_FILE:
continue
fi
diff --git a/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh b/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh
index 3ab7357d..da6ce64c 100644
--- a/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh
+++ b/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh
@@ -362,18 +362,27 @@ Log "Saving disk partitions."
if [[ $blockd == dasd* && "$ARCH" == "Linux-s390" ]] ; then
devname=$(get_device_name $disk)
+ dasdnum=$( lsdasd | awk "\$3 == \"$blockd\" { print \$1}" )
+ dasdstatus=$( lsdasd | awk "\$3 == \"$blockd\" { print \$2}" )
+ # ECKD or FBA
+ dasdtype=$( lsdasd | awk "\$3 == \"$blockd\" { print \$5}" )
+ if [ "$dasdtype" != ECKD ] && [ "$dasdtype" != FBA ]; then
+ LogPrint "Type $dasdtype of DASD $blockd unexpected: neither ECKD nor FBA"
+ fi
- echo "# active dasd bus and channel"
- echo "# bus-id <name device> type"
- echo "dasd_channel $( lsdasd|grep $blockd|awk '{ print $1 " " $2 " " $3 " " $4}' )"
-
- echo "# dasdfmt - disk layout is either cdl for the compatible disk layout (default) or ldl"
- echo "# example usage: dasdfmt -b 4096 -d cdl -y /dev/dasda"
- layout=$(dasdview -x /dev/$blockd|grep "^format"|awk '{print $7}')
- blocksize=$( dasdview -i /dev/$blockd|grep blocksize|awk '{print $6}' )
- echo "# dasdfmt $devname"
- echo "# dasdfmt -b <blocksize> -d <layout> -y <devname>"
- echo "dasdfmt -b $blocksize -d $layout -y $devname"
+ echo "# every DASD bus and channel"
+ echo "# Format: dasd_channel <bus-id> <device name>"
+ echo "dasd_channel $dasdnum $blockd"
+
+ # We need to print the dasd_channel line even for ignored devices,
+ # otherwise we could have naming gaps and naming would change when
+ # recreating layout.
+ # E.g. if dasda is ignored, and dasdb is not, we would create only dasdb
+ # during recreation, but it would be named dasda.
+ if [ "$dasdstatus" != active ]; then
+ Log "Ignoring $blockd: it is not active (Status is $dasdstatus)"
+ continue
+ fi
fi
#FIXME: exclude *rpmb (Replay Protected Memory Block) for nvme*, mmcblk* and uas
@@ -387,11 +396,38 @@ Log "Saving disk partitions."
devname=$(get_device_name $disk)
devsize=$(get_disk_size ${disk#/sys/block/})
disktype=$(parted -s $devname print | grep -E "Partition Table|Disk label" | cut -d ":" -f "2" | tr -d " ")
-
- echo "# Disk $devname"
- echo "# Format: disk <devname> <size(bytes)> <partition label type>"
- echo "disk $devname $devsize $disktype"
-
+ if [ "$disktype" != "dasd" ]; then
+ echo "# Disk $devname"
+ echo "# Format: disk <devname> <size(bytes)> <partition label type>"
+ echo "disk $devname $devsize $disktype"
+ elif [[ $blockd == dasd* && "$ARCH" == "Linux-s390" ]] ; then
+ layout=$(dasdview -x $devname |grep "^format"|awk '{print $7}')
+ case "$layout" in
+ (NOT)
+ # NOT -> dasdview has printed "NOT formatted"
+ LogPrintError "Ignoring $blockd: it is not formatted"
+ continue
+ ;;
+ (LDL|CDL)
+ ;;
+ (*)
+ BugError "Invalid 'disk $devname' entry (unknown DASD layout $layout)"
+ ;;
+ esac
+ test $disktype || Error "No partition label type for DASD entry 'disk $devname'"
+ blocksize=$( get_block_size "$blockd" )
+ if ! test $blocksize ; then
+ # fallback - ugly method
+ blocksize=$( dasdview -i $devname |grep blocksize|awk '{print $6}' )
+ test $blocksize || Error "Unknown block size of DASD $devname"
+ fi
+ dasdcyls=$( get_dasd_cylinders "$blockd" )
+ echo "# Disk $devname"
+ echo "# Format: disk <devname> <size(bytes)> <partition label type> <logical block size> <DASD layout> <DASD type> <size(cylinders)>"
+ echo "disk $devname $devsize $disktype $blocksize $layout $dasdtype $dasdcyls"
+ else
+ Error "Invalid 'disk $devname' entry (DASD partition label on non-s390 arch $ARCH)"
+ fi
echo "# Partitions on $devname"
echo "# Format: part <device> <partition size(bytes)> <partition start(bytes)> <partition type|name> <flags> /dev/<partition>"
extract_partitions "$devname"
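As a concrete (hypothetical) illustration, for a single active ECKD DASD this script would now write entries like the following into disklayout.conf (bus-id, size, block size and cylinder count are invented):

dasd_channel 0.0.0120 dasda
disk /dev/dasda 7385333760 dasd 4096 CDL ECKD 10017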
diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh
index 91c5ff73..4f5b8f6f 100644
--- a/usr/share/rear/lib/layout-functions.sh
+++ b/usr/share/rear/lib/layout-functions.sh
@@ -93,6 +93,12 @@ abort_recreate() {
restore_original_file "$LAYOUT_FILE"
}
+abort_dasd_format() {
+ Log "Error detected during DASD formatting."
+ Log "Restoring saved original $DASD_FORMAT_FILE"
+ restore_original_file "$DASD_FORMAT_FILE"
+}
+
# Test and log if a component $1 (type $2) needs to be recreated.
create_component() {
local device="$1"
@@ -722,6 +728,46 @@ get_block_size() {
fi
}
+# Get the number of cylinders of a DASD.
+# The number of cylinders has the advantage of being fixed: the size depends on the formatting,
+# and the number of cylinders is valid even for unformatted DASDs, whereas the size is not.
+get_dasd_cylinders() {
+ local disk_name="${1##*/}" # /some/path/dasda -> dasda
+ local dasd_cyls
+
+ dasd_cyls=$(dasdview -i /dev/$disk_name | grep cylinders | cut -d ':' -f2 | awk '{print $4}')
+ ### Make sure we always return a number
+ echo $(( dasd_cyls ))
+}
+
+# Sometimes we know what the new device for the original device should be in a more reliable way
+# than by looking at disk sizes. This information is called "mapping hints". Let's pass them
+# to the mapping code using the DISK_MAPPING_HINTS array. Each element of the array has the form
+# "/dev/source /dev/target" (space-separated).
+
+# Output the mapping hint for the original device.
+function get_mapping_hint () {
+ local device="$1"
+ local hint mapping_hint_source mapping_hint_target
+
+ for hint in "${DISK_MAPPING_HINTS[@]}"; do
+ mapping_hint_source=${hint%% *}
+ mapping_hint_target=${hint##* }
+ if [ "${device}" == "${mapping_hint_source}" ] ; then
+ echo "$mapping_hint_target"
+ return 0
+ fi
+ done
+ return 1
+}
+
+# Determine if there is a mapping hint for the original device.
+function has_mapping_hint () {
+ local device="$1"
+
+ get_mapping_hint "$device" > /dev/null
+}
+
# Get the UUID of a device.
# Device is something like /dev/sda1.
blkid_uuid_of_device() {
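A minimal usage sketch for the mapping hint helpers, with an invented hint such as enable_s390_disk records when a DASD comes back online under a different name:

DISK_MAPPING_HINTS=( "/dev/dasdb /dev/dasdc" )   # hypothetical: original dasdb reappeared as dasdc

if has_mapping_hint /dev/dasdb ; then
    echo "recreate /dev/dasdb on $( get_mapping_hint /dev/dasdb )"   # -> recreate /dev/dasdb on /dev/dasdc
fi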

@ -0,0 +1,296 @@
# this is purely a shell script, so no debug packages
%global debug_package %{nil}
Name: rear
Version: 2.6
Release: 17%{?dist}
Summary: Relax-and-Recover is a Linux disaster recovery and system migration tool
URL: http://relax-and-recover.org/
License: GPLv3
Source0: https://github.com/rear/rear/archive/%{version}.tar.gz#/rear-%{version}.tar.gz
# Add cronjob and systemd timer as documentation
Source1: rear.cron
Source2: rear.service
Source3: rear.timer
# Skip builtin kernel modules, RHBZ#1831311
Patch0: 0001-skip-kernel-buildin-modules.patch
Patch4: rear-bz1492177-warning.patch
Patch29: rear-bz1832394.patch
Patch30: rear-sfdc02772301.patch
Patch31: rear-bz1945869.patch
Patch32: rear-bz1958247.patch
Patch33: rear-bz1930662.patch
Patch34: rear-tmpdir.patch
Patch35: rear-bz1983013.patch
Patch36: rear-bz1993296.patch
Patch37: rear-bz1747468.patch
Patch38: rear-bz2049091.patch
Patch39: rear-pr2675.patch
Patch40: rear-bz2048454.patch
Patch41: rear-bz2035939.patch
Patch42: rear-bz2083272.patch
Patch43: rear-bz2111049.patch
Patch44: rear-bz2104005.patch
Patch45: rear-bz2097437.patch
Patch46: rear-bz2096916.patch
Patch47: rear-bz2096900.patch
Patch48: rear-bz2111059.patch
Patch49: rsync-output.patch
Patch50: rear-bz2119501.patch
Patch51: rear-bz2120736.patch
Patch52: rear-bz2117937.patch
Patch53: rear-bz2091163.patch
Patch54: rear-bz2130945.patch
Patch55: rear-bz2131946.patch
Patch56: s390-no-clobber-disks.patch
# rear contains only bash scripts plus documentation, so at first glance it could be "BuildArch: noarch",
# but actually it is not "noarch" because it only works on those architectures that are explicitly supported.
# Of course the rear bash scripts can be installed on any architecture, just as any binaries can be installed on any architecture,
# but an architecture-dependent package should indicate on which architectures it will actually work.
# Therefore only those architectures that are actually supported are explicitly listed.
# This prevents rear from being "just installed" on architectures that are actually not supported (e.g. ARM):
ExclusiveArch: %ix86 x86_64 ppc ppc64 ppc64le ia64 s390x
# Furthermore, for some architectures rear requires architecture-dependent packages (like syslinux for x86 and x86_64),
# so rear must be architecture dependent because ifarch conditions never match in case of "BuildArch: noarch";
# see the GitHub issue https://github.com/rear/rear/issues/629
%ifarch %ix86 x86_64
Requires: syslinux
# We need mkfs.vfat for recreating EFI System Partition
Recommends: dosfstools
%endif
%ifarch ppc ppc64 ppc64le
# Called by grub2-install (except on PowerNV)
Requires: /usr/sbin/ofpathname
# Needed to make PowerVM LPARs bootable
Requires: /usr/sbin/bootlist
%endif
%ifarch s390x
# Contain many utilities for working with DASDs
Requires: s390utils-base
Requires: s390utils-core
%endif
# In the end this should tell the user that rear is known to work only on ix86 x86_64 ppc ppc64 ppc64le ia64
# and that on ix86 x86_64 syslinux is explicitly required to make the bootable ISO image
# (in addition to the default installed bootloader grub2), while on ppc ppc64 the
# default installed bootloader yaboot is also used to make the bootable ISO image.
# Required for HTML user guide
BuildRequires: make
BuildRequires: asciidoctor
### Mandatory dependencies:
Requires: binutils
Requires: ethtool
Requires: gzip
Requires: iputils
Requires: parted
Requires: tar
Requires: openssl
Requires: gawk
Requires: attr
Requires: bc
Requires: iproute
# No ISO image support on s390x (may change when we add support for LPARs)
%ifnarch s390x
Requires: xorriso
%endif
Requires: file
Requires: dhcp-client
%if 0%{?rhel}
Requires: util-linux
%endif
%description
Relax-and-Recover is the leading Open Source disaster recovery and system
migration solution. It consists of a modular
framework and ready-to-go workflows for many common situations to produce
a bootable image and restore from backup using this image. As a benefit,
it allows restoring to different hardware and can therefore be used as
a migration tool as well.
Currently Relax-and-Recover supports various boot media (incl. ISO, PXE,
OBDR tape, USB or eSATA storage), a variety of network protocols (incl.
sftp, ftp, http, nfs, cifs) as well as a multitude of backup strategies
(incl. IBM TSM, Micro Focus Data Protector, Symantec NetBackup, EMC NetWorker,
Bacula, Bareos, BORG, Duplicity, rsync).
Relax-and-Recover was designed to be easy to set up, requires no maintenance
and is there to assist when disaster strikes. Its setup-and-forget nature
removes any excuse for not having a disaster recovery solution implemented.
Professional services and support are available.
#-- PREP, BUILD & INSTALL -----------------------------------------------------#
%prep
%autosetup -p1
### Add a specific os.conf so we do not depend on LSB dependencies
%{?fedora:echo -e "OS_VENDOR=Fedora\nOS_VERSION=%{?fedora}" >etc/rear/os.conf}
%{?rhel:echo -e "OS_VENDOR=RedHatEnterpriseServer\nOS_VERSION=%{?rhel}" >etc/rear/os.conf}
# Change /lib to /usr/lib for COPY_AS_IS
sed -E -e "s:([\"' ])/lib:\1/usr/lib:g" \
-i usr/share/rear/prep/GNU/Linux/*include*.sh
# Same for Linux.conf
sed -e 's:/lib/:/usr/lib/:g' \
-e 's:/lib\*/:/usr/lib\*/:g' \
-e 's:/usr/usr/lib:/usr/lib:g' \
-i 'usr/share/rear/conf/GNU/Linux.conf'
%build
# build HTML user guide
# asciidoc writes a timestamp to files it produces, based on the last
# modified date of the source file, but is sensitive to the timezone.
# This makes the results differ according to the timezone of the build machine
# and spurious changes will be seen.
# Set the timezone to UTC as a workaround.
# https://wiki.debian.org/ReproducibleBuilds/TimestampsInDocumentationGeneratedByAsciidoc
TZ=UTC make doc
%install
%{make_install}
install -p -d %{buildroot}%{_docdir}/%{name}/
install -m 0644 %{SOURCE1} %{buildroot}%{_docdir}/%{name}/
install -m 0644 %{SOURCE2} %{buildroot}%{_docdir}/%{name}/
install -m 0644 %{SOURCE3} %{buildroot}%{_docdir}/%{name}/
#-- FILES ---------------------------------------------------------------------#
%files
%doc MAINTAINERS COPYING README.adoc doc/*.txt doc/user-guide/*.html
%doc %{_mandir}/man8/rear.8*
%doc %{_docdir}/%{name}/rear.*
%config(noreplace) %{_sysconfdir}/rear/
%{_datadir}/rear/
%{_sharedstatedir}/rear/
%{_sbindir}/rear
#-- CHANGELOG -----------------------------------------------------------------#
%changelog
* Fri Apr 14 2023 MSVSphere Packaging Team <packager@msvsphere.ru> - 2.6-17
- Rebuilt for MSVSphere 9.2 beta
* Wed Feb 22 2023 Pavel Cahyna <pcahyna@redhat.com> - 2.6-17
- Backport PR2943 to fix s390x dasd formatting
- Require s390utils-{core,base} on s390x
* Sun Jan 15 2023 Pavel Cahyna <pcahyna@redhat.com> - 2.6-16
- Apply PR2903 to protect against colons in pvdisplay output
- Apply PR2873 to fix initrd regeneration on s390x
- Apply PR2431 to migrate XFS configuration files
* Thu Aug 25 2022 Pavel Cahyna <pcahyna@redhat.com> - 2.6-15
- Exclude /etc/lvm/devices from the rescue system to work around a segfault
in lvm pvcreate
* Wed Aug 24 2022 Pavel Cahyna <pcahyna@redhat.com> - 2.6-14
- Avoid stderr message about irrelevant broken links
- Changes for NetBackup (NBU) 9.x support
* Tue Aug 9 2022 Pavel Cahyna <pcahyna@redhat.com> - 2.6-13
- Backport PR2831 - rsync URL refactoring
fixes rsync OUTPUT_URL when different from BACKUP_URL
* Mon Aug 8 2022 Pavel Cahyna <pcahyna@redhat.com> - 2.6-12
- Apply PR2795 to detect changes in system files between backup
and rescue image
- Apply PR2808 to exclude dev/watchdog* from recovery system
- Backport upstream PRs 2827 and 2839 to pass -y to lvcreate instead of one "y"
on stdin
- Apply PR2811 to add the PRE/POST_RECOVERY_COMMANDS directives
- Recommend dosfstools on x86_64, needed for EFI System Partition
- Backport PR2825 to replace defunct mkinitrd with dracut
- Apply PR2580 to load the nvram module in the rescue environment in order
to be able to set the boot order on ppc64le LPARs
- Backport PR2822 to include the true vi executable in rescue ramdisk
* Sun Feb 27 2022 Pavel Cahyna <pcahyna@redhat.com> - 2.6-11
- Apply PR2675 to fix leftover temp dir bug (introduced in backported PR2625)
- Apply PR2603 to ignore unused PV devices
- Apply upstream PR2750 to avoid exclusion of wanted multipath devices
- Remove unneeded xorriso dep on s390x (no ISO image support there)
- Apply upstream PR2736 to add the EXCLUDE_{IP_ADDRESSES,NETWORK_INTERFACES}
options
- Add patch for better handling of thin pools and other LV types not supported
by vgcfgrestore
* Mon Aug 16 2021 Pavel Cahyna <pcahyna@redhat.com> - 2.6-10
- Sync spec changes and downstream patches from RHEL 8 rear-2.6-2
- Fix multipath performance regression in 2.6, introduced by upstream PR #2299.
Resolves: rhbz1993296
- On POWER add bootlist & ofpathname to the list of required programs
conditionally (bootlist only if running under PowerVM, ofpathname
always except on PowerNV) - upstream PR2665, add them to package
dependencies
Resolves: rhbz1983013
- Backport PR2608:
Fix setting boot path in case of UEFI partition (ESP) on MD RAID
Resolves: rhbz1945869
- Backport PR2625
Prevents accidental backup removal in case of errors
Resolves: rhbz1958247
- Fix rsync error and option handling
Resolves: rhbz1930662
* Wed Aug 11 2021 Pavel Cahyna <pcahyna@redhat.com> - 2.6-9
- Put TMPDIR on /var/tmp by default, otherwise it may lack space
RHBZ #1988420, upstream PR2664
* Tue Aug 10 2021 Mohan Boddu <mboddu@redhat.com> - 2.6-8
- Rebuilt for IMA sigs, glibc 2.34, aarch64 flags
Related: rhbz#1991688
* Wed Jun 30 2021 Pavel Cahyna <pcahyna@redhat.com> - 2.6-7
- Sync spec changes and downstream patches from RHEL 8
- Require xorriso instead of genisoimage
- Add S/390 support and forgotten dependency on the file utility
- Backport upstream code related to LUKS2 support
- Modify the cron command to avoid an e-mail with error message after
ReaR is installed but not properly configured when the cron command
is triggered for the first time
- Changes for NetBackup (NBU) support, upstream PR2544
- Add dependency on dhcp-client, RHBZ #1926451
* Fri Apr 16 2021 Mohan Boddu <mboddu@redhat.com> - 2.6-6
- Rebuilt for RHEL 9 BETA on Apr 15th 2021. Related: rhbz#1947937
* Fri Feb 26 2021 Christopher Engelhard <ce@lcts.de> - 2.6-5
- Change /lib to /usr/lib in scripts to fix RHBZ #1931112
* Wed Jan 27 2021 Fedora Release Engineering <releng@fedoraproject.org> - 2.6-4
- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild
* Wed Sep 23 2020 Christopher Engelhard <ce@lcts.de> - 2.6-3
- Stop auto-creating a cronjob, but ship example cronjob/
systemd timer units in docdir instead (upstream issue #1829)
- Build & ship HTML user guide
- Remove %pre scriptlet, as it was introduced only to fix a
specific upgrade issue with v1.15 in 2014
* Tue Sep 22 2020 Christopher Engelhard <ce@lcts.de> - 2.6-2
- Backport upstream PR#2469 to fix RHBZ #1831311
* Tue Sep 22 2020 Christopher Engelhard <ce@lcts.de> - 2.6-1
- Update to 2.6
- Streamline & clean up spec file
* Wed Jul 29 2020 Fedora Release Engineering <releng@fedoraproject.org> - 2.4-6
- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild
* Thu Jan 30 2020 Fedora Release Engineering <releng@fedoraproject.org> - 2.4-5
- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild
* Fri Jul 26 2019 Fedora Release Engineering <releng@fedoraproject.org> - 2.4-4
- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild
* Sat Feb 02 2019 Fedora Release Engineering <releng@fedoraproject.org> - 2.4-3
- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild
* Sat Jul 14 2018 Fedora Release Engineering <releng@fedoraproject.org> - 2.4-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild
* Thu Jul 30 2015 Johannes Meixner <jsmeix@suse.de>
- For a changelog see the rear-release-notes.txt file.