From f84803d0d296d7297bed706021dc74954ad0c651 Mon Sep 17 00:00:00 2001
From: MSVSphere Packaging Team
Date: Fri, 29 Mar 2024 16:26:27 +0300
Subject: [PATCH] import rear-2.6-12.el8

---
 .gitignore | 1 +
 .rear.metadata | 1 +
 SOURCES/pxe-rsync-output.patch | 36 +
 SOURCES/rear-CVE-2024-23301.patch | 32 +
 SOURCES/rear-asciidoc.patch | 584 +++++
 SOURCES/rear-bz1492177-warning.patch | 15 +
 SOURCES/rear-bz1747468.patch | 112 +
 SOURCES/rear-bz1832394.patch | 351 +++
 SOURCES/rear-bz1930662.patch | 693 ++++++
 SOURCES/rear-bz1945869.patch | 274 +++
 SOURCES/rear-bz1958247.patch | 2040 +++++++++++++++++
 SOURCES/rear-bz1983013.patch | 68 +
 SOURCES/rear-bz1993296.patch | 34 +
 SOURCES/rear-bz2035939.patch | 56 +
 SOURCES/rear-bz2048454.patch | 78 +
 SOURCES/rear-bz2049091.patch | 25 +
 SOURCES/rear-bz2083272.patch | 171 ++
 SOURCES/rear-bz2091163.patch | 46 +
 SOURCES/rear-bz2104005.patch | 21 +
 SOURCES/rear-bz2111049.patch | 37 +
 SOURCES/rear-bz2111059.patch | 105 +
 SOURCES/rear-bz2119501.patch | 39 +
 SOURCES/rear-bz2120736.patch | 18 +
 SOURCES/rear-bz2130945.patch | 20 +
 SOURCES/rear-bz2131946.patch | 129 ++
 SOURCES/rear-device-shrinking-bz2223895.patch | 32 +
 SOURCES/rear-luks-key-bz2228779.patch | 25 +
 SOURCES/rear-pr2675.patch | 60 +
 ...restore-hybrid-bootloader-RHEL-16864.patch | 569 +++++
 ...-save-lvm-poolmetadatasize-RHEL-6984.patch | 102 +
 SOURCES/rear-sfdc02772301.patch | 38 +
 .../rear-skip-invalid-drives-RHEL-22863.patch | 60 +
 ...useless-xfs-mount-options-RHEL-10478.patch | 85 +
 .../rear-uefi-usb-secureboot-bz2196445.patch | 82 +
 .../rear-usb-uefi-part-size-bz2228402.patch | 41 +
 .../rear-vg-command-not-found-bz2121476.patch | 21 +
 SOURCES/s390-no-clobber-disks.patch | 751 ++++++
 SPECS/rear.spec | 496 ++++
 38 files changed, 7348 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 .rear.metadata
 create mode 100644 SOURCES/pxe-rsync-output.patch
 create mode 100644 SOURCES/rear-CVE-2024-23301.patch
 create mode 100644 SOURCES/rear-asciidoc.patch
 create mode 100644 SOURCES/rear-bz1492177-warning.patch
 create mode 100644 SOURCES/rear-bz1747468.patch
 create mode 100644 SOURCES/rear-bz1832394.patch
 create mode 100644 SOURCES/rear-bz1930662.patch
 create mode 100644 SOURCES/rear-bz1945869.patch
 create mode 100644 SOURCES/rear-bz1958247.patch
 create mode 100644 SOURCES/rear-bz1983013.patch
 create mode 100644 SOURCES/rear-bz1993296.patch
 create mode 100644 SOURCES/rear-bz2035939.patch
 create mode 100644 SOURCES/rear-bz2048454.patch
 create mode 100644 SOURCES/rear-bz2049091.patch
 create mode 100644 SOURCES/rear-bz2083272.patch
 create mode 100644 SOURCES/rear-bz2091163.patch
 create mode 100644 SOURCES/rear-bz2104005.patch
 create mode 100644 SOURCES/rear-bz2111049.patch
 create mode 100644 SOURCES/rear-bz2111059.patch
 create mode 100644 SOURCES/rear-bz2119501.patch
 create mode 100644 SOURCES/rear-bz2120736.patch
 create mode 100644 SOURCES/rear-bz2130945.patch
 create mode 100644 SOURCES/rear-bz2131946.patch
 create mode 100644 SOURCES/rear-device-shrinking-bz2223895.patch
 create mode 100644 SOURCES/rear-luks-key-bz2228779.patch
 create mode 100644 SOURCES/rear-pr2675.patch
 create mode 100644 SOURCES/rear-restore-hybrid-bootloader-RHEL-16864.patch
 create mode 100644 SOURCES/rear-save-lvm-poolmetadatasize-RHEL-6984.patch
 create mode 100644 SOURCES/rear-sfdc02772301.patch
 create mode 100644 SOURCES/rear-skip-invalid-drives-RHEL-22863.patch
 create mode 100644 SOURCES/rear-skip-useless-xfs-mount-options-RHEL-10478.patch
 create mode 100644 SOURCES/rear-uefi-usb-secureboot-bz2196445.patch
 create mode 100644
SOURCES/rear-usb-uefi-part-size-bz2228402.patch create mode 100644 SOURCES/rear-vg-command-not-found-bz2121476.patch create mode 100644 SOURCES/s390-no-clobber-disks.patch create mode 100644 SPECS/rear.spec diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..5d90595 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +SOURCES/rear-2.6.tar.gz diff --git a/.rear.metadata b/.rear.metadata new file mode 100644 index 0000000..a5f7fb3 --- /dev/null +++ b/.rear.metadata @@ -0,0 +1 @@ +13c23ad59254438ffcd0cde6400fd991cbfe194e SOURCES/rear-2.6.tar.gz diff --git a/SOURCES/pxe-rsync-output.patch b/SOURCES/pxe-rsync-output.patch new file mode 100644 index 0000000..c7844ff --- /dev/null +++ b/SOURCES/pxe-rsync-output.patch @@ -0,0 +1,36 @@ +diff --git a/usr/share/rear/output/PXE/default/820_copy_to_net.sh b/usr/share/rear/output/PXE/default/820_copy_to_net.sh +new file mode 100644 +index 00000000..dba1e526 +--- /dev/null ++++ b/usr/share/rear/output/PXE/default/820_copy_to_net.sh +@@ -0,0 +1,30 @@ ++ ++# 820_copy_to_net.sh ++ ++# Check if we have a target location OUTPUT_URL ++test "$OUTPUT_URL" || return 0 ++ ++local scheme=$( url_scheme $OUTPUT_URL ) ++local result_file="" ++local path="" ++ ++case "$scheme" in ++ (nfs|cifs|usb|tape|file|davfs) ++ # The ISO has already been transferred by NETFS. ++ return 0 ++ ;; ++ (fish|ftp|ftps|hftp|http|https|sftp) ++ # output/default/950_copy_result_files.sh will transfer them ++ return 0 ++ ;; ++ (rsync) ++ LogPrint "Transferring PXE files to $OUTPUT_URL" ++ for result_file in "${RESULT_FILES[@]}" ; do ++ LogPrint "Transferring file: $result_file" ++ rsync -a $v "$result_file" "$OUTPUT_URL" || Error "Problem transferring '$result_file' to $OUTPUT_URL" ++ done ++ ;; ++ (*) Error "Invalid scheme '$scheme' in '$OUTPUT_URL'." ++ ;; ++esac ++ diff --git a/SOURCES/rear-CVE-2024-23301.patch b/SOURCES/rear-CVE-2024-23301.patch new file mode 100644 index 0000000..1361f52 --- /dev/null +++ b/SOURCES/rear-CVE-2024-23301.patch @@ -0,0 +1,32 @@ +From 89b61793d80bc2cb2abe47a7d0549466fb087d16 Mon Sep 17 00:00:00 2001 +From: Johannes Meixner +Date: Fri, 12 Jan 2024 08:04:40 +0100 +Subject: [PATCH] Make initrd accessible only by root (#3123) + +In pack/GNU/Linux/900_create_initramfs.sh call +chmod 0600 "$TMP_DIR/$REAR_INITRD_FILENAME" +to let only 'root' access the ReaR initrd because +the ReaR recovery system in the initrd can contain secrets +(not by default but when certain things are explicitly +configured by the user like SSH keys without passphrase) +see https://github.com/rear/rear/issues/3122 +and https://bugzilla.opensuse.org/show_bug.cgi?id=1218728 +--- + usr/share/rear/pack/GNU/Linux/900_create_initramfs.sh | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/usr/share/rear/pack/GNU/Linux/900_create_initramfs.sh b/usr/share/rear/pack/GNU/Linux/900_create_initramfs.sh +index 1e0c11039..12be718ed 100644 +--- a/usr/share/rear/pack/GNU/Linux/900_create_initramfs.sh ++++ b/usr/share/rear/pack/GNU/Linux/900_create_initramfs.sh +@@ -125,4 +125,10 @@ case "$REAR_INITRD_COMPRESSION" in + fi + ;; + esac ++ ++# Only root should be allowed to access the initrd ++# because the ReaR recovery system can contain secrets ++# cf. 
https://github.com/rear/rear/issues/3122 ++test -s "$TMP_DIR/$REAR_INITRD_FILENAME" && chmod 0600 "$TMP_DIR/$REAR_INITRD_FILENAME" ++ + popd >/dev/null diff --git a/SOURCES/rear-asciidoc.patch b/SOURCES/rear-asciidoc.patch new file mode 100644 index 0000000..d224eb7 --- /dev/null +++ b/SOURCES/rear-asciidoc.patch @@ -0,0 +1,584 @@ +diff --git a/doc/user-guide/12-BLOCKCLONE.adoc b/doc/user-guide/12-BLOCKCLONE.adoc +index 061f0f49..2d4e0ed1 100644 +--- a/doc/user-guide/12-BLOCKCLONE.adoc ++++ b/doc/user-guide/12-BLOCKCLONE.adoc +@@ -40,17 +40,17 @@ First we need to set some global options in _local.conf_, + In our small example backups will be stored in _/mnt/rear_ directory + on BACKUP_URL NFS server. + +-``` ++-------------------------- + # cat local.conf + OUTPUT=ISO + BACKUP=NETFS + BACKUP_OPTIONS="nfsvers=3,nolock" + BACKUP_URL=nfs:///mnt/rear +-``` ++-------------------------- + + Now we will define variables that will apply only for targeted block device + +-``` ++-------------------------- + # cat alien.conf + BACKUP=BLOCKCLONE # Define BLOCKCLONE as backup method + BACKUP_PROG_ARCHIVE="alien" # Name of image file +@@ -66,16 +66,16 @@ BLOCKCLONE_MBR_FILE="alien_boot_strap.img" # Output filename for b + BLOCKCLONE_PARTITIONS_CONF_FILE="alien_partitions.conf" # Output filename for partition configuration + + BLOCKCLONE_ALLOW_MOUNTED="yes" # Device can be mounted during backup (default NO) +-``` ++-------------------------- + + ==== Running backup + Save partitions configuration, bootstrap code and create actual backup of /dev/sdc1 +-``` ++-------------------------- + # rear -C alien mkbackuponly +-``` ++-------------------------- + + ==== Running restore from ReaR restore/recovery system +-``` ++-------------------------- + # rear -C alien restoreonly + Restore alien.dd.img to device: [/dev/sdc1] # User is always prompted for restore destination + Device /dev/sdc1 was not found. # If destination does not exist ReaR will try to create it (or fail if BLOCKCLONE_SAVE_MBR_DEV was not set during backup) +@@ -102,7 +102,7 @@ Device Boot Start End Sectors Size Id Type + The partition table has been altered. + Calling ioctl() to re-read partition table. + Syncing disks. +-``` ++-------------------------- + + ==== Summary + In first example we have run backup of /dev/sdc1 partition and stored it on NFS +@@ -127,37 +127,37 @@ In next example we will do backup/restore using BLOCKCLONE and `ntfsclone` + of Linux (installed on /dev/sda) and Windows 10 (installed on /dev/sdb). + + TIP: You can locate right disk devices using `df` and `os-prober` +-``` ++-------------------------- + # df -h /boot + Filesystem Size Used Avail Use% Mounted on + /dev/sda1 10G 4.9G 5.2G 49% / # Linux is most probably installed on /dev/sda + + # os-prober + /dev/sdb1:Windows 10 (loader):Windows:chain # Windows 10 is most probably installed on /dev/sdb +-``` ++-------------------------- + + First we will configure some ReaR backup global options + (similar to link:12-BLOCKCLONE.adoc#1-backuprestore-of-arbitrary-block-device-with-blockclone-and-dd-on-nfs-server[first example] + we will do backup/restore with help of NFS server). + +-``` ++-------------------------- + # cat local.conf + OUTPUT=ISO + BACKUP=NETFS + BACKUP_OPTIONS="nfsvers=3,nolock" + BACKUP_URL=nfs:///mnt/rear + REQUIRED_PROGS+=( ntfsclone ) +-``` ++-------------------------- + + Now we will define backup parameters for Linux. 
+ +-``` ++-------------------------- + # cat base_os.conf + this_file_name=$( basename ${BASH_SOURCE[0]} ) + LOGFILE="$LOG_DIR/rear-$HOSTNAME-$WORKFLOW-${this_file_name%.*}.log" + BACKUP_PROG_ARCHIVE="backup-${this_file_name%.*}" + BACKUP_PROG_EXCLUDE+=( '/media/*' ) +-``` ++-------------------------- + + Our Windows 10 is by default installed on two separate partitions + (partition 1 for boot data and partition 2 for disk C:), +@@ -165,7 +165,7 @@ Our Windows 10 is by default installed on two separate partitions + + Windows boot partition: + +-``` ++-------------------------- + # cat windows_boot.conf + BACKUP=BLOCKCLONE + BACKUP_PROG_ARCHIVE="windows_boot" +@@ -179,10 +179,10 @@ BLOCKCLONE_PROG_OPTS="--quiet" + BLOCKCLONE_SAVE_MBR_DEV="/dev/sdb" + BLOCKCLONE_MBR_FILE="windows_boot_strap.img" + BLOCKCLONE_PARTITIONS_CONF_FILE="windows_partitions.conf" +-``` ++-------------------------- + + Windows data partition (disk C:\): +-``` ++-------------------------- + # cat windows_data.conf + BACKUP=BLOCKCLONE + BACKUP_PROG_ARCHIVE="windows_data" +@@ -196,35 +196,35 @@ BLOCKCLONE_PROG_OPTS="--quiet" + BLOCKCLONE_SAVE_MBR_DEV="/dev/sdb" + BLOCKCLONE_MBR_FILE="windows_boot_strap.img" + BLOCKCLONE_PARTITIONS_CONF_FILE="windows_partitions.conf" +-``` ++-------------------------- + + ==== Running backup + First we will create backup of Linux. `mkbackup` command will create bootable + ISO image with ReaR rescue/recovery system that will be later used for + booting broken system and consecutive recovery. +-``` ++-------------------------- + # rear -C base_os mkbackup +-``` ++-------------------------- + + Now we create backup of Windows 10 boot partition. Command `mkbackuponly` + will ensure that only partition data and partition layout will be saved + (ReaR rescue/recovery system will not be created which is exactly what we want). +-``` ++-------------------------- + # rear -C windows_boot mkbackuponly +-``` ++-------------------------- + + Similarly, we create backup of Windows 10 data partition (disk C:\) +-``` ++-------------------------- + # rear -C windows_data mkbackuponly +-``` ++-------------------------- + + ==== Running restore from ReaR restore/recovery system + As a first step after ReaR rescue/recovery system booted, + we will recover Linux. This step will recover all Linux file systems, + OS data and bootloader. Windows disk will remain untouched. +-``` ++-------------------------- + # rear -C base_os recover +-``` ++-------------------------- + + In second step will recover Windows 10 boot partition. During this step ReaR + will detect that destination partition is not present and ask us for device +@@ -234,25 +234,25 @@ In second step will recover Windows 10 boot partition. During this step ReaR + partition(s) configuration (currently mounted under _/mnt/local_) will + remain untouched. Before starting Windows 10 recovery we should identify + right disk for recovery, as mentioned earlier disk size could be a good start. +-``` ++-------------------------- + # fdisk -l /dev/sdb + Disk /dev/sdb: 50 GiB, 53687091200 bytes, 104857600 sectors +-``` ++-------------------------- + + _/dev/sdb_ looks to be right destination, so we can proceed with restore. +-``` ++-------------------------- + # rear -C windows_boot restoreonly + Restore windows_boot.img to device: [/dev/sdb1] + Device /dev/sdb1 was not found. + Restore partition layout to (^c to abort): [/dev/sdb] + Checking that no-one is using this disk right now ... OK + ... 
+-``` ++-------------------------- + + Last step is to recover Windows 10 OS data (C:\). + Partitions on _/dev/sdb_ were already created in previous step, + hence ReaR will skip prompt for restoring partition layout. +-``` ++-------------------------- + # rear -C windows_data restoreonly + Restore windows_data.img to device: [/dev/sdb2] + Ntfsclone image version: 10.1 +@@ -263,7 +263,7 @@ Space in use : 9396 MB (27.8%) + Offset to image data : 56 (0x38) bytes + Restoring NTFS from image ... + ... +-``` ++-------------------------- + + At this stage Linux together with Windows 10 is successfully restored. + +@@ -286,7 +286,7 @@ In this example we will do backup/restore using BLOCKCLONE and `ntfsclone` + Backups will be stored on NFS server. + + First we set global ReaR options +-``` ++-------------------------- + # cat local.conf + OUTPUT=ISO + BACKUP=NETFS +@@ -300,23 +300,23 @@ BLOCKCLONE_SAVE_MBR_DEV="/dev/sda" + BLOCKCLONE_MBR_FILE="boot_strap.img" + BLOCKCLONE_PARTITIONS_CONF_FILE="partitions.conf" + +-``` ++-------------------------- + + IMPORTANT: BLOCKCLONE_STRICT_PARTITIONING is mandatory if backing up + Linux / Windows that shares one disk. Not using this option might result to + unbootable Windows 10 installation. + + Linux configuration +-``` ++-------------------------- + # cat base_os.conf + this_file_name=$( basename ${BASH_SOURCE[0]} ) + LOGFILE="$LOG_DIR/rear-$HOSTNAME-$WORKFLOW-${this_file_name%.*}.log" + BACKUP_PROG_ARCHIVE="backup-${this_file_name%.*}" + BACKUP_PROG_EXCLUDE+=( '/media/*' ) +-``` ++-------------------------- + + Windows 10 boot partition configuration +-``` ++-------------------------- + # cat windows_boot.conf + BACKUP=BLOCKCLONE + +@@ -328,10 +328,10 @@ BLOCKCLONE_PROG=ntfsclone + BLOCKCLONE_PROG_OPTS="--quiet" + + BLOCKCLONE_SOURCE_DEV="/dev/sda1" +-``` ++-------------------------- + + Windows 10 data partition configuration +-``` ++-------------------------- + # cat windows_data.conf + BACKUP=BLOCKCLONE + BACKUP_PROG_ARCHIVE="windows_data" +@@ -342,42 +342,42 @@ BLOCKCLONE_PROG=ntfsclone + BLOCKCLONE_PROG_OPTS="--quiet" + + BLOCKCLONE_SOURCE_DEV="/dev/sda2" +-``` ++-------------------------- + + ==== Running backup + + Backup of Linux +-``` ++-------------------------- + # rear -C base_os mkbackup +-``` ++-------------------------- + + Backup of Windows 10 boot partition +-``` ++-------------------------- + # rear -C windows_boot mkbackuponly +-``` ++-------------------------- + + Backup of Windows 10 data partition +-``` ++-------------------------- + # rear -C windows_data mkbackuponly +-``` ++-------------------------- + + ==== Running restore from ReaR restore/recovery system + Restore Linux +-``` ++-------------------------- + # rear -C base_os recover +-``` ++-------------------------- + + During this step ReaR will also create both Windows 10 partitions + + Restore Windows 10 data partition +-``` ++-------------------------- + # rear -C windows_data restoreonly +-``` ++-------------------------- + + Restore Windows 10 boot partition +-``` ++-------------------------- + # rear -C windows_boot restoreonly +-``` ++-------------------------- + + === 4. Backup/restore of Linux / Windows 10 dual boot setup sharing same disk with USB as destination + +@@ -389,7 +389,7 @@ In this example we will do backup/restore using BLOCKCLONE and `ntfsclone` + Backups will be stored on USB disk drive (_/dev/sdb_ in this example). 
+ + Global options +-``` ++-------------------------- + # cat local.conf + OUTPUT=USB + BACKUP=NETFS +@@ -407,10 +407,10 @@ BLOCKCLONE_SAVE_MBR_DEV="/dev/sda" + + BLOCKCLONE_MBR_FILE="boot_strap.img" + BLOCKCLONE_PARTITIONS_CONF_FILE="partitions.conf" +-``` ++-------------------------- + + Options used during Linux backup/restore. +-``` ++-------------------------- + # cat local.conf + OUTPUT=USB + BACKUP=NETFS +@@ -428,14 +428,14 @@ BLOCKCLONE_SAVE_MBR_DEV="/dev/sda" + + BLOCKCLONE_MBR_FILE="boot_strap.img" + BLOCKCLONE_PARTITIONS_CONF_FILE="partitions.conf" +-``` ++-------------------------- + + IMPORTANT: USB_SUFFIX option is mandatory as it avoids ReaR to hold every + backup in separate directory, this behavior is essential for BLOCKCLONE + backup method to work correctly. + + Windows boot partition options +-``` ++-------------------------- + # cat windows_boot.conf + BACKUP=BLOCKCLONE + +@@ -447,10 +447,10 @@ BLOCKCLONE_PROG=ntfsclone + BLOCKCLONE_PROG_OPTS="--quiet" + + BLOCKCLONE_SOURCE_DEV="/dev/sda1" +-``` ++-------------------------- + + Windows data partition options +-``` ++-------------------------- + # cat windows_data.conf + BACKUP=BLOCKCLONE + BACKUP_PROG_ARCHIVE="windows_data" +@@ -461,11 +461,11 @@ BLOCKCLONE_PROG=ntfsclone + BLOCKCLONE_PROG_OPTS="--quiet" + + BLOCKCLONE_SOURCE_DEV="/dev/sda2" +-``` ++-------------------------- + + ==== Running backup + First we need to format target USB device, with `rear format` command +-``` ++-------------------------- + # rear -v format /dev/sdb + Relax-and-Recover 2.00 / Git + Using log file: /var/log/rear/rear-centosd.log +@@ -477,15 +477,15 @@ Creating ReaR data partition up to 100% of '/dev/sdb' + Setting 'boot' flag on /dev/sdb + Creating ext3 filesystem with label 'REAR-000' on '/dev/sdb1' + Adjusting filesystem parameters on '/dev/sdb1' +-``` ++-------------------------- + + Backup of Linux +-``` ++-------------------------- + # rear -C base_os mkbackup +-``` ++-------------------------- + + Backup of Windows 10 boot partition +-``` ++-------------------------- + # rear -C windows_boot mkbackuponly + NTFS volume version: 3.1 + Cluster size : 4096 bytes +@@ -496,10 +496,10 @@ Accounting clusters ... + Space in use : 338 MB (64.4%) + Saving NTFS to image ... + Syncing ... +-``` ++-------------------------- + + Backup of Windows 10 data partition +-``` ++-------------------------- + # rear -C windows_data mkbackuponly + NTFS volume version: 3.1 + Cluster size : 4096 bytes +@@ -510,7 +510,7 @@ Accounting clusters ... + Space in use : 9833 MB (54.3%) + Saving NTFS to image ... + Syncing ... +-``` ++-------------------------- + + ==== Running restore from ReaR restore/recovery system + For sake of this demonstration I've purposely used ReaR's rescue/recovery media +@@ -519,7 +519,7 @@ For sake of this demonstration I've purposely used ReaR's rescue/recovery media + demonstrate possibility of ReaR to recover backup to arbitrary disk. + + As first step Linux will be restored, this will create all the partitions + needed, even those used by Windows 10. +-``` ++-------------------------- + RESCUE centosd:~ # rear -C base_os recover + Relax-and-Recover 2.00 / Git + Using log file: /var/log/rear/rear-centosd.log +@@ -541,13 +541,13 @@ Original disk /dev/sda does not exist in the target system. Please choose an app + 2) /dev/sdb + 3) Do not map disk. + #? +-``` ++-------------------------- + + Now ReaR recover command stops as it detected that disk layout is not identical. 
+ As our desired restore target is _/dev/sdb_ we choose right disk and continue + recovery. ReaR will ask to check created restore scripts, but this is not + needed in our scenario. +-``` ++-------------------------- + #? 2 + 2017-01-25 20:54:01 Disk /dev/sdb chosen as replacement for /dev/sda. + Disk /dev/sdb chosen as replacement for /dev/sda. +@@ -607,11 +607,11 @@ Skip installing GRUB Legacy boot loader because GRUB 2 is installed (grub-probe + Installing GRUB2 boot loader + Finished recovering your system. You can explore it under '/mnt/local'. + Saving /var/log/rear/rear-centosd.log as /var/log/rear/rear-centosd-recover-base_os.log +-``` ++-------------------------- + + Now we have Linux part restored, GRUB installed and partitions created, hence + we can continue with Windows 10 boot partition recovery. +-``` ++-------------------------- + RESCUE centosd:~ # rear -C windows_boot restoreonly + Restore windows_boot.nc.img to device: [/dev/sda1] /dev/sdb1 + Ntfsclone image version: 10.1 +@@ -622,12 +622,12 @@ Space in use : 338 MB (64.4%) + Offset to image data : 56 (0x38) bytes + Restoring NTFS from image ... + Syncing ... +-``` ++-------------------------- + + Similarly to Linux restore, we were prompted for restore destination, which + is /dev/sdb1 in our case. + + As the last step we will recover Windows 10 data partition +-``` ++-------------------------- + RESCUE centosd:~ # rear -C windows_data restoreonly + Restore windows_data.nc.img to device: [/dev/sda2] /dev/sdb2 + Ntfsclone image version: 10.1 +@@ -638,7 +638,7 @@ Space in use : 9867 MB (54.5%) + Offset to image data : 56 (0x38) bytes + Restoring NTFS from image ... + Syncing ... +-``` ++-------------------------- + + Again after restoreonly command is launched, ReaR prompts for restore + destination. + +@@ -662,25 +662,25 @@ The _BLOCKCLONE_TRY_UNMOUNT_ is important here: it will attempt to unmount the + run the risk that the data may be inconsistent. + + Global options +-``` ++-------------------------- + # cat site.conf + OUTPUT=ISO + KEEP_OLD_OUTPUT_COPY=1 + BACKUP_URL="nfs:///Stations_bkup/rear/" +-``` ++-------------------------- + + Options used for the base OS backup: +-``` ++-------------------------- + # cat base_system.conf + this_file_name=$( basename ${BASH_SOURCE[0]} ) + LOGFILE="$LOG_DIR/rear-$HOSTNAME-$WORKFLOW-${this_file_name%.*}.log" + BACKUP_PROG_EXCLUDE+=( '/products/*' ) + BACKUP_PROG_ARCHIVE="backup-${this_file_name%.*}" + BACKUP=NETFS +-``` ++-------------------------- + + Options used to take the encrypted filesystem image: +-``` ++-------------------------- + this_file_name=$( basename ${BASH_SOURCE[0]} ) + LOGFILE="$LOG_DIR/rear-$HOSTNAME-$WORKFLOW-${this_file_name%.*}.log" + BACKUP=BLOCKCLONE +@@ -694,18 +694,18 @@ BLOCKCLONE_SOURCE_DEV="/dev/vg00/lvol4" + + BLOCKCLONE_ALLOW_MOUNTED="yes" + BLOCKCLONE_TRY_UNMOUNT="yes" +-``` ++-------------------------- + + ==== Running backup + Base OS backup: +-``` ++-------------------------- + # rear -C base_system mkbackup +-``` ++-------------------------- + + Create image of encrypted filesystem: +-``` ++-------------------------- + # rear -C products_backup mkbackuponly +-``` ++-------------------------- + + ==== Running restore from ReaR restore/recovery system + First recover the base OS. This will create all the partitions needed, including +@@ -713,7 +713,7 @@ First recover the base OS. This will create all the partitions needed, including + As illustrated below, you will be prompted to chose a new encryption passphrase. 
+ Please provide one, but you need not care about its value as it will get overwritten + during the next phase: +-``` ++-------------------------- + RESCUE pc-pan:~ # rear -C base_system.conf recover + [...] + Please enter the password for LUKS device cr_vg00-lvol4 (/dev/mapper/vg00-lvol4): +@@ -724,7 +724,7 @@ Creating filesystem of type xfs with mount point /products on /dev/mapper/cr_vg0 + Mounting filesystem /products + Disk layout created. + [...] +-``` ++-------------------------- + + Now we can proceed and restore the encrypted filesystem image. The target filesystem + will have been mounted by ReaR during the previous phase, but this will be +@@ -732,12 +732,12 @@ Now we can proceed and restore the encrypted filesystem image. The target filesy + to "yes". + + As illustrated below, you will be prompted for the target block device to use. + Confirm by pressing Enter or type in another value: +-``` ++-------------------------- + RESCUE pc-pan:~ # rear -C products_backup.conf restoreonly + [...] + Restore backup-products_backup.dd.img to device: [/dev/vg00/lvol4] + [...] +-``` ++-------------------------- + + Please note that the target device will not be re-mounted by the script at the end + of the restore phase. If needed, this should be done manually. + +diff --git a/doc/user-guide/16-Rubrik-CDM.adoc b/doc/user-guide/16-Rubrik-CDM.adoc +index 41f37d20..3ac23b7b 100644 +--- a/doc/user-guide/16-Rubrik-CDM.adoc ++++ b/doc/user-guide/16-Rubrik-CDM.adoc +@@ -84,7 +84,7 @@ To make CentOS v8.0 work the following line was needed: + == Test Matrix + + .Test Matrix +-[%header,format=csv] ++[options="header",format="csv"] + |=== + Operating System,DHCP,Static IP,Virtual,Physical,LVM Root Disk,Plain Root Disk,EXT3,EXT4,XFS,BTRFS,Original Cluster,Replication Cluster + CentOS 7.3,,pass,Pass,,Pass,,,,Pass,,Pass, diff --git a/SOURCES/rear-bz1492177-warning.patch b/SOURCES/rear-bz1492177-warning.patch new file mode 100644 index 0000000..1f5556f --- /dev/null +++ b/SOURCES/rear-bz1492177-warning.patch @@ -0,0 +1,15 @@ +diff --git a/usr/share/rear/output/ISO/Linux-i386/249_check_rhel_grub2_efi_package.sh b/usr/share/rear/output/ISO/Linux-i386/249_check_rhel_grub2_efi_package.sh +new file mode 100644 +index 00000000..4c4ded08 +--- /dev/null ++++ b/usr/share/rear/output/ISO/Linux-i386/249_check_rhel_grub2_efi_package.sh +@@ -0,0 +1,9 @@ ++# 249_check_rhel_grub2_efi_package.sh ++ ++is_true $USING_UEFI_BOOTLOADER || return # empty or 0 means NO UEFI ++ ++( ++ VERBOSE=1 ++ test -r /usr/lib/grub/x86_64-efi/moddep.lst ++ PrintIfError "WARNING: /usr/lib/grub/x86_64-efi/moddep.lst not found, grub2-mkimage will likely fail. Please install the grub2-efi-x64-modules package to fix this." ++) diff --git a/SOURCES/rear-bz1747468.patch b/SOURCES/rear-bz1747468.patch new file mode 100644 index 0000000..a2f7bb3 --- /dev/null +++ b/SOURCES/rear-bz1747468.patch @@ -0,0 +1,112 @@ +diff --git a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh +index 7cfdfcf2..1be17ba8 100644 +--- a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh ++++ b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh +@@ -68,9 +68,9 @@ create_lvmgrp() { + local vg=${vgrp#/dev/} + + cat >> "$LAYOUT_CODE" <> "$LAYOUT_CODE" <&2 ; then + + LogPrint "Sleeping 3 seconds to let udev or systemd-udevd create their devices..." 
+ sleep 3 >&2 +- create_volume_group=0 +- create_logical_volumes=0 ++ create_volume_group=( \$( RmInArray "$vg" "\${create_volume_group[@]}" ) ) ++ create_logical_volumes=( \$( RmInArray "$vg" "\${create_logical_volumes[@]}" ) ) + ++EOF ++ if is_true "${FORCE_VGCFGRESTORE-no}"; then ++ cat >> "$LAYOUT_CODE" <&2 ; then + sleep 3 >&2 + + # All logical volumes have been created, except Thin volumes and pools +- create_volume_group=0 +- create_thin_volumes_only=1 ++ create_volume_group=( \$( RmInArray "$vg" "\${create_volume_group[@]}" ) ) ++ create_thin_volumes_only+=( "$vg" ) + ++EOF ++ fi ++ cat >> "$LAYOUT_CODE" <> "$LAYOUT_CODE" < in the first argument) ++# doesn't contain any LVs that use kernel metadata. ++# If the function returns true, we can safely use vgcfgrestore to restore the VG. ++function lvmgrp_supports_vgcfgrestore() { ++ if is_true "${FORCE_VGCFGRESTORE-no}"; then ++ # If we are willing to use vgcfgrestore --force and then remove broken volumes, ++ # then everything can be considered supported. Don't do it by default though. ++ return 0 ++ fi ++ ++ local lvmvol vgrp lvname size layout kval ++ ++ local supported_layouts=("linear" "striped") ++ ++ while read lvmvol vgrp lvname size layout kval; do ++ [ "$vgrp" == "$1" ] || BugError "vgrp '$vgrp' != '$1'" ++ if ! IsInArray $layout "${supported_layouts[@]}"; then ++ LogPrint "Layout '$layout' of LV '$lvname' in VG '$vgrp' not supported by vgcfgrestore" ++ return 1 ++ fi ++ done < <(grep "^lvmvol $1 " "$LAYOUT_FILE") ++} ++ + # vim: set et ts=4 sw=4: diff --git a/SOURCES/rear-bz1832394.patch b/SOURCES/rear-bz1832394.patch new file mode 100644 index 0000000..3405422 --- /dev/null +++ b/SOURCES/rear-bz1832394.patch @@ -0,0 +1,351 @@ +diff --git a/doc/user-guide/06-layout-configuration.adoc b/doc/user-guide/06-layout-configuration.adoc +index f59384db..88ba0420 100644 +--- a/doc/user-guide/06-layout-configuration.adoc ++++ b/doc/user-guide/06-layout-configuration.adoc +@@ -630,7 +630,7 @@ lvmvol [key:value ...] + + === LUKS Devices === + ---------------------------------- +-crypt /dev/mapper/ [cipher=] [key_size=] [hash=] [uuid=] [keyfile=] [password=] ++crypt /dev/mapper/ [type=] [cipher=] [key_size=] [hash=] [uuid=] [keyfile=] [password=] + ---------------------------------- + + === DRBD === +diff --git a/usr/share/rear/layout/prepare/GNU/Linux/160_include_luks_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/160_include_luks_code.sh +index 05279bc8..0c662f67 100644 +--- a/usr/share/rear/layout/prepare/GNU/Linux/160_include_luks_code.sh ++++ b/usr/share/rear/layout/prepare/GNU/Linux/160_include_luks_code.sh +@@ -1,35 +1,75 @@ ++ + # Code to recreate and/or open LUKS volumes. + + create_crypt() { ++ # See the create_device() function in lib/layout-functions.sh what "device type" means: ++ local device_type="$1" ++ if ! grep -q "^crypt $device_type " "$LAYOUT_FILE" ; then ++ LogPrintError "Skip recreating LUKS volume $device_type (no 'crypt $device_type' entry in $LAYOUT_FILE)" ++ # FIXME: The return code is ignored in the create_device() function in lib/layout-functions.sh: ++ return 1 ++ fi ++ + local crypt target_device source_device options +- read crypt target_device source_device options < <(grep "^crypt $1 " "$LAYOUT_FILE") ++ local mapping_name option key value ++ local cryptsetup_options="" keyfile="" password="" + +- local target_name=${target_device#/dev/mapper/} ++ read crypt target_device source_device options < <( grep "^crypt $device_type " "$LAYOUT_FILE" ) ++ ++ # Careful! 
One cannot 'test -b $source_device' here at the time when this code is run ++ # because the source device is usually a disk partition block device like /dev/sda2 ++ # but disk partition block devices usually do not yet exist (in particular not on a new clean disk) ++ # because partitions are actually created later when the diskrestore.sh script is run ++ # but not here when this code is run which only generates the diskrestore.sh script: ++ if ! test $source_device ; then ++ LogPrintError "Skip recreating LUKS volume $device_type: No source device (see the 'crypt $device_type' entry in $LAYOUT_FILE)" ++ # FIXME: The return code is ignored in the create_device() function in lib/layout-functions.sh: ++ return 1 ++ fi ++ ++ mapping_name=${target_device#/dev/mapper/} ++ if ! test $mapping_name ; then ++ LogPrintError "Skip recreating LUKS volume $device_type on $source_device: No /dev/mapper/... mapping name (see the 'crypt $device_type' entry in $LAYOUT_FILE)" ++ # FIXME: The return code is ignored in the create_device() function in lib/layout-functions.sh: ++ return 1 ++ fi + +- local cryptsetup_options="" keyfile="" password="" +- local option key value + for option in $options ; do +- key=${option%=*} ++ # $option is of the form keyword=value and ++ # we assume keyword has no '=' character but value could be anything that may have a '=' character ++ # so we split keyword=value at the leftmost '=' character so that ++ # e.g. keyword=foo=bar gets split into key="keyword" and value="foo=bar": ++ key=${option%%=*} + value=${option#*=} +- ++ # The "cryptseup luksFormat" command does not require any of the type, cipher, key-size, hash, uuid option values ++ # because if omitted a cryptseup default value is used so we treat those values as optional. ++ # Using plain test to ensure the value is a single non empty and non blank word ++ # without quoting because test " " would return zero exit code ++ # cf. "Beware of the emptiness" in https://github.com/rear/rear/wiki/Coding-Style + case "$key" in +- cipher) +- cryptsetup_options+=" --cipher $value" ++ (type) ++ test $value && cryptsetup_options+=" --type $value" ++ ;; ++ (cipher) ++ test $value && cryptsetup_options+=" --cipher $value" ++ ;; ++ (key_size) ++ test $value && cryptsetup_options+=" --key-size $value" + ;; +- key_size) +- cryptsetup_options+=" --key-size $value" ++ (hash) ++ test $value && cryptsetup_options+=" --hash $value" + ;; +- hash) +- cryptsetup_options+=" --hash $value" ++ (uuid) ++ test $value && cryptsetup_options+=" --uuid $value" + ;; +- uuid) +- cryptsetup_options+=" --uuid $value" ++ (keyfile) ++ test $value && keyfile=$value + ;; +- keyfile) +- keyfile=$value ++ (password) ++ test $value && password=$value + ;; +- password) +- password=$value ++ (*) ++ LogPrintError "Skipping unsupported LUKS cryptsetup option '$key' in 'crypt $target_device $source_device' entry in $LAYOUT_FILE" + ;; + esac + done +@@ -37,26 +77,25 @@ create_crypt() { + cryptsetup_options+=" $LUKS_CRYPTSETUP_OPTIONS" + + ( +- echo "Log \"Creating LUKS device $target_name on $source_device\"" ++ echo "LogPrint \"Creating LUKS volume $mapping_name on $source_device\"" + if [ -n "$keyfile" ] ; then + # Assign a temporary keyfile at this stage so that original keyfiles do not leak onto the rescue medium. + # The original keyfile will be restored from the backup and then re-assigned to the LUKS device in the + # 'finalize' stage. + # The scheme for generating a temporary keyfile path must be the same here and in the 'finalize' stage. 
+- keyfile="${TMPDIR:-/tmp}/LUKS-keyfile-$target_name" ++ keyfile="$TMP_DIR/LUKS-keyfile-$mapping_name" + dd bs=512 count=4 if=/dev/urandom of="$keyfile" + chmod u=rw,go=- "$keyfile" +- + echo "cryptsetup luksFormat --batch-mode $cryptsetup_options $source_device $keyfile" +- echo "cryptsetup luksOpen --key-file $keyfile $source_device $target_name" ++ echo "cryptsetup luksOpen --key-file $keyfile $source_device $mapping_name" + elif [ -n "$password" ] ; then + echo "echo \"$password\" | cryptsetup luksFormat --batch-mode $cryptsetup_options $source_device" +- echo "echo \"$password\" | cryptsetup luksOpen $source_device $target_name" ++ echo "echo \"$password\" | cryptsetup luksOpen $source_device $mapping_name" + else +- echo "LogPrint \"Please enter the password for LUKS device $target_name ($source_device):\"" ++ echo "LogUserOutput \"Set the password for LUKS volume $mapping_name (for 'cryptsetup luksFormat' on $source_device):\"" + echo "cryptsetup luksFormat --batch-mode $cryptsetup_options $source_device" +- echo "LogPrint \"Please re-enter the password for LUKS device $target_name ($source_device):\"" +- echo "cryptsetup luksOpen $source_device $target_name" ++ echo "LogUserOutput \"Enter the password for LUKS volume $mapping_name (for 'cryptsetup luksOpen' on $source_device):\"" ++ echo "cryptsetup luksOpen $source_device $mapping_name" + fi + echo "" + ) >> "$LAYOUT_CODE" +@@ -64,38 +103,61 @@ create_crypt() { + + # Function open_crypt() is meant to be used by the 'mountonly' workflow + open_crypt() { ++ # See the do_mount_device() function in lib/layout-functions.sh what "device type" means: ++ local device_type="$1" ++ if ! grep -q "^crypt $device_type " "$LAYOUT_FILE" ; then ++ LogPrintError "Skip opening LUKS volume $device_type (no 'crypt $device_type' entry in $LAYOUT_FILE)" ++ # FIXME: The return code is ignored in the do_mount_device() function in lib/layout-functions.sh: ++ return 1 ++ fi ++ + local crypt target_device source_device options +- read crypt target_device source_device options < <(grep "^crypt $1 " "$LAYOUT_FILE") ++ local mapping_name option key value ++ local cryptsetup_options="" keyfile="" password="" + +- local target_name=${target_device#/dev/mapper/} ++ read crypt target_device source_device options < <( grep "^crypt $device_type " "$LAYOUT_FILE" ) ++ ++ if ! test -b "$source_device" ; then ++ LogPrintError "Skip opening LUKS volume $device_type on device '$source_device' that is no block device (see the 'crypt $device_type' entry in $LAYOUT_FILE)" ++ # FIXME: The return code is ignored in the do_mount_device() function in lib/layout-functions.sh: ++ return 1 ++ fi ++ ++ mapping_name=${target_device#/dev/mapper/} ++ if ! test $mapping_name ; then ++ LogPrintError "Skip opening LUKS volume $device_type on $source_device: No /dev/mapper/... mapping name (see the 'crypt $device_type' entry in $LAYOUT_FILE)" ++ # FIXME: The return code is ignored in the do_mount_device() function in lib/layout-functions.sh: ++ return 1 ++ fi + +- local cryptsetup_options="" keyfile="" password="" +- local option key value + for option in $options ; do +- key=${option%=*} ++ # $option is of the form keyword=value and ++ # we assume keyword has no '=' character but value could be anything that may have a '=' character ++ # so we split keyword=value at the leftmost '=' character so that ++ # e.g. 
keyword=foo=bar gets split into key="keyword" and value="foo=bar": ++ key=${option%%=*} + value=${option#*=} +- + case "$key" in +- keyfile) +- keyfile=$value ++ (keyfile) ++ test $value && keyfile=$value + ;; +- password) +- password=$value ++ (password) ++ test $value && password=$value + ;; + esac + done + + ( +- echo "Log \"Opening LUKS device $target_name on $source_device\"" ++ echo "LogPrint \"Opening LUKS volume $mapping_name on $source_device\"" + if [ -n "$keyfile" ] ; then + # During a 'mountonly' workflow, the original keyfile is supposed to be + # available at this point. +- echo "cryptsetup luksOpen --key-file $keyfile $source_device $target_name" ++ echo "cryptsetup luksOpen --key-file $keyfile $source_device $mapping_name" + elif [ -n "$password" ] ; then +- echo "echo \"$password\" | cryptsetup luksOpen $source_device $target_name" ++ echo "echo \"$password\" | cryptsetup luksOpen $source_device $mapping_name" + else +- echo "LogPrint \"Please enter the password for LUKS device $target_name ($source_device):\"" +- echo "cryptsetup luksOpen $source_device $target_name" ++ echo "LogUserOutput \"Enter the password for LUKS volume $mapping_name (for 'cryptsetup luksOpen' on $source_device):\"" ++ echo "cryptsetup luksOpen $source_device $mapping_name" + fi + echo "" + ) >> "$LAYOUT_CODE" +diff --git a/usr/share/rear/layout/save/GNU/Linux/260_crypt_layout.sh b/usr/share/rear/layout/save/GNU/Linux/260_crypt_layout.sh +index c1e1cfd5..afeabf6a 100644 +--- a/usr/share/rear/layout/save/GNU/Linux/260_crypt_layout.sh ++++ b/usr/share/rear/layout/save/GNU/Linux/260_crypt_layout.sh +@@ -9,6 +9,8 @@ Log "Saving Encrypted volumes." + REQUIRED_PROGS+=( cryptsetup dmsetup ) + COPY_AS_IS+=( /usr/share/cracklib/\* /etc/security/pwquality.conf ) + ++local invalid_cryptsetup_option_value="no" ++ + while read target_name junk ; do + # find the target device we're mapping + if ! [ -e /dev/mapper/$target_name ] ; then +@@ -30,29 +32,96 @@ while read target_name junk ; do + source_device="$(get_device_name ${slave##*/})" + done + +- if ! cryptsetup isLuks $source_device >/dev/null 2>&1; then ++ if ! blkid -p -o export $source_device >$TMP_DIR/blkid.output ; then ++ LogPrintError "Error: Cannot get attributes for $target_name ('blkid -p -o export $source_device' failed)" + continue + fi + +- # gather crypt information +- cipher=$(cryptsetup luksDump $source_device | grep "Cipher name" | sed -r 's/^.+:\s*(.+)$/\1/') +- mode=$(cryptsetup luksDump $source_device | grep "Cipher mode" | cut -d: -f2- | awk '{printf("%s",$1)};') +- key_size=$(cryptsetup luksDump $source_device | grep "MK bits" | sed -r 's/^.+:\s*(.+)$/\1/') +- hash=$(cryptsetup luksDump $source_device | grep "Hash spec" | sed -r 's/^.+:\s*(.+)$/\1/') +- uuid=$(cryptsetup luksDump $source_device | grep "UUID" | sed -r 's/^.+:\s*(.+)$/\1/') +- keyfile_option=$([ -f /etc/crypttab ] && awk '$1 == "'"$target_name"'" && $3 != "none" && $3 != "-" && $3 != "" { print "keyfile=" $3; }' /etc/crypttab) ++ if ! 
grep -q "TYPE=crypto_LUKS" $TMP_DIR/blkid.output ; then ++ Log "Skipping $target_name (no 'TYPE=crypto_LUKS' in 'blkid -p -o export $source_device' output)" ++ continue ++ fi + +- # LUKS version 2 is not yet suppported, see https://github.com/rear/rear/issues/2204 +- # When LUKS version 2 is used the above code fails at least to determine the hash value +- # so we use an empty hash value as a simple test if gathering crypt information was successful: +- test "$hash" || Error "No hash value for LUKS device '$target_name' at '$source_device' (only LUKS version 1 is supported)" ++ # Detect LUKS version: ++ # Remove all non-digits in particular to avoid leading or trailing spaces in the version string ++ # cf. "Beware of the emptiness" in https://github.com/rear/rear/wiki/Coding-Style ++ # that could happen if the blkid output contains "VERSION = 2" so that 'cut -d= -f2' results " 2". ++ version=$( grep "VERSION" $TMP_DIR/blkid.output | cut -d= -f2 | tr -c -d '[:digit:]' ) ++ if ! test "$version" = "1" -o "$version" = "2" ; then ++ LogPrintError "Error: Unsupported LUKS version for $target_name ('blkid -p -o export $source_device' shows 'VERSION=$version')" ++ continue ++ fi ++ luks_type=luks$version + +- echo "crypt /dev/mapper/$target_name $source_device cipher=$cipher-$mode key_size=$key_size hash=$hash uuid=$uuid $keyfile_option" >> $DISKLAYOUT_FILE +-done < <( dmsetup ls --target crypt ) ++ # Gather crypt information: ++ if ! cryptsetup luksDump $source_device >$TMP_DIR/cryptsetup.luksDump ; then ++ LogPrintError "Error: Cannot get LUKS$version values for $target_name ('cryptsetup luksDump $source_device' failed)" ++ continue ++ fi ++ uuid=$( grep "UUID" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' ) ++ keyfile_option=$( [ -f /etc/crypttab ] && awk '$1 == "'"$target_name"'" && $3 != "none" && $3 != "-" && $3 != "" { print "keyfile=" $3; }' /etc/crypttab ) ++ if test $luks_type = "luks1" ; then ++ cipher_name=$( grep "Cipher name" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' ) ++ cipher_mode=$( grep "Cipher mode" $TMP_DIR/cryptsetup.luksDump | cut -d: -f2- | awk '{printf("%s",$1)};' ) ++ cipher=$cipher_name-$cipher_mode ++ key_size=$( grep "MK bits" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' ) ++ hash=$( grep "Hash spec" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' ) ++ elif test $luks_type = "luks2" ; then ++ cipher=$( grep "cipher:" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' ) ++ # More than one keyslot may be defined - use key_size from the first slot. ++ # Depending on the version the "cryptsetup luksDump" command outputs the key_size value ++ # as a line like ++ # Key: 512 bits ++ # and/or as a line like ++ # Cipher key: 512 bits ++ # cf. https://github.com/rear/rear/pull/2504#issuecomment-718729198 and subsequent comments ++ # so we grep for both lines but use only the first match from the first slot: ++ key_size=$( egrep -m 1 "Key:|Cipher key:" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+) bits$/\1/' ) ++ hash=$( grep "Hash" $TMP_DIR/cryptsetup.luksDump | sed -r 's/^.+:\s*(.+)$/\1/' ) ++ fi + +-# cryptsetup is required in the recovery system if disklayout.conf contains at least one 'crypt' entry +-# see the create_crypt function in layout/prepare/GNU/Linux/160_include_luks_code.sh +-# what program calls are written to diskrestore.sh +-# cf. 
https://github.com/rear/rear/issues/1963 +-grep -q '^crypt ' $DISKLAYOUT_FILE && REQUIRED_PROGS+=( cryptsetup ) || true ++ # Basic checks that the cipher key_size hash uuid values exist ++ # cf. https://github.com/rear/rear/pull/2504#issuecomment-718729198 ++ # because some values are needed during "rear recover" ++ # to set cryptsetup options in layout/prepare/GNU/Linux/160_include_luks_code.sh ++ # and it seems cryptsetup fails when options with empty values are specified ++ # cf. https://github.com/rear/rear/pull/2504#issuecomment-719479724 ++ # For example a LUKS1 crypt entry in disklayout.conf looks like ++ # crypt /dev/mapper/luks1test /dev/sda7 type=luks1 cipher=aes-xts-plain64 key_size=256 hash=sha256 uuid=1b4198c9-d9b0-4c57-b9a3-3433e391e706 ++ # and a LUKS1 crypt entry in disklayout.conf looks like ++ # crypt /dev/mapper/luks2test /dev/sda8 type=luks2 cipher=aes-xts-plain64 key_size=256 hash=sha256 uuid=3e874a28-7415-4f8c-9757-b3f28a96c4d2 ++ # Only the keyfile_option value is optional and the luks_type value is already tested above. ++ # Using plain test to ensure a value is a single non empty and non blank word ++ # without quoting because test " " would return zero exit code ++ # cf. "Beware of the emptiness" in https://github.com/rear/rear/wiki/Coding-Style ++ # Do not error out instantly here but only report errors here so the user can see all messages ++ # and actually error out at the end of this script if there was one actually invalid value: ++ if ! test $cipher ; then ++ LogPrint "No 'cipher' value for LUKS$version volume $target_name in $source_device" ++ fi ++ if test $key_size ; then ++ if ! is_positive_integer $key_size ; then ++ LogPrintError "Error: 'key_size=$key_size' is no positive integer for LUKS$version volume $target_name in $source_device" ++ invalid_cryptsetup_option_value="yes" ++ fi ++ else ++ LogPrint "No 'key_size' value for LUKS$version volume $target_name in $source_device" ++ fi ++ if ! test $hash ; then ++ LogPrint "No 'hash' value for LUKS$version volume $target_name in $source_device" ++ fi ++ if ! test $uuid ; then ++ # Report a missig uuid value as an error to have the user informed ++ # but do not error out here because things can be fixed manually during "rear recover" ++ # cf. 
https://github.com/rear/rear/pull/2506#issuecomment-721757810 ++ # and https://github.com/rear/rear/pull/2506#issuecomment-722315498 ++ # and https://github.com/rear/rear/issues/2509 ++ LogPrintError "Error: No 'uuid' value for LUKS$version volume $target_name in $source_device (mounting it or booting the recreated system may fail)" ++ fi ++ ++ echo "crypt /dev/mapper/$target_name $source_device type=$luks_type cipher=$cipher key_size=$key_size hash=$hash uuid=$uuid $keyfile_option" >> $DISKLAYOUT_FILE ++ ++done < <( dmsetup ls --target crypt ) + ++# Let this script return successfully when invalid_cryptsetup_option_value is not true: ++is_true $invalid_cryptsetup_option_value && Error "Invalid or empty LUKS cryptsetup option value(s) in $DISKLAYOUT_FILE" || true diff --git a/SOURCES/rear-bz1930662.patch b/SOURCES/rear-bz1930662.patch new file mode 100644 index 0000000..aaeae6f --- /dev/null +++ b/SOURCES/rear-bz1930662.patch @@ -0,0 +1,693 @@ +diff --git a/usr/share/rear/backup/NETFS/default/500_make_backup.sh b/usr/share/rear/backup/NETFS/default/500_make_backup.sh +index 02c204c5..60c80b5f 100644 +--- a/usr/share/rear/backup/NETFS/default/500_make_backup.sh ++++ b/usr/share/rear/backup/NETFS/default/500_make_backup.sh +@@ -16,6 +16,8 @@ function set_tar_features () { + FEATURE_TAR_IS_SET=1 + } + ++local backup_prog_rc ++ + local scheme=$( url_scheme $BACKUP_URL ) + local path=$( url_path $BACKUP_URL ) + local opath=$( backup_path $scheme $path ) +diff --git a/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh b/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh +index c560ec94..1692ba4c 100644 +--- a/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh ++++ b/usr/share/rear/backup/RSYNC/GNU/Linux/610_start_selinux.sh +@@ -1,5 +1,7 @@ + # Start SELinux if it was stopped - check presence of $TMP_DIR/selinux.mode + ++local backup_prog_rc ++ + [ -f $TMP_DIR/selinux.mode ] && { + touch "${TMP_DIR}/selinux.autorelabel" + cat $TMP_DIR/selinux.mode > $SELINUX_ENFORCE +@@ -13,19 +15,19 @@ + ssh $RSYNC_USER@$RSYNC_HOST "chmod $v 755 ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" 2>/dev/null + $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" \ + "$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" 2>/dev/null +- _rc=$? +- if [ $_rc -ne 0 ]; then +- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$_rc]}]" ++ backup_prog_rc=$? ++ if [ $backup_prog_rc -ne 0 ]; then ++ LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]" + #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" + fi + ;; + + (rsync) +- $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" ${BACKUP_RSYNC_OPTIONS[@]} \ ++ $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" "${BACKUP_RSYNC_OPTIONS[@]}" \ + "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" +- _rc=$? +- if [ $_rc -ne 0 ]; then +- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$_rc]}]" ++ backup_prog_rc=$? 
++ if [ $backup_prog_rc -ne 0 ]; then ++ LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]" + #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" + fi + ;; +diff --git a/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh b/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh +index cae12e38..9a17d6bb 100644 +--- a/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh ++++ b/usr/share/rear/backup/RSYNC/GNU/Linux/620_force_autorelabel.sh +@@ -1,3 +1,5 @@ ++local backup_prog_rc ++ + [ -f $TMP_DIR/force.autorelabel ] && { + + > "${TMP_DIR}/selinux.autorelabel" +@@ -11,19 +13,19 @@ + ssh $RSYNC_USER@$RSYNC_HOST "chmod $v 755 ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" 2>/dev/null + $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" \ + "$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" 2>/dev/null +- _rc=$? +- if [ $_rc -ne 0 ]; then +- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$_rc]}]" ++ backup_prog_rc=$? ++ if [ $backup_prog_rc -ne 0 ]; then ++ LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]" + #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" + fi + ;; + + (rsync) +- $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" ${BACKUP_RSYNC_OPTIONS[@]} \ ++ $BACKUP_PROG -a "${TMP_DIR}/selinux.autorelabel" "${BACKUP_RSYNC_OPTIONS[@]}" \ + "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup/.autorelabel" +- _rc=$? +- if [ $_rc -ne 0 ]; then +- LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$_rc]}]" ++ backup_prog_rc=$? ++ if [ $backup_prog_rc -ne 0 ]; then ++ LogPrint "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup [${rsync_err_msg[$backup_prog_rc]}]" + #StopIfError "Failed to create .autorelabel on ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" + fi + ;; +@@ -35,8 +37,7 @@ + # probably using the BACKUP=NETFS workflow instead + if [ -d "${opath}" ]; then + if [ ! -f "${opath}/selinux.autorelabel" ]; then +- > "${opath}/selinux.autorelabel" +- StopIfError "Failed to create selinux.autorelabel on ${opath}" ++ > "${opath}/selinux.autorelabel" || Error "Failed to create selinux.autorelabel on ${opath}" + fi + fi + ;; +diff --git a/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh b/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh +index 60330007..cedee9ce 100644 +--- a/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh ++++ b/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh +@@ -4,7 +4,7 @@ + # check for the --relative option in BACKUP_RSYNC_OPTIONS array + # for the default values see the standard definition in conf/default.conf file + +-if ! grep -q relative <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then ++if ! 
grep -q relative <<< "${BACKUP_RSYNC_OPTIONS[*]}" ; then + BACKUP_RSYNC_OPTIONS+=( --relative ) + Log "Added option '--relative' to the BACKUP_RSYNC_OPTIONS array during $WORKFLOW workflow" + fi +diff --git a/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh b/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh +index 0d67d362..750a04ca 100644 +--- a/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh ++++ b/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh +@@ -2,6 +2,9 @@ + # This file is part of Relax-and-Recover, licensed under the GNU General + # Public License. Refer to the included COPYING for full text of license. + ++local backup_prog_rc ++local backup_log_message ++ + Log "Include list:" + while read -r ; do + Log " $REPLY" +@@ -11,9 +14,9 @@ while read -r ; do + Log " $REPLY" + done < $TMP_DIR/backup-exclude.txt + +-LogPrint "Creating $BACKUP_PROG archive on '${RSYNC_HOST}:${RSYNC_PATH}'" ++LogPrint "Creating $BACKUP_PROG backup on '${RSYNC_HOST}:${RSYNC_PATH}'" + +-ProgressStart "Running archive operation" ++ProgressStart "Running backup operation" + ( + case "$(basename $BACKUP_PROG)" in + +@@ -37,7 +40,7 @@ ProgressStart "Running archive operation" + ;; + + (*) +- # no other backup programs foreseen then rsync so far ++ # no other backup programs foreseen than rsync so far + : + ;; + +@@ -96,7 +99,7 @@ case "$(basename $BACKUP_PROG)" in + ;; + esac + +- ProgressInfo "Archived $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec]" ++ ProgressInfo "Backed up $((size/1024/1024)) MiB [avg $((size/1024/(SECONDS-starttime))) KiB/sec]" + done + ;; + +@@ -113,24 +116,23 @@ ProgressStop + wait $BackupPID + + transfertime="$((SECONDS-starttime))" +-_rc="$(cat $TMP_DIR/retval)" ++backup_prog_rc="$(cat $TMP_DIR/retval)" + + sleep 1 + # everyone should see this warning, even if not verbose +-test "$_rc" -gt 0 && VERBOSE=1 LogPrint "WARNING ! +-There was an error (${rsync_err_msg[$_rc]}) during archive creation. +-Please check the archive and see '$RUNTIME_LOGFILE' for more information. ++test "$backup_prog_rc" -gt 0 && Error " ++There was an error (${rsync_err_msg[$backup_prog_rc]}) during backup creation. ++Please check the destination and see '$RUNTIME_LOGFILE' for more information. + +-Since errors are often related to files that cannot be saved by +-$BACKUP_PROG, we will continue the $WORKFLOW process. However, you MUST +-verify the backup yourself before trusting it ! ++If the error is related to files that cannot and should not be saved by ++$BACKUP_PROG, they should be excluded from the backup. + + " + +-_message="$(tail -14 ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log)" +-if [ $_rc -eq 0 -a "$_message" ] ; then +- LogPrint "$_message in $transfertime seconds." ++backup_log_message="$(tail -14 ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log)" ++if [ $backup_prog_rc -eq 0 -a "$backup_log_message" ] ; then ++ LogPrint "$backup_log_message in $transfertime seconds." 
+ elif [ "$size" ]; then +- LogPrint "Archived $((size/1024/1024)) MiB in $((transfertime)) seconds [avg $((size/1024/transfertime)) KiB/sec]" ++ LogPrint "Backed up $((size/1024/1024)) MiB in $((transfertime)) seconds [avg $((size/1024/transfertime)) KiB/sec]" + fi + +diff --git a/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh b/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh +index 01801a4e..b90d459b 100644 +--- a/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh ++++ b/usr/share/rear/backup/RSYNC/default/700_copy_backup_log.sh +@@ -1,6 +1,8 @@ + + # copy the backup.log & rear.log file to remote destination with timestamp added +-Timestamp=$( date +%Y%m%d.%H%M ) ++local timestamp ++ ++timestamp=$( date +%Y%m%d.%H%M ) + + # compress the log file first + gzip "$TMP_DIR/$BACKUP_PROG_ARCHIVE.log" || Error "Failed to 'gzip $TMP_DIR/$BACKUP_PROG_ARCHIVE.log'" +@@ -10,15 +12,15 @@ case $RSYNC_PROTO in + # FIXME: Add an explanatory comment why "2>/dev/null" is useful here + # or remove it according to https://github.com/rear/rear/issues/1395 + $BACKUP_PROG -a "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log.gz" \ +- "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${Timestamp}.log.gz" 2>/dev/null ++ "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${timestamp}.log.gz" 2>/dev/null + +- $BACKUP_PROG -a "$RUNTIME_LOGFILE" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/rear-${Timestamp}.log" 2>/dev/null ++ $BACKUP_PROG -a "$RUNTIME_LOGFILE" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/rear-${timestamp}.log" 2>/dev/null + ;; + (rsync) +- $BACKUP_PROG -a "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log.gz" ${BACKUP_RSYNC_OPTIONS[@]} \ +- "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${Timestamp}.log.gz" ++ $BACKUP_PROG -a "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}.log.gz" "${BACKUP_RSYNC_OPTIONS[@]}" \ ++ "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/${BACKUP_PROG_ARCHIVE}-${timestamp}.log.gz" + +- $BACKUP_PROG -a "$RUNTIME_LOGFILE" ${BACKUP_RSYNC_OPTIONS[@]} "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}//rear-${Timestamp}.log" ++ $BACKUP_PROG -a "$RUNTIME_LOGFILE" "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}//rear-${timestamp}.log" + ;; + esac + +diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf +index 455aa3ce..0c230f38 100644 +--- a/usr/share/rear/conf/default.conf ++++ b/usr/share/rear/conf/default.conf +@@ -1106,7 +1106,8 @@ BACKUP_ONLY_EXCLUDE="no" + MANUAL_INCLUDE=NO + # Disable SELinux policy during backup with NETFS or RSYNC (default yes) + BACKUP_SELINUX_DISABLE=1 +-# Enable integrity check of the backup archive (only with BACKUP=NETFS and BACKUP_PROG=tar) ++# Enable integrity check of the backup archive (full check only with BACKUP=NETFS and BACKUP_PROG=tar, ++# with BACKUP=rsync or BACKUP_PROG=rsync it only checks whether rsync completed the restore successfully) + BACKUP_INTEGRITY_CHECK= + # Define BACKUP_TYPE. + # By default BACKUP_TYPE is empty which means "rear mkbackup" will create a full backup. 
+diff --git a/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh b/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh +index 32ac391d..519febf5 100644 +--- a/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh ++++ b/usr/share/rear/output/RSYNC/default/200_make_prefix_dir.sh +@@ -2,21 +2,19 @@ + # RSYNC_PREFIX=$HOSTNAME as set in default.conf + + # create temporary local work-spaces to collect files (we already make the remote backup dir with the correct mode!!) +-mkdir -p $v -m0750 "${TMP_DIR}/rsync/${RSYNC_PREFIX}" >&2 +-StopIfError "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}'" +-mkdir -p $v -m0755 "${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup" >&2 +-StopIfError "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup'" ++mkdir -p $v -m0750 "${TMP_DIR}/rsync/${RSYNC_PREFIX}" >&2 || Error "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}'" ++mkdir -p $v -m0755 "${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup" >&2 || Error "Could not mkdir '${TMP_DIR}/rsync/${RSYNC_PREFIX}/backup'" + + case $RSYNC_PROTO in + + (ssh) +- $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}" >/dev/null 2>&1 +- StopIfError "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}" ++ $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}" >/dev/null 2>&1 \ ++ || Error "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}" + ;; + + (rsync) +- $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" ${BACKUP_RSYNC_OPTIONS[@]} "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/" >/dev/null +- StopIfError "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}" ++ $BACKUP_PROG -a $v -r "${TMP_DIR}/rsync/${RSYNC_PREFIX}" "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/" >/dev/null \ ++ || Error "Could not create '${RSYNC_PATH}/${RSYNC_PREFIX}' on remote ${RSYNC_HOST}" + ;; + + esac +diff --git a/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh b/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh +index c7b430d8..96b62da1 100644 +--- a/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh ++++ b/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh +@@ -5,19 +5,19 @@ LogPrint "Copying resulting files to $OUTPUT_URL location" + + # if called as mkbackuponly then we just don't have any result files. 
+ if test "$RESULT_FILES" ; then +- Log "Copying files '${RESULT_FILES[@]}' to $OUTPUT_URL location" +- cp $v "${RESULT_FILES[@]}" "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" +- StopIfError "Could not copy files to local rsync location" ++ Log "Copying files '${RESULT_FILES[*]}' to $OUTPUT_URL location" ++ cp $v "${RESULT_FILES[@]}" "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" \ ++ || Error "Could not copy files to local rsync location" + fi + +-echo "$VERSION_INFO" >"${TMP_DIR}/rsync/${RSYNC_PREFIX}/VERSION" +-StopIfError "Could not create VERSION file on local rsync location" ++echo "$VERSION_INFO" >"${TMP_DIR}/rsync/${RSYNC_PREFIX}/VERSION" \ ++ || Error "Could not create VERSION file on local rsync location" + +-cp $v $(get_template "RESULT_usage_$OUTPUT.txt") "${TMP_DIR}/rsync/${RSYNC_PREFIX}/README" +-StopIfError "Could not copy usage file to local rsync location" ++cp $v $(get_template "RESULT_usage_$OUTPUT.txt") "${TMP_DIR}/rsync/${RSYNC_PREFIX}/README" \ ++ || Error "Could not copy usage file to local rsync location" + +-cat "$RUNTIME_LOGFILE" >"${TMP_DIR}/rsync/${RSYNC_PREFIX}/rear.log" +-StopIfError "Could not copy $RUNTIME_LOGFILE to local rsync location" ++cat "$RUNTIME_LOGFILE" >"${TMP_DIR}/rsync/${RSYNC_PREFIX}/rear.log" \ ++ || Error "Could not copy $RUNTIME_LOGFILE to local rsync location" + + case $RSYNC_PROTO in + +@@ -25,20 +25,20 @@ case $RSYNC_PROTO in + Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ ${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/" + # FIXME: Add an explanatory comment why "2>/dev/null" is useful here + # or remove it according to https://github.com/rear/rear/issues/1395 +- $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null +- StopIfError "Could not copy '${RESULT_FILES[@]}' to $OUTPUT_URL location" ++ $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null \ ++ || Error "Could not copy '${RESULT_FILES[*]}' to $OUTPUT_URL location" + ;; + + (rsync) +- Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ ${BACKUP_RSYNC_OPTIONS[@]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/" ++ Log "$BACKUP_PROG -a ${TMP_DIR}/rsync/${RSYNC_PREFIX}/ ${BACKUP_RSYNC_OPTIONS[*]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/" + # FIXME: Add an explanatory comment why "2>/dev/null" is useful here + # or remove it according to https://github.com/rear/rear/issues/1395 +- $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" ${BACKUP_RSYNC_OPTIONS[@]} "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null +- StopIfError "Could not copy '${RESULT_FILES[@]}' to $OUTPUT_URL location" ++ $BACKUP_PROG -a "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" "${BACKUP_RSYNC_OPTIONS[@]}" "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/" 2>/dev/null \ ++ || Error "Could not copy '${RESULT_FILES[*]}' to $OUTPUT_URL location" + ;; + + esac + + # cleanup the temporary space (need it for the log file during backup) +-rm -rf "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" +-LogIfError "Could not cleanup temoprary rsync space: ${TMP_DIR}/rsync/${RSYNC_PREFIX}/" ++rm -rf "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" \ ++ || Log "Could not cleanup temporary rsync space: ${TMP_DIR}/rsync/${RSYNC_PREFIX}/" +diff --git a/usr/share/rear/prep/NETFS/default/400_automatic_exclude_recreate.sh 
b/usr/share/rear/prep/NETFS/default/400_automatic_exclude_recreate.sh +index fadf9d72..3c719c44 100644 +--- a/usr/share/rear/prep/NETFS/default/400_automatic_exclude_recreate.sh ++++ b/usr/share/rear/prep/NETFS/default/400_automatic_exclude_recreate.sh +@@ -31,7 +31,7 @@ case $scheme in + backup_directory_mountpoint=$( df -P "$backup_directory" | tail -1 | awk '{print $6}' ) + test "/" = "$backup_directory_mountpoint" && Error "URL '$BACKUP_URL' has the backup directory '$backup_directory' in the '/' filesystem which is forbidden." + # When the mountpoint of the backup directory is not yet excluded add its mountpoint to the EXCLUDE_RECREATE array: +- if ! grep -q "$backup_directory_mountpoint" <<< $( echo ${EXCLUDE_RECREATE[@]} ) ; then ++ if ! grep -q "$backup_directory_mountpoint" <<< "${EXCLUDE_RECREATE[*]}" ; then + EXCLUDE_RECREATE+=( "fs:$backup_directory_mountpoint" ) + fi + ;; +diff --git a/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh b/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh +index ac26edfa..eb7df29e 100644 +--- a/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh ++++ b/usr/share/rear/prep/RSYNC/GNU/Linux/200_selinux_in_use.sh +@@ -33,7 +33,7 @@ case $(basename $BACKUP_PROG) in + touch $TMP_DIR/force.autorelabel # after reboot the restored system do a forced SELinux relabeling + else + # if --xattrs is already set; no need to do it again +- if ! grep -q xattrs <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then ++ if ! grep -q xattrs <<< "${BACKUP_RSYNC_OPTIONS[*]}" ; then + BACKUP_RSYNC_OPTIONS+=( --xattrs ) + fi + RSYNC_SELINUX=1 # variable used in recover mode (means using xattr and not disable SELinux) +diff --git a/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh b/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh +index b8535352..c964a148 100644 +--- a/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh ++++ b/usr/share/rear/prep/RSYNC/default/100_check_rsync.sh +@@ -33,22 +33,20 @@ RSYNC_PORT=873 # default port (of rsync server) + RSYNC_PATH= + + +-echo $BACKUP_URL | egrep -q '(::)' # new style '::' means rsync protocol +-if [[ $? -eq 0 ]]; then ++if egrep -q '(::)' <<< $BACKUP_URL ; then # new style '::' means rsync protocol + RSYNC_PROTO=rsync + else + RSYNC_PROTO=ssh + fi + +-echo $host | grep -q '@' +-if [[ $? -eq 0 ]]; then ++if grep -q '@' <<< $host ; then + RSYNC_USER="${host%%@*}" # grab user name + else + RSYNC_USER=root + fi + + # remove USER@ if present (we don't need it anymore) +-tmp2="${host#*@}" ++local tmp2="${host#*@}" + + case "$RSYNC_PROTO" in + +@@ -56,8 +54,7 @@ case "$RSYNC_PROTO" in + # tmp2=witsbebelnx02::backup or tmp2=witsbebelnx02:: + RSYNC_HOST="${tmp2%%::*}" + # path=/gdhaese1@witsbebelnx02::backup or path=/backup +- echo $path | grep -q '::' +- if [[ $? -eq 0 ]]; then ++ if grep -q '::' <<< $path ; then + RSYNC_PATH="${path##*::}" + else + RSYNC_PATH="${path##*/}" +@@ -79,8 +76,7 @@ esac + + # check if host is reachable + if test "$PING" ; then +- ping -c 2 "$RSYNC_HOST" >/dev/null +- StopIfError "Backup host [$RSYNC_HOST] not reachable." ++ ping -c 2 "$RSYNC_HOST" >/dev/null || Error "Backup host [$RSYNC_HOST] not reachable." 
+ else + Log "Skipping ping test" + fi +@@ -89,15 +85,15 @@ fi + case "$RSYNC_PROTO" in + + (rsync) +- Log "Test: $BACKUP_PROG ${BACKUP_RSYNC_OPTIONS[@]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/" +- $BACKUP_PROG ${BACKUP_RSYNC_OPTIONS[@]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/ >/dev/null +- StopIfError "Rsync daemon not running on $RSYNC_HOST" ++ Log "Test: $BACKUP_PROG ${BACKUP_RSYNC_OPTIONS[*]} ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/" ++ $BACKUP_PROG "${BACKUP_RSYNC_OPTIONS[@]}" ${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/ >/dev/null \ ++ || Error "Rsync daemon not running on $RSYNC_HOST" + ;; + + (ssh) + Log "Test: ssh ${RSYNC_USER}@${RSYNC_HOST} /bin/true" +- ssh ${RSYNC_USER}@${RSYNC_HOST} /bin/true >/dev/null 2>&1 +- StopIfError "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]" ++ ssh ${RSYNC_USER}@${RSYNC_HOST} /bin/true >/dev/null 2>&1 \ ++ || Error "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]" + ;; + + esac +diff --git a/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh b/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh +index 446dd736..e9103531 100644 +--- a/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh ++++ b/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh +@@ -2,15 +2,17 @@ + # This file is part of Relax-and-Recover, licensed under the GNU General + # Public License. Refer to the included COPYING for full text of license. + # try to grab the rsync protocol version of rsync on the remote server ++ ++local remote_mountpoint ++ + if [ -z "$RSYNC_PROTOCOL_VERSION" ]; then + + case $RSYNC_PROTO in + + (ssh) +- ssh ${RSYNC_USER}@${RSYNC_HOST} rsync --version >"$TMP_DIR/rsync_protocol" 2>&1 +- StopIfError "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]" +- grep -q "protocol version" "$TMP_DIR/rsync_protocol" +- if [ $? -eq 0 ]; then ++ ssh ${RSYNC_USER}@${RSYNC_HOST} rsync --version >"$TMP_DIR/rsync_protocol" 2>&1 \ ++ || Error "Secure shell connection not setup properly [$RSYNC_USER@$RSYNC_HOST]" ++ if grep -q "protocol version" "$TMP_DIR/rsync_protocol" ; then + RSYNC_PROTOCOL_VERSION=$(grep 'protocol version' "$TMP_DIR/rsync_protocol" | awk '{print $6}') + else + RSYNC_PROTOCOL_VERSION=29 # being conservative (old rsync version < 3.0) +@@ -30,25 +32,21 @@ else + + fi + +-if [ "${RSYNC_USER}" != "root" ]; then ++if [ "${RSYNC_USER}" != "root" -a $RSYNC_PROTO = "ssh" ]; then + if [ $RSYNC_PROTOCOL_VERSION -gt 29 ]; then + if grep -q "no xattrs" "$TMP_DIR/rsync_protocol"; then + # no xattrs available in remote rsync, so --fake-super is not possible + Error "rsync --fake-super not possible on system ($RSYNC_HOST) (no xattrs compiled in rsync)" + else + # when using --fake-super we must have user_xattr mount options on the remote mntpt +- _mntpt=$(ssh ${RSYNC_USER}@${RSYNC_HOST} 'cd ${RSYNC_PATH}; df -P .' 2>/dev/null | tail -1 | awk '{print $6}') +- ssh ${RSYNC_USER}@${RSYNC_HOST} "cd ${RSYNC_PATH} && touch .is_xattr_supported && setfattr -n user.comment -v 'File created by ReaR to test if this filesystems supports extended attributes.' .is_xattr_supported && getfattr -n user.comment .is_xattr_supported 1>/dev/null; find .is_xattr_supported -empty -delete" +- StopIfError "Remote file system $_mntpt does not have user_xattr mount option set!" 
+- #BACKUP_RSYNC_OPTIONS+=( --xattrs --rsync-path="""rsync --fake-super""" ) ++ remote_mountpoint=$(ssh ${RSYNC_USER}@${RSYNC_HOST} 'cd ${RSYNC_PATH}; df -P .' 2>/dev/null | tail -1 | awk '{print $6}') ++ ssh ${RSYNC_USER}@${RSYNC_HOST} "cd ${RSYNC_PATH} && touch .is_xattr_supported && setfattr -n user.comment -v 'File created by ReaR to test if this filesystems supports extended attributes.' .is_xattr_supported && getfattr -n user.comment .is_xattr_supported 1>/dev/null; find .is_xattr_supported -empty -delete" \ ++ || Error "Remote file system $remote_mountpoint does not have user_xattr mount option set!" ++ #BACKUP_RSYNC_OPTIONS+=( --xattrs --rsync-path="rsync --fake-super" ) + # see issue #366 for explanation of removing --xattrs +- BACKUP_RSYNC_OPTIONS+=( --rsync-path="""rsync --fake-super""" ) ++ BACKUP_RSYNC_OPTIONS+=( --rsync-path="rsync --fake-super" ) + fi + else +- if [ ${BACKUP_RSYNC_OPTIONS[@]/--fake-super/} != ${BACKUP_RSUNC_OPTIONS[@]} ]; then +- Error "rsync --fake-super not possible on system ($RSYNC_HOST) (please upgrade rsync to 3.x)" +- else +- Log "Warning: rsync --fake-super not possible on system ($RSYNC_HOST) (please upgrade rsync to 3.x)" +- fi ++ Error "rsync --fake-super not possible on system ($RSYNC_HOST) (please upgrade rsync to 3.x)" + fi + fi +diff --git a/usr/share/rear/restore/DUPLICITY/default/400_restore_duplicity.sh b/usr/share/rear/restore/DUPLICITY/default/400_restore_duplicity.sh +index 0a9c9648..220ccc57 100644 +--- a/usr/share/rear/restore/DUPLICITY/default/400_restore_duplicity.sh ++++ b/usr/share/rear/restore/DUPLICITY/default/400_restore_duplicity.sh +@@ -5,6 +5,8 @@ + # Restore from remote backup via DUPLICIY over rsync + + if [ "$BACKUP_PROG" = "duplicity" ]; then ++ local backup_prog_rc ++ local restore_log_message + + LogPrint "========================================================================" + LogPrint "Restoring backup with $BACKUP_PROG from '$BACKUP_DUPLICITY_URL'" +@@ -49,7 +51,8 @@ if [ "$BACKUP_PROG" = "duplicity" ]; then + LogPrint "with CMD: $DUPLICITY_PROG -v 5 $GPG_KEY --force --tempdir=$DUPLICITY_TEMPDIR $BACKUP_DUPLICITY_URL/$HOSTNAME/ $TARGET_FS_ROOT" + $DUPLICITY_PROG -v 5 $GPG_KEY --force --tempdir="$DUPLICITY_TEMPDIR" $BACKUP_DUPLICITY_URL/$HOSTNAME/ $TARGET_FS_ROOT 0<&6 | tee $TMP_DIR/duplicity-restore.log + fi +- _rc=$? ++ # FIXME: this collects the exit code from "tee", not from $DUPLICITY_PROG ++ backup_prog_rc=$? + + transfertime="$((SECONDS-$starttime))" + sleep 1 +@@ -65,20 +68,20 @@ if [ "$BACKUP_PROG" = "duplicity" ]; then + LogPrint "========================================================================" + + +- if [ "$_rc" -gt 0 ]; then ++ if [ "$backup_prog_rc" -gt 0 ]; then + LogPrint "WARNING ! + There was an error while restoring the archive. + Please check '$RUNTIME_LOGFILE' and $TMP_DIR/duplicity-restore.log for more information. + You should also manually check the restored system to see whether it is complete. + " + +- _message="$(tail -14 ${TMP_DIR}/duplicity-restore.log)" ++ restore_log_message="$(tail -14 ${TMP_DIR}/duplicity-restore.log)" + + LogPrint "Last 14 Lines of ${TMP_DIR}/duplicity-restore.log:" +- LogPrint "$_message" ++ LogPrint "$restore_log_message" + fi + +- if [ $_rc -eq 0 ] ; then ++ if [ $backup_prog_rc -eq 0 ] ; then + LogPrint "Restore completed in $transfertime seconds." 
+ fi + +diff --git a/usr/share/rear/restore/RBME/default/400_restore_backup.sh b/usr/share/rear/restore/RBME/default/400_restore_backup.sh +index 28a3c354..3e97e16b 100644 +--- a/usr/share/rear/restore/RBME/default/400_restore_backup.sh ++++ b/usr/share/rear/restore/RBME/default/400_restore_backup.sh +@@ -2,6 +2,8 @@ if [[ -z "$RBME_BACKUP" ]] ; then + Error "No RBME backup selected (BACKUP_URL?). Aborting." + fi + ++local backup_prog_rc ++ + scheme=$(url_scheme "$BACKUP_URL") + + LogPrint "Restoring from backup $RBME_BACKUP." +@@ -43,11 +45,11 @@ transfertime="$((SECONDS-starttime))" + # harvest return code from background job. The kill -0 $BackupPID loop above should + # have made sure that this wait won't do any real "waiting" :-) + wait $BackupPID +-_rc=$? ++backup_prog_rc=$? + + sleep 1 +-test "$_rc" -gt 0 && LogPrint "WARNING ! +-There was an error (${rsync_err_msg[$_rc]}) while restoring the archive. ++test "$backup_prog_rc" -gt 0 && LogPrint "WARNING ! ++There was an error (${rsync_err_msg[$backup_prog_rc]}) while restoring the archive. + Please check '$RUNTIME_LOGFILE' for more information. You should also + manually check the restored system to see whether it is complete. + " +diff --git a/usr/share/rear/restore/RSYNC/default/200_remove_relative_rsync_option.sh b/usr/share/rear/restore/RSYNC/default/200_remove_relative_rsync_option.sh +index 53915322..a792f195 100644 +--- a/usr/share/rear/restore/RSYNC/default/200_remove_relative_rsync_option.sh ++++ b/usr/share/rear/restore/RSYNC/default/200_remove_relative_rsync_option.sh +@@ -4,11 +4,11 @@ + # without the --relative option ; my feeling says it is better to remove it from array BACKUP_RSYNC_OPTIONS + # If I'm wrong please let us know (use issue mentioned above to comment) + +-if grep -q relative <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then ++if grep -q -- "--relative" <<< "${BACKUP_RSYNC_OPTIONS[*]}" ; then + BACKUP_RSYNC_OPTIONS=( $( RmInArray "--relative" "${BACKUP_RSYNC_OPTIONS[@]}" ) ) + Log "Removed option '--relative' from the BACKUP_RSYNC_OPTIONS array during $WORKFLOW workflow" + fi +-if grep -q "-R" <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then ++if grep -q -- "-R" <<< "${BACKUP_RSYNC_OPTIONS[*]}" ; then + BACKUP_RSYNC_OPTIONS=( $( RmInArray "-R" "${BACKUP_RSYNC_OPTIONS[@]}" ) ) + Log "Removed option '-R' from the BACKUP_RSYNC_OPTIONS array during $WORKFLOW workflow" + fi +diff --git a/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh b/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh +index 2a0bf15e..993088be 100644 +--- a/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh ++++ b/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh +@@ -4,10 +4,10 @@ get_size() { + echo $( stat --format '%s' "$TARGET_FS_ROOT/$1" ) + } + +-mkdir -p "${TMP_DIR}/rsync/${NETFS_PREFIX}" +-StopIfError "Could not mkdir '$TMP_DIR/rsync/${NETFS_PREFIX}'" ++local backup_prog_rc ++local restore_log_message + +-LogPrint "Restoring $BACKUP_PROG archive from '${RSYNC_HOST}:${RSYNC_PATH}'" ++LogPrint "Restoring $BACKUP_PROG backup from '${RSYNC_HOST}:${RSYNC_PATH}'" + + ProgressStart "Restore operation" + ( +@@ -33,9 +33,10 @@ ProgressStart "Restore operation" + ;; + + (*) +- # no other backup programs foreseen then rsync so far ++ # no other backup programs foreseen than rsync so far + : + ;; ++ + esac + echo $? 
>$TMP_DIR/retval + ) >"${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log" & +@@ -65,6 +66,7 @@ case "$(basename $BACKUP_PROG)" in + ProgressStep + done + ;; ++ + esac + ProgressStop + +@@ -72,20 +74,28 @@ transfertime="$((SECONDS-starttime))" + + # harvest return code from background job. The kill -0 $BackupPID loop above should + # have made sure that this wait won't do any real "waiting" :-) +-wait $BackupPID +-_rc=$? ++wait $BackupPID || LogPrintError "Restore job returned a nonzero exit code $?" ++# harvest the actual return code of rsync. Finishing the pipeline with an error code above is actually unlikely, ++# because rsync is not the last command in it. But error returns from rsync are common and must be handled. ++backup_prog_rc="$(cat $TMP_DIR/retval)" + + sleep 1 +-test "$_rc" -gt 0 && LogPrint "WARNING ! +-There was an error (${rsync_err_msg[$_rc]}) while restoring the archive. ++if test "$backup_prog_rc" -gt 0 ; then ++ # TODO: Shouldn't we tell the user to check ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log as well? ++ LogPrintError "WARNING ! ++There was an error (${rsync_err_msg[$backup_prog_rc]}) while restoring the backup. + Please check '$RUNTIME_LOGFILE' for more information. You should also + manually check the restored system to see whether it is complete. + " ++ is_true "$BACKUP_INTEGRITY_CHECK" && Error "Integrity check failed, restore aborted because BACKUP_INTEGRITY_CHECK is enabled" ++fi + +-_message="$(tail -14 ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log)" ++restore_log_message="$(tail -14 ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log)" + +-if [ $_rc -eq 0 -a "$_message" ] ; then +- LogPrint "$_message in $transfertime seconds." ++if [ $backup_prog_rc -eq 0 -a "$restore_log_message" ] ; then ++ LogPrint "$restore_log_message in $transfertime seconds." + elif [ "$size" ]; then + LogPrint "Restored $((size/1024/1024)) MiB in $((transfertime)) seconds [avg $((size/1024/transfertime)) KiB/sec]" + fi ++ ++return $backup_prog_rc +diff --git a/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh b/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh +index 3622884a..890161f1 100644 +--- a/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh ++++ b/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh +@@ -3,8 +3,8 @@ + [[ $RSYNC_SELINUX ]] && { + + # if --xattrs is already set; no need to do it again +- if ! grep -q xattrs <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then +- RSYNC_OPTIONS=( "${BACKUP_RSYNC_OPTIONS[@]}" --xattrs ) ++ if ! 
grep -q xattrs <<< "${BACKUP_RSYNC_OPTIONS[*]}" ; then ++ BACKUP_RSYNC_OPTIONS+=( --xattrs ) + fi + + } +diff --git a/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh b/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh +index 47ed9e02..b2fb72f5 100644 +--- a/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh ++++ b/usr/share/rear/verify/RSYNC/default/550_check_remote_backup_archive.sh +@@ -3,12 +3,12 @@ + case $RSYNC_PROTO in + + (ssh) +- ssh ${RSYNC_USER}@${RSYNC_HOST} "ls -ld ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1 +- StopIfError "Archive not found on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]" ++ ssh ${RSYNC_USER}@${RSYNC_HOST} "ls -ld ${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1 \ ++ || Error "Archive not found on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]" + ;; + + (rsync) +- $BACKUP_PROG "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1 +- StopIfError "Archive not found on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]" ++ $BACKUP_PROG "${RSYNC_PROTO}://${RSYNC_USER}@${RSYNC_HOST}:${RSYNC_PORT}/${RSYNC_PATH}/${RSYNC_PREFIX}/backup" >/dev/null 2>&1 \ ++ || Error "Archive not found on [$RSYNC_USER@$RSYNC_HOST:${RSYNC_PATH}/${RSYNC_PREFIX}]" + ;; + esac diff --git a/SOURCES/rear-bz1945869.patch b/SOURCES/rear-bz1945869.patch new file mode 100644 index 0000000..a17a7cd --- /dev/null +++ b/SOURCES/rear-bz1945869.patch @@ -0,0 +1,274 @@ +diff --git a/usr/share/rear/finalize/Linux-i386/670_run_efibootmgr.sh b/usr/share/rear/finalize/Linux-i386/670_run_efibootmgr.sh +old mode 100644 +new mode 100755 +index cc646359..33d87767 +--- a/usr/share/rear/finalize/Linux-i386/670_run_efibootmgr.sh ++++ b/usr/share/rear/finalize/Linux-i386/670_run_efibootmgr.sh +@@ -8,6 +8,10 @@ is_true $USING_UEFI_BOOTLOADER || return 0 + # (cf. finalize/Linux-i386/610_EFISTUB_run_efibootmgr.sh): + is_true $EFI_STUB && return + ++LogPrint "Creating EFI Boot Manager entries..." ++ ++local esp_mountpoint esp_mountpoint_inside boot_efi_parts boot_efi_dev ++ + # When UEFI_BOOTLOADER is not a regular file in the restored target system + # (cf. how esp_mountpoint is set below) it means BIOS is used + # (cf. rescue/default/850_save_sysfs_uefi_vars.sh) +@@ -15,64 +19,80 @@ is_true $EFI_STUB && return + # because when UEFI_BOOTLOADER is empty the test below evaluates to + # test -f /mnt/local/ + # which also returns false because /mnt/local/ is a directory +-# (cf. https://github.com/rear/rear/pull/2051/files#r258826856): +-test -f "$TARGET_FS_ROOT/$UEFI_BOOTLOADER" || return 0 ++# (cf. https://github.com/rear/rear/pull/2051/files#r258826856) ++# but using BIOS conflicts with USING_UEFI_BOOTLOADER is true ++# i.e. we should create EFI Boot Manager entries but we cannot: ++if ! 
test -f "$TARGET_FS_ROOT/$UEFI_BOOTLOADER" ; then ++ LogPrintError "Failed to create EFI Boot Manager entries (UEFI bootloader '$UEFI_BOOTLOADER' not found under target $TARGET_FS_ROOT)" ++ return 1 ++fi + + # Determine where the EFI System Partition (ESP) is mounted in the currently running recovery system: +-esp_mountpoint=$( df -P "$TARGET_FS_ROOT/$UEFI_BOOTLOADER" | tail -1 | awk '{print $6}' ) +-# Use TARGET_FS_ROOT/boot/efi as fallback ESP mountpoint: +-test "$esp_mountpoint" || esp_mountpoint="$TARGET_FS_ROOT/boot/efi" ++esp_mountpoint=$( filesystem_name "$TARGET_FS_ROOT/$UEFI_BOOTLOADER" ) ++# Use TARGET_FS_ROOT/boot/efi as fallback ESP mountpoint (filesystem_name returns "/" ++# if mountpoint not found otherwise): ++if [ "$esp_mountpoint" = "/" ] ; then ++ esp_mountpoint="$TARGET_FS_ROOT/boot/efi" ++ LogPrint "Mountpoint of $TARGET_FS_ROOT/$UEFI_BOOTLOADER not found, trying $esp_mountpoint" ++fi + + # Skip if there is no esp_mountpoint directory (e.g. the fallback ESP mountpoint may not exist). + # Double quotes are mandatory here because 'test -d' without any (possibly empty) argument results true: +-test -d "$esp_mountpoint" || return 0 +- +-BootEfiDev="$( mount | grep "$esp_mountpoint" | awk '{print $1}' )" +-# /dev/sda1 or /dev/mapper/vol34_part2 or /dev/mapper/mpath99p4 +-Dev=$( get_device_name $BootEfiDev ) +-# 1 (must anyway be a low nr <9) +-ParNr=$( get_partition_number $Dev ) +-# /dev/sda or /dev/mapper/vol34_part or /dev/mapper/mpath99p or /dev/mmcblk0p +-Disk=$( echo ${Dev%$ParNr} ) +- +-# Strip trailing partition remainders like '_part' or '-part' or 'p' +-# if we have 'mapper' in disk device name: +-if [[ ${Dev/mapper//} != $Dev ]] ; then +- # we only expect mpath_partX or mpathpX or mpath-partX +- case $Disk in +- (*p) Disk=${Disk%p} ;; +- (*-part) Disk=${Disk%-part} ;; +- (*_part) Disk=${Disk%_part} ;; +- (*) Log "Unsupported kpartx partition delimiter for $Dev" +- esac ++if ! test -d "$esp_mountpoint" ; then ++ LogPrintError "Failed to create EFI Boot Manager entries (no ESP mountpoint directory $esp_mountpoint)" ++ return 1 + fi + +-# For eMMC devices the trailing 'p' in the Disk value +-# (as in /dev/mmcblk0p that is derived from /dev/mmcblk0p1) +-# needs to be stripped (to get /dev/mmcblk0), otherwise the +-# efibootmgr call fails because of a wrong disk device name. +-# See also https://github.com/rear/rear/issues/2103 +-if [[ $Disk = *'/mmcblk'+([0-9])p ]] ; then +- Disk=${Disk%p} +-fi ++# Mount point inside the target system, ++# accounting for possible trailing slashes in TARGET_FS_ROOT ++esp_mountpoint_inside="${esp_mountpoint#${TARGET_FS_ROOT%%*(/)}}" + +-# For NVMe devices the trailing 'p' in the Disk value +-# (as in /dev/nvme0n1p that is derived from /dev/nvme0n1p1) +-# needs to be stripped (to get /dev/nvme0n1), otherwise the +-# efibootmgr call fails because of a wrong disk device name. +-# See also https://github.com/rear/rear/issues/1564 +-if [[ $Disk = *'/nvme'+([0-9])n+([0-9])p ]] ; then +- Disk=${Disk%p} ++boot_efi_parts=$( find_partition "fs:$esp_mountpoint_inside" fs ) ++if ! test "$boot_efi_parts" ; then ++ LogPrint "Unable to find ESP $esp_mountpoint_inside in layout" ++ LogPrint "Trying to determine device currently mounted at $esp_mountpoint as fallback" ++ boot_efi_dev="$( mount | grep "$esp_mountpoint" | awk '{print $1}' )" ++ if ! 
test "$boot_efi_dev" ; then ++ LogPrintError "Cannot create EFI Boot Manager entry (unable to find ESP $esp_mountpoint among mounted devices)" ++ return 1 ++ fi ++ if test $(get_component_type "$boot_efi_dev") = part ; then ++ boot_efi_parts="$boot_efi_dev" ++ else ++ boot_efi_parts=$( find_partition "$boot_efi_dev" ) ++ fi ++ if ! test "$boot_efi_parts" ; then ++ LogPrintError "Cannot create EFI Boot Manager entry (unable to find partition for $boot_efi_dev)" ++ return 1 ++ fi ++ LogPrint "Using fallback EFI boot partition(s) $boot_efi_parts (unable to find ESP $esp_mountpoint_inside in layout)" + fi + ++local bootloader partition_block_device partition_number disk efipart ++ + # EFI\fedora\shim.efi +-BootLoader=$( echo $UEFI_BOOTLOADER | cut -d"/" -f4- | sed -e 's;/;\\;g' ) +-LogPrint "Creating EFI Boot Manager entry '$OS_VENDOR $OS_VERSION' for '$BootLoader' (UEFI_BOOTLOADER='$UEFI_BOOTLOADER')" +-Log efibootmgr --create --gpt --disk ${Disk} --part ${ParNr} --write-signature --label \"${OS_VENDOR} ${OS_VERSION}\" --loader \"\\${BootLoader}\" +-if efibootmgr --create --gpt --disk ${Disk} --part ${ParNr} --write-signature --label "${OS_VENDOR} ${OS_VERSION}" --loader "\\${BootLoader}" ; then +- # ok, boot loader has been set-up - tell rear we are done using following var. +- NOBOOTLOADER='' +- return +-fi ++bootloader=$( echo $UEFI_BOOTLOADER | cut -d"/" -f4- | sed -e 's;/;\\;g' ) ++ ++for efipart in $boot_efi_parts ; do ++ # /dev/sda1 or /dev/mapper/vol34_part2 or /dev/mapper/mpath99p4 ++ partition_block_device=$( get_device_name $efipart ) ++ # 1 or 2 or 4 for the examples above ++ partition_number=$( get_partition_number $partition_block_device ) ++ if ! disk=$( get_device_from_partition $partition_block_device $partition_number ) ; then ++ LogPrintError "Cannot create EFI Boot Manager entry for ESP $partition_block_device (unable to find the underlying disk)" ++ # do not error out - we may be able to locate other disks if there are more of them ++ continue ++ fi ++ LogPrint "Creating EFI Boot Manager entry '$OS_VENDOR $OS_VERSION' for '$bootloader' (UEFI_BOOTLOADER='$UEFI_BOOTLOADER') " ++ Log efibootmgr --create --gpt --disk $disk --part $partition_number --write-signature --label \"${OS_VENDOR} ${OS_VERSION}\" --loader \"\\${bootloader}\" ++ if efibootmgr --create --gpt --disk $disk --part $partition_number --write-signature --label "${OS_VENDOR} ${OS_VERSION}" --loader "\\${bootloader}" ; then ++ # ok, boot loader has been set-up - continue with other disks (ESP can be on RAID) ++ NOBOOTLOADER='' ++ else ++ LogPrintError "efibootmgr failed to create EFI Boot Manager entry on $disk partition $partition_number (ESP $partition_block_device )" ++ fi ++done + +-LogPrintError "efibootmgr failed to create EFI Boot Manager entry for '$BootLoader' (UEFI_BOOTLOADER='$UEFI_BOOTLOADER')" ++is_true $NOBOOTLOADER || return 0 ++LogPrintError "efibootmgr failed to create EFI Boot Manager entry for '$bootloader' (UEFI_BOOTLOADER='$UEFI_BOOTLOADER')" ++return 1 +diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh +index 54ddb50f..cdd81a14 100644 +--- a/usr/share/rear/lib/layout-functions.sh ++++ b/usr/share/rear/lib/layout-functions.sh +@@ -302,12 +302,20 @@ get_child_components() { + done + } + +-# Return all ancestors of component $1 [ of type $2 ] ++# Return all ancestors of component $1 [ of type $2 [ skipping types $3 during resolution ] ] + get_parent_components() { +- declare -a ancestors devlist +- declare current child parent ++ declare -a ancestors 
devlist ignoretypes ++ declare current child parent parenttype + + devlist=( "$1" ) ++ if [[ "$3" ]] ; then ++ # third argument should, if present, be a space-separated list ++ # of types to ignore when walking up the dependency tree. ++ # Convert it to array ++ ignoretypes=( $3 ) ++ else ++ ignoretypes=() ++ fi + while (( ${#devlist[@]} )) ; do + current=${devlist[0]} + +@@ -318,6 +326,13 @@ get_parent_components() { + if IsInArray "$parent" "${ancestors[@]}" ; then + continue + fi ++ ### ...test if parent is of a correct type if requested... ++ if [[ ${#ignoretypes[@]} -gt 0 ]] ; then ++ parenttype=$(get_component_type "$parent") ++ if IsInArray "$parenttype" "${ignoretypes[@]}" ; then ++ continue ++ fi ++ fi + ### ...and add them to the list + devlist+=( "$parent" ) + ancestors+=( "$parent" ) +@@ -345,22 +360,24 @@ get_parent_components() { + } + + # find_devices ++# ${2+"$2"} in the following functions ensures that $2 gets passed down quoted if present ++# and ignored if not present + # Find the disk device(s) component $1 resides on. + find_disk() { +- get_parent_components "$1" "disk" ++ get_parent_components "$1" "disk" ${2+"$2"} + } + + find_multipath() { +- get_parent_components "$1" "multipath" ++ get_parent_components "$1" "multipath" ${2+"$2"} + } + + find_disk_and_multipath() { +- find_disk "$1" +- is_true "$AUTOEXCLUDE_MULTIPATH" || find_multipath "$1" ++ find_disk "$1" ${2+"$2"} ++ is_true "$AUTOEXCLUDE_MULTIPATH" || find_multipath "$1" ${2+"$2"} + } + + find_partition() { +- get_parent_components "$1" "part" ++ get_parent_components "$1" "part" ${2+"$2"} + } + + # The get_partition_number function +@@ -413,6 +430,54 @@ get_partition_number() { + echo $partition_number + } + ++# Extract the underlying device name from the full partition device name. ++# Underlying device may be a disk, a multipath device or other devices that can be partitioned. ++# Should we use the information in $LAYOUT_DEPS, like get_parent_component does, ++# instead of string munging? ++function get_device_from_partition() { ++ local partition_block_device ++ local device ++ local partition_number ++ ++ partition_block_device=$1 ++ test -b "$partition_block_device" || BugError "get_device_from_partition called with '$partition_block_device' that is no block device" ++ partition_number=${2-$(get_partition_number $partition_block_device )} ++ # /dev/sda or /dev/mapper/vol34_part or /dev/mapper/mpath99p or /dev/mmcblk0p ++ device=${partition_block_device%$partition_number} ++ ++ # Strip trailing partition remainders like '_part' or '-part' or 'p' ++ # if we have 'mapper' in disk device name: ++ if [[ ${partition_block_device/mapper//} != $partition_block_device ]] ; then ++ # we only expect mpath_partX or mpathpX or mpath-partX ++ case $device in ++ (*p) device=${device%p} ;; ++ (*-part) device=${device%-part} ;; ++ (*_part) device=${device%_part} ;; ++ (*) Log "Unsupported kpartx partition delimiter for $partition_block_device" ++ esac ++ fi ++ ++ # For eMMC devices the trailing 'p' in the $device value ++ # (as in /dev/mmcblk0p that is derived from /dev/mmcblk0p1) ++ # needs to be stripped (to get /dev/mmcblk0), otherwise the ++ # efibootmgr call fails because of a wrong disk device name. 
++ # See also https://github.com/rear/rear/issues/2103 ++ if [[ $device = *'/mmcblk'+([0-9])p ]] ; then ++ device=${device%p} ++ fi ++ ++ # For NVMe devices the trailing 'p' in the $device value ++ # (as in /dev/nvme0n1p that is derived from /dev/nvme0n1p1) ++ # needs to be stripped (to get /dev/nvme0n1), otherwise the ++ # efibootmgr call fails because of a wrong disk device name. ++ # See also https://github.com/rear/rear/issues/1564 ++ if [[ $device = *'/nvme'+([0-9])n+([0-9])p ]] ; then ++ device=${device%p} ++ fi ++ ++ test -b "$device" && echo $device ++} ++ + # Returns partition start block or 'unknown' + # sda/sda1 or + # dm-XX diff --git a/SOURCES/rear-bz1958247.patch b/SOURCES/rear-bz1958247.patch new file mode 100644 index 0000000..c85f6ad --- /dev/null +++ b/SOURCES/rear-bz1958247.patch @@ -0,0 +1,2040 @@ +diff --git a/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh +index 64b7a792..6ba7d543 100644 +--- a/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh ++++ b/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh +@@ -1,10 +1,4 @@ +-# create mount point + if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]; then +- mkdir -p $v "$BUILD_DIR/outputfs" >&2 +- StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" +- +- AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- + if [[ "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]] ; then + BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_MOUNTCMD" + fi +diff --git a/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh +index 185dbd95..8525ab1d 100644 +--- a/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh ++++ b/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh +@@ -6,10 +6,4 @@ if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" + fi + + umount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs +- +- rmdir $v $BUILD_DIR/outputfs >&2 +- if [[ $? 
-eq 0 ]] ; then +- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask +- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- fi + fi +diff --git a/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh b/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh +index 5c7696db..b6a955db 100644 +--- a/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh ++++ b/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh +@@ -1,9 +1,3 @@ +-# create mount point +-mkdir -p $v "$BUILD_DIR/outputfs" >&2 +-StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" +- +-AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- + if [[ "$BACKUP_MOUNTCMD" ]] ; then + BACKUP_URL="var://BACKUP_MOUNTCMD" + fi +diff --git a/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh b/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh +index d79653b4..9bf8f76a 100644 +--- a/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh ++++ b/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh +@@ -3,20 +3,17 @@ + [ -z "${NETFS_KEEP_OLD_BACKUP_COPY}" ] && return + + # do not do this for tapes and special attention for file:///path +-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" +-local scheme=$(url_scheme ${!url}) +-local path=$(url_path ${!url}) +-local opath=$(backup_path $scheme $path) ++local scheme=$( url_scheme $BACKUP_URL ) ++local path=$( url_path $BACKUP_URL ) ++local opath=$( backup_path $scheme $path ) + + # if $opath is empty return silently (e.g. scheme tape) + [ -z "$opath" ] && return 0 + + if ! test -f "${opath}/.lockfile" ; then + if test -d "${opath}" ; then +- rm -rf $v "${opath}.old" >&2 +- StopIfError "Could not remove '${opath}.old'" +- mv -f $v "${opath}" "${opath}.old" >&2 +- StopIfError "Could not move '${opath}'" ++ rm -rf $v "${opath}.old" || Error "Could not remove '${opath}.old'" ++ mv -f $v "${opath}" "${opath}.old" || Error "Could not move '${opath}'" + fi + else + # lockfile was already made through the output workflow (hands off) +diff --git a/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh b/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh +index db15bca2..43f5b651 100644 +--- a/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh ++++ b/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh +@@ -2,13 +2,14 @@ + # to $HOSTNAME + + # do not do this for tapes and special attention for file:///path +-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" +-local scheme=$(url_scheme ${!url}) +-local path=$(url_path ${!url}) +-local opath=$(backup_path $scheme $path) ++local scheme=$( url_scheme $BACKUP_URL ) ++local path=$( url_path $BACKUP_URL ) ++local opath=$( backup_path $scheme $path ) + + # if $opath is empty return silently (e.g. 
scheme tape) + [ -z "$opath" ] && return 0 + +-mkdir -p $v -m0750 "${opath}" >&2 +-StopIfError "Could not mkdir '${opath}'" ++mkdir -p $v -m0750 "${opath}" && return ++ ++# A failure to create the $NETFS_PREFIX sub-directory is fatal: ++Error "Failed to create '$opath' directory for BACKUP_URL=$BACKUP_URL" +diff --git a/usr/share/rear/backup/NETFS/default/250_create_lock.sh b/usr/share/rear/backup/NETFS/default/250_create_lock.sh +index 59090a22..36d547ec 100644 +--- a/usr/share/rear/backup/NETFS/default/250_create_lock.sh ++++ b/usr/share/rear/backup/NETFS/default/250_create_lock.sh +@@ -2,15 +2,13 @@ + # made by a previous mkbackup run when the variable NETFS_KEEP_OLD_BACKUP_COPY has been set + + # do not do this for tapes and special attention for file:///path +-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" +-local scheme=$(url_scheme ${!url}) +-local path=$(url_path ${!url}) +-local opath=$(backup_path $scheme $path) ++local scheme=$( url_scheme $BACKUP_URL ) ++local path=$( url_path $BACKUP_URL ) ++local opath=$( backup_path $scheme $path ) + + # if $opath is empty return silently (e.g. scheme tape) + [ -z "$opath" ] && return 0 + + if test -d "${opath}" ; then +- > "${opath}/.lockfile" +- StopIfError "Could not create '${opath}/.lockfile'" ++ > "${opath}/.lockfile" || Error "Could not create '${opath}/.lockfile'" + fi +diff --git a/usr/share/rear/backup/NETFS/default/970_remove_lock.sh b/usr/share/rear/backup/NETFS/default/970_remove_lock.sh +index f69f7bd8..7038f5b9 100644 +--- a/usr/share/rear/backup/NETFS/default/970_remove_lock.sh ++++ b/usr/share/rear/backup/NETFS/default/970_remove_lock.sh +@@ -1,8 +1,7 @@ + # remove the lockfile +-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" +-local scheme=$(url_scheme ${!url}) +-local path=$(url_path ${!url}) +-local opath=$(backup_path $scheme $path) ++local scheme=$( url_scheme $BACKUP_URL ) ++local path=$( url_path $BACKUP_URL ) ++local opath=$( backup_path $scheme $path ) + + # if $opath is empty return silently (e.g. scheme tape) + [ -z "$opath" ] && return 0 +diff --git a/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh b/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh +index f28c6cbf..e1954dc5 100644 +--- a/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh ++++ b/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh +@@ -5,9 +5,3 @@ if [[ "$BACKUP_UMOUNTCMD" ]] ; then + fi + + umount_url $BACKUP_URL $BUILD_DIR/outputfs +- +-rmdir $v $BUILD_DIR/outputfs >&2 +-if [[ $? -eq 0 ]] ; then +- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask +- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +-fi +diff --git a/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh b/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh +deleted file mode 100644 +index 6111f89b..00000000 +--- a/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh ++++ /dev/null +@@ -1,33 +0,0 @@ +- +-# Backup all that is explicitly specified in BACKUP_PROG_INCLUDE: +-for backup_include_item in "${BACKUP_PROG_INCLUDE[@]}" ; do +- test "$backup_include_item" && echo "$backup_include_item" +-done > $TMP_DIR/backup-include.txt +- +-# Implicitly also backup all local filesystems as defined in mountpoint_device +-# except BACKUP_ONLY_INCLUDE or MANUAL_INCLUDE is set: +-if ! 
is_true "$BACKUP_ONLY_INCLUDE" ; then +- if [ "${MANUAL_INCLUDE:-NO}" != "YES" ] ; then +- # Add the mountpoints that will be recovered to the backup include list +- # unless a mountpoint is excluded: +- while read mountpoint device junk ; do +- if ! IsInArray "$mountpoint" "${EXCLUDE_MOUNTPOINTS[@]}" ; then +- echo "$mountpoint" +- fi +- done <"$VAR_DIR/recovery/mountpoint_device" >> $TMP_DIR/backup-include.txt +- fi +-fi +- +-# Exclude all that is explicitly specified in BACKUP_PROG_EXCLUDE: +-for backup_exclude_item in "${BACKUP_PROG_EXCLUDE[@]}" ; do +- test "$backup_exclude_item" && echo "$backup_exclude_item" +-done > $TMP_DIR/backup-exclude.txt +- +-# Implicitly also add excluded mountpoints to the backup exclude list +-# except BACKUP_ONLY_EXCLUDE is set: +-if ! is_true "$BACKUP_ONLY_EXCLUDE" ; then +- for excluded_mountpoint in "${EXCLUDE_MOUNTPOINTS[@]}" ; do +- test "$excluded_mountpoint" && echo "$excluded_mountpoint/" +- done >> $TMP_DIR/backup-exclude.txt +-fi +- +diff --git a/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh b/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh +new file mode 120000 +index 00000000..d8d12c0b +--- /dev/null ++++ b/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh +@@ -0,0 +1 @@ ++../../NETFS/default/400_create_include_exclude_files.sh +\ No newline at end of file +diff --git a/usr/share/rear/build/YUM/default/600_create_python_symlink.sh b/usr/share/rear/build/YUM/default/600_create_python_symlink.sh +deleted file mode 100644 +index 29d85905..00000000 +--- a/usr/share/rear/build/YUM/default/600_create_python_symlink.sh ++++ /dev/null +@@ -1,14 +0,0 @@ +-# Copied from ../../DUPLICITY/default/600_create_python_symlink.sh for YUM +-# make sure we have a symbolic link to the python binary +-( +- cd $ROOTFS_DIR/bin +- for py in $(find . 
-name "python*" ) +- do +- this_py=${py#./*} # should be without ./ +- case $this_py in +- python) break ;; +- python2*|python3*) ln -sf $v $this_py python >&2 ;; +- esac +- done +-) +- +diff --git a/usr/share/rear/build/YUM/default/600_create_python_symlink.sh b/usr/share/rear/build/YUM/default/600_create_python_symlink.sh +new file mode 120000 +index 00000000..d776e5aa +--- /dev/null ++++ b/usr/share/rear/build/YUM/default/600_create_python_symlink.sh +@@ -0,0 +1 @@ ++../../DUPLICITY/default/600_create_python_symlink.sh +\ No newline at end of file +diff --git a/usr/share/rear/lib/framework-functions.sh b/usr/share/rear/lib/framework-functions.sh +index f245861a..b5324747 100644 +--- a/usr/share/rear/lib/framework-functions.sh ++++ b/usr/share/rear/lib/framework-functions.sh +@@ -122,7 +122,7 @@ function cleanup_build_area_and_end_program () { + # Cleanup build area + Log "Finished in $((SECONDS-STARTTIME)) seconds" + if is_true "$KEEP_BUILD_DIR" ; then +- LogPrint "You should also rm -Rf $BUILD_DIR" ++ LogPrint "You should also rm -Rf --one-file-system $BUILD_DIR" + else + Log "Removing build area $BUILD_DIR" + rm -Rf $TMP_DIR +@@ -132,15 +132,11 @@ function cleanup_build_area_and_end_program () { + # in worst case it could not umount; so before remove the BUILD_DIR check if above outputfs is gone + if mountpoint -q "$BUILD_DIR/outputfs" ; then + # still mounted it seems +- LogPrint "Directory $BUILD_DIR/outputfs still mounted - trying lazy umount" + sleep 2 +- umount -f -l $BUILD_DIR/outputfs >&2 +- rm -Rf $v $BUILD_DIR/outputfs >&2 +- else +- # not mounted so we can safely delete $BUILD_DIR/outputfs +- rm -Rf $BUILD_DIR/outputfs ++ umount_mountpoint_lazy $BUILD_DIR/outputfs + fi +- rm -Rf $v $BUILD_DIR >&2 ++ remove_temporary_mountpoint '$BUILD_DIR/outputfs' || BugError "Directory $BUILD_DIR/outputfs not empty, can not remove" ++ rmdir $v $BUILD_DIR >&2 + fi + Log "End of program reached" + } +diff --git a/usr/share/rear/lib/global-functions.sh b/usr/share/rear/lib/global-functions.sh +index 4264bb53..a1aec604 100644 +--- a/usr/share/rear/lib/global-functions.sh ++++ b/usr/share/rear/lib/global-functions.sh +@@ -342,7 +342,44 @@ function url_path() { + echo /${url_without_scheme#*/} + } + +-backup_path() { ++### Returns true if one can upload files to the URL ++function scheme_accepts_files() { ++ local scheme=$1 ++ case $scheme in ++ (null|tape|obdr) ++ # tapes do not support uploading arbitrary files, one has to handle them ++ # as special case (usually passing the tape device as argument to tar) ++ # null means do not upload anything anywhere, leave the files under /var/lib/rear/output ++ return 1 ++ ;; ++ (*) ++ # most URL schemes support uploading files ++ return 0 ++ ;; ++ esac ++} ++ ++### Returns true if URLs with the given scheme corresponds to a path inside ++### a mountable fileystem and one can put files directly into it. ++### The actual path will be returned by backup_path() / output_path(). ++### If returns false, using backup_path() / output_path() has no sense ++### and one must use a scheme-specific method (like lftp or writing them to a tape) ++### to upload files to the destination instead of just "cp" or other direct filesystem access. 
++### Returning true does not imply that the URL is currently mounted at a filesystem and usable, ++### only that it can be mounted (use mount_url() first) ++function scheme_supports_filesystem() { ++ local scheme=$1 ++ case $scheme in ++ (null|tape|obdr|rsync|fish|ftp|ftps|hftp|http|https|sftp) ++ return 1 ++ ;; ++ (*) ++ return 0 ++ ;; ++ esac ++} ++ ++function backup_path() { + local scheme=$1 + local path=$2 + case $scheme in +@@ -368,13 +405,21 @@ backup_path() { + echo "$path" + } + +-output_path() { ++function output_path() { + local scheme=$1 + local path=$2 ++ ++ # Abort for unmountable schemes ("tape-like" or "ftp-like" schemes). ++ # Returning an empty string for them is not satisfactory: it could lead to caller putting its files ++ # under / instead of the intended location if the result is not checked for emptiness. ++ # Returning ${BUILD_DIR}/outputfs/${OUTPUT_PREFIX} for unmountable URLs is also not satisfactory: ++ # caller could put its files there expecting them to be safely at their destination, ++ # but if the directory is not a mountpoint, they would get silently lost. ++ # The caller needs to check the URL/scheme using scheme_supports_filesystem() ++ # before calling this function. ++ scheme_supports_filesystem $scheme || BugError "output_path() called with scheme $scheme that does not support filesystem access" ++ + case $scheme in +- (null|tape) # no path for tape required +- path="" +- ;; + (file) # type file needs a local path (must be mounted by user) + path="$path/${OUTPUT_PREFIX}" + ;; +@@ -387,17 +432,33 @@ output_path() { + + + ### Mount URL $1 at mountpoint $2[, with options $3] +-mount_url() { ++function mount_url() { + local url=$1 + local mountpoint=$2 + local defaultoptions="rw,noatime" + local options=${3:-"$defaultoptions"} ++ local scheme ++ ++ scheme=$( url_scheme $url ) ++ ++ # The cases where we return 0 are those that do not need umount and also do not need ExitTask handling. ++ # They thus need to be kept in sync with umount_url() so that RemoveExitTasks is used ++ # iff AddExitTask was used in mount_url(). ++ ++ if ! scheme_supports_filesystem $scheme ; then ++ ### Stuff like null|tape|rsync|fish|ftp|ftps|hftp|http|https|sftp ++ ### Don't need to umount anything for these. ++ ### file: supports filesystem access, but is not mounted and unmounted, ++ ### so it has to be handled specially below. ++ ### Similarly for iso: which gets mounted and unmounted only during recovery. ++ return 0 ++ fi + + ### Generate a mount command + local mount_cmd +- case $(url_scheme $url) in +- (null|tape|file|rsync|fish|ftp|ftps|hftp|http|https|sftp) +- ### Don't need to mount anything for these ++ case $scheme in ++ (file) ++ ### Don't need to mount anything for file:, it is already mounted by user + return 0 + ;; + (iso) +@@ -558,22 +619,47 @@ mount_url() { + ;; + esac + ++ # create mount point ++ mkdir -p $v "$mountpoint" || Error "Could not mkdir '$mountpoint'" ++ AddExitTask "remove_temporary_mountpoint '$mountpoint'" ++ + Log "Mounting with '$mount_cmd'" + # eval is required when mount_cmd contains single quoted stuff (e.g. see the above mount_cmd for curlftpfs) + eval $mount_cmd || Error "Mount command '$mount_cmd' failed." 
+ +- AddExitTask "umount -f $v '$mountpoint' >&2" ++ AddExitTask "perform_umount_url '$url' '$mountpoint' lazy" + return 0 + } + +-### Unmount url $1 at mountpoint $2 +-umount_url() { ++function remove_temporary_mountpoint() { ++ if test -d "$1" ; then ++ rmdir $v "$1" ++ fi ++} ++ ++### Unmount url $1 at mountpoint $2, perform mountpoint cleanup and exit task + error handling ++function umount_url() { + local url=$1 + local mountpoint=$2 ++ local scheme + +- case $(url_scheme $url) in +- (null|tape|file|rsync|fish|ftp|ftps|hftp|http|https|sftp) +- ### Don't need to umount anything for these ++ scheme=$( url_scheme $url ) ++ ++ # The cases where we return 0 are those that do not need umount and also do not need ExitTask handling. ++ # They thus need to be kept in sync with mount_url() so that RemoveExitTasks is used ++ # iff AddExitTask was used in mount_url(). ++ ++ if ! scheme_supports_filesystem $scheme ; then ++ ### Stuff like null|tape|rsync|fish|ftp|ftps|hftp|http|https|sftp ++ ### Don't need to umount anything for these. ++ ### file: supports filesystem access, but is not mounted and unmounted, ++ ### so it has to be handled specially below. ++ ### Similarly for iso: which gets mounted and unmounted only during recovery. ++ return 0 ++ fi ++ ++ case $scheme in ++ (file) + return 0 + ;; + (iso) +@@ -581,42 +667,106 @@ umount_url() { + return 0 + fi + ;; +- (sshfs) +- umount_cmd="fusermount -u $mountpoint" +- ;; +- (davfs) +- umount_cmd="umount $mountpoint" +- # Wait for 3 sek. then remove the cache-dir /var/cache/davfs +- sleep 30 +- # ToDo: put in here the cache-dir from /etc/davfs2/davfs.conf +- # and delete only the just used cache +- #rm -rf /var/cache/davfs2/** +- rm -rf /var/cache/davfs2/*outputfs* +- +- ;; +- (var) +- local var=$(url_host $url) +- umount_cmd="${!var} $mountpoint" ++ (*) ++ # Schemes that actually need nontrivial umount are handled below. ++ # We do not handle them in the default branch because in the case of iso: ++ # it depends on the current workflow whether umount is needed or not. ++ : ++ esac + +- Log "Unmounting with '$umount_cmd'" +- $umount_cmd +- StopIfError "Unmounting failed." ++ # umount_url() is a wrapper that takes care of exit tasks and error handling and mountpoint cleanup. ++ # Therefore it also determines if exit task and mountpoint handling is required and returns early if not. ++ # The actual umount job is performed inside perform_umount_url(). ++ # We do not request lazy umount here because we want umount errors to be reliably reported. ++ perform_umount_url $url $mountpoint || Error "Unmounting '$mountpoint' failed." + +- RemoveExitTask "umount -f $v '$mountpoint' >&2" +- return 0 ++ RemoveExitTask "perform_umount_url '$url' '$mountpoint' lazy" ++ ++ remove_temporary_mountpoint '$mountpoint' && RemoveExitTask "remove_temporary_mountpoint '$mountpoint'" ++ return 0 ++} ++ ++### Unmount url $1 at mountpoint $2 [ lazily if $3 is set to 'lazy' and normal unmount fails ] ++function perform_umount_url() { ++ local url=$1 ++ local mountpoint=$2 ++ local lazy=${3:-} ++ ++ if test $lazy ; then ++ if test $lazy != "lazy" ; then ++ BugError "lazy = $lazy, but it must have the value of 'lazy' or empty" ++ fi ++ fi ++ ++ case $(url_scheme $url) in ++ (sshfs) ++ # does ftpfs need this special case as well? 
++ fusermount -u ${lazy:+'-z'} $mountpoint ++ ;; ++ (davfs) ++ umount_davfs $mountpoint $lazy ++ ;; ++ (var) ++ local var ++ var=$(url_host $url) ++ Log "Unmounting with '${!var} $mountpoint'" ++ # lazy unmount not supported with custom umount command ++ ${!var} $mountpoint + ;; ++ (*) ++ # usual umount command ++ umount_mountpoint $mountpoint $lazy + esac ++ # The switch above must be the last statement in this function and the umount commands must be ++ # the last commands (or part of) in each branch. This ensures proper exit code propagation ++ # to the caller even when set -e is used. ++} + +- umount_mountpoint $mountpoint +- StopIfError "Unmounting '$mountpoint' failed." ++### Helper which unmounts davfs mountpoint $1 and cleans up the cache, ++### performing lazy unmount if $2 = 'lazy' and normal unmount fails. ++function umount_davfs() { ++ local mountpoint=$1 ++ local lazy="${2:-}" + +- RemoveExitTask "umount -f $v '$mountpoint' >&2" +- return 0 ++ if test $lazy ; then ++ if test $lazy != "lazy" ; then ++ BugError "lazy = $lazy, but it must have the value of 'lazy' or empty" ++ fi ++ fi ++ ++ if umount_mountpoint $mountpoint ; then ++ # Wait for 3 sek. then remove the cache-dir /var/cache/davfs ++ sleep 30 ++ # TODO: put in here the cache-dir from /etc/davfs2/davfs.conf ++ # and delete only the just used cache ++ #rm -rf /var/cache/davfs2/** ++ rm -rf /var/cache/davfs2/*outputfs* ++ else ++ local retval=$? ++ ++ if test $lazy ; then ++ # try again to unmount lazily and this time do not delete the cache, it is still in use. ++ LogPrintError "davfs cache /var/cache/davfs2/*outputfs* needs to be cleaned up manually after the lazy unmount finishes" ++ umount_mountpoint_lazy $mountpoint ++ else ++ # propagate errors from umount ++ return $retval ++ fi ++ fi + } + +-### Unmount mountpoint $1 +-umount_mountpoint() { ++### Unmount mountpoint $1 [ lazily if $2 = 'lazy' ] ++### Default implementation for filesystems that don't need anything fancy ++### For special umount commands use perform_umount_url() ++function umount_mountpoint() { + local mountpoint=$1 ++ local lazy=${2:-} ++ ++ if test $lazy ; then ++ if test $lazy != "lazy" ; then ++ BugError "lazy = $lazy, but it must have the value of 'lazy' or empty" ++ fi ++ fi + + ### First, try a normal unmount, + Log "Unmounting '$mountpoint'" +@@ -636,7 +786,21 @@ umount_mountpoint() { + fi + + Log "Unmounting '$mountpoint' failed." +- return 1 ++ ++ if test $lazy ; then ++ umount_mountpoint_lazy $mountpoint ++ else ++ return 1 ++ fi ++} ++ ++### Unmount mountpoint $1 lazily ++### Preferably use "umount_mountpoint $mountpoint lazy", which attempts non-lazy unmount first. ++function umount_mountpoint_lazy() { ++ local mountpoint=$1 ++ ++ LogPrint "Directory $mountpoint still mounted - trying lazy umount" ++ umount $v -f -l $mountpoint >&2 + } + + # Change $1 to user input or leave default value on empty input +diff --git a/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh b/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh +index a43dff13..3e7512ee 100644 +--- a/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh ++++ b/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh +@@ -8,10 +8,12 @@ + if [[ ! -z "$PXE_TFTP_URL" ]] ; then + # E.g. 
PXE_TFTP_URL=nfs://server/export/nfs/tftpboot + local scheme=$( url_scheme $PXE_TFTP_URL ) +- local path=$( url_path $PXE_TFTP_URL ) +- mkdir -p $v "$BUILD_DIR/tftpbootfs" >&2 +- StopIfError "Could not mkdir '$BUILD_DIR/tftpbootfs'" +- AddExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2" ++ ++ # We need filesystem access to the destination (schemes like ftp:// are not supported) ++ if ! scheme_supports_filesystem $scheme ; then ++ Error "Scheme $scheme for PXE output not supported, use a scheme that supports mounting (like nfs: )" ++ fi ++ + mount_url $PXE_TFTP_URL $BUILD_DIR/tftpbootfs $BACKUP_OPTIONS + # However, we copy under $OUTPUT_PREFIX_PXE directory (usually HOSTNAME) to have different clients on one pxe server + PXE_TFTP_LOCAL_PATH=$BUILD_DIR/tftpbootfs +@@ -74,10 +76,6 @@ fi + if [[ ! -z "$PXE_TFTP_URL" ]] ; then + LogPrint "Copied kernel+initrd $( du -shc $KERNEL_FILE "$TMP_DIR/$REAR_INITRD_FILENAME" | tail -n 1 | tr -s "\t " " " | cut -d " " -f 1 ) to $PXE_TFTP_URL/$OUTPUT_PREFIX_PXE" + umount_url $PXE_TFTP_URL $BUILD_DIR/tftpbootfs +- rmdir $BUILD_DIR/tftpbootfs >&2 +- if [[ $? -eq 0 ]] ; then +- RemoveExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2" +- fi + else + # legacy way PXE_TFTP_PATH + LogPrint "Copied kernel+initrd $( du -shc $KERNEL_FILE "$TMP_DIR/$REAR_INITRD_FILENAME" | tail -n 1 | tr -s "\t " " " | cut -d " " -f 1 ) to $PXE_TFTP_PATH" +diff --git a/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh b/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh +index fce4bcf1..5041a3bc 100644 +--- a/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh ++++ b/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh +@@ -1,4 +1,4 @@ +-# 81_create_pxelinux_cfg.sh ++# 810_create_pxelinux_cfg.sh + # + # create pxelinux config on PXE server for Relax-and-Recover + # +@@ -11,10 +11,12 @@ if [[ ! -z "$PXE_CONFIG_URL" ]] ; then + # E.g. PXE_CONFIG_URL=nfs://server/export/nfs/tftpboot/pxelinux.cfg + # Better be sure that on 'server' the directory /export/nfs/tftpboot/pxelinux.cfg exists + local scheme=$( url_scheme $PXE_CONFIG_URL ) +- local path=$( url_path $PXE_CONFIG_URL ) +- mkdir -p $v "$BUILD_DIR/tftpbootfs" >&2 +- StopIfError "Could not mkdir '$BUILD_DIR/tftpbootfs'" +- AddExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2" ++ ++ # We need filesystem access to the destination (schemes like ftp:// are not supported) ++ if ! scheme_supports_filesystem $scheme ; then ++ Error "Scheme $scheme for PXE output not supported, use a scheme that supports mounting (like nfs: )" ++ fi ++ + mount_url $PXE_CONFIG_URL $BUILD_DIR/tftpbootfs $BACKUP_OPTIONS + PXE_LOCAL_PATH=$BUILD_DIR/tftpbootfs + else +@@ -105,10 +107,6 @@ popd >/dev/null + if [[ ! -z "$PXE_CONFIG_URL" ]] ; then + LogPrint "Created pxelinux config '${PXE_CONFIG_PREFIX}$HOSTNAME' and symlinks for $PXE_CREATE_LINKS adresses in $PXE_CONFIG_URL" + umount_url $PXE_TFTP_URL $BUILD_DIR/tftpbootfs +- rmdir $BUILD_DIR/tftpbootfs >&2 +- if [[ $? 
-eq 0 ]] ; then +- RemoveExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2" +- fi + else + LogPrint "Created pxelinux config '${PXE_CONFIG_PREFIX}$HOSTNAME' and symlinks for $PXE_CREATE_LINKS adresses in $PXE_CONFIG_PATH" + # Add to result files +diff --git a/usr/share/rear/output/PXE/default/820_copy_to_net.sh b/usr/share/rear/output/PXE/default/820_copy_to_net.sh +deleted file mode 100644 +index 39cd316d..00000000 +--- a/usr/share/rear/output/PXE/default/820_copy_to_net.sh ++++ /dev/null +@@ -1,41 +0,0 @@ +- +-# 820_copy_to_net.sh +- +-# Check if we have a target location OUTPUT_URL +-test "$OUTPUT_URL" || return 0 +- +-local scheme=$( url_scheme $OUTPUT_URL ) +-local result_file="" +-local path="" +- +-case "$scheme" in +- (nfs|cifs|usb|tape|file|davfs) +- # The ISO has already been transferred by NETFS. +- return 0 +- ;; +- (fish|ftp|ftps|hftp|http|https|sftp) +- LogPrint "Transferring PXE files to $OUTPUT_URL" +- for result_file in "${RESULT_FILES[@]}" ; do +- path=$(url_path $OUTPUT_URL) +- +- # Make sure that destination directory exists, otherwise lftp would copy +- # RESULT_FILES into last available directory in the path. +- # e.g. OUTPUT_URL=sftp:///iso/server1 and have "/iso/server1" +- # directory missing, would upload RESULT_FILES into sftp:///iso/ +- lftp -c "$OUTPUT_LFTP_OPTIONS; open $OUTPUT_URL; mkdir -fp ${path}" +- +- LogPrint "Transferring file: $result_file" +- lftp -c "$OUTPUT_LFTP_OPTIONS; open $OUTPUT_URL; mput $result_file" || Error "lftp failed to transfer '$result_file' to '$OUTPUT_URL' (lftp exit code: $?)" +- done +- ;; +- (rsync) +- LogPrint "Transferring PXE files to $OUTPUT_URL" +- for result_file in "${RESULT_FILES[@]}" ; do +- LogPrint "Transferring file: $result_file" +- rsync -a $v "$result_file" "$OUTPUT_URL" || Error "Problem transferring '$result_file' to $OUTPUT_URL" +- done +- ;; +- (*) Error "Invalid scheme '$scheme' in '$OUTPUT_URL'." +- ;; +-esac +- +diff --git a/usr/share/rear/output/default/100_mount_output_path.sh b/usr/share/rear/output/default/100_mount_output_path.sh +index 22ef36de..34ea8e5e 100644 +--- a/usr/share/rear/output/default/100_mount_output_path.sh ++++ b/usr/share/rear/output/default/100_mount_output_path.sh +@@ -1,9 +1,3 @@ +-# create mount point +-mkdir -p $v "$BUILD_DIR/outputfs" >&2 +-StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" +- +-AddExitTask "rm -Rf $v $BUILD_DIR/outputfs >&2" +- + if [[ "$OUTPUT_MOUNTCMD" ]] ; then + OUTPUT_URL="var://$OUTPUT_MOUNTCMD" + fi +diff --git a/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh b/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh +index 00339a96..06326114 100644 +--- a/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh ++++ b/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh +@@ -3,22 +3,20 @@ + [ -z "${KEEP_OLD_OUTPUT_COPY}" ] && return + + # do not do this for tapes and special attention for file:///path +-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" +-local scheme=$(url_scheme ${!url}) +-local path=$(url_path ${!url}) +-local opath=$(output_path $scheme $path) ++local scheme=$( url_scheme $OUTPUT_URL ) ++local path=$( url_path $OUTPUT_URL ) + +-# if $opath is empty return silently (e.g. scheme tape) +-[ -z "$opath" ] && return 0 ++# if filesystem access to url is unsupported return silently (e.g. 
scheme tape) ++scheme_supports_filesystem $scheme || return 0 ++ ++local opath=$( output_path $scheme $path ) + + # an old lockfile from a previous run not cleaned up by output is possible + [[ -f ${opath}/.lockfile ]] && rm -f ${opath}/.lockfile >&2 + + if test -d "${opath}" ; then +- rm -rf $v "${opath}.old" >&2 +- StopIfError "Could not remove '${opath}.old'" ++ rm -rf $v "${opath}.old" || Error "Could not remove '${opath}.old'" + # below statement was 'cp -af' instead of 'mv -f' (see issue #192) +- mv -f $v "${opath}" "${opath}.old" >&2 +- StopIfError "Could not move '${opath}'" ++ mv -f $v "${opath}" "${opath}.old" || Error "Could not move '${opath}'" + fi + # the ${BUILD_DIR}/outputfs/${OUTPUT_PREFIX} will be created by output/default/200_make_prefix_dir.sh +diff --git a/usr/share/rear/output/default/200_make_prefix_dir.sh b/usr/share/rear/output/default/200_make_prefix_dir.sh +index b8892f2f..606e1c86 100644 +--- a/usr/share/rear/output/default/200_make_prefix_dir.sh ++++ b/usr/share/rear/output/default/200_make_prefix_dir.sh +@@ -3,25 +3,21 @@ + # The $OUTPUT_PREFIX directory defaults to $HOSTNAME. + # + # This happens usually under a mounted network filesystem share +-# e.g. in case of BACKUP_URL=nfs://NFS.server.IP.address/remote/nfs/share +-# but it is also happens for local stuff like BACKUP_URL=usb:///dev/disk/by-label/REAR-000 ++# e.g. in case of OUTPUT_URL=nfs://NFS.server.IP.address/remote/nfs/share ++# but it is also happens for local stuff like OUTPUT_URL=usb:///dev/disk/by-label/REAR-000 + # + # Do not do this for tapes and special attention for file:///path ++local scheme=$( url_scheme $OUTPUT_URL ) ++local path=$( url_path $OUTPUT_URL ) + +-# Generate url variable name that depends on the current stage, +-# e.g. BACKUP_URL or OUTPUT_URL: +-url="$( echo $stage | tr '[:lower:]' '[:upper:]' )_URL" ++# If filesystem access to url is unsupported return silently (e.g. scheme tape) ++scheme_supports_filesystem $scheme || return 0 + +-local scheme=$( url_scheme ${!url} ) +-local path=$( url_path ${!url} ) + local opath=$( output_path $scheme $path ) + +-# If $opath is empty return silently (e.g. scheme tape): +-test "$opath" || return 0 +- + # Create $OUTPUT_PREFIX sub-directory: + mkdir -p $v -m0750 "$opath" && return + +-# A failure to cerate the $OUTPUT_PREFIX sub-directory is fatal: +-Error "Failed to create '$opath' directory for $url=${!url}" ++# A failure to create the $OUTPUT_PREFIX sub-directory is fatal: ++Error "Failed to create '$opath' directory for OUTPUT_URL=$OUTPUT_URL" + +diff --git a/usr/share/rear/output/default/250_create_lock.sh b/usr/share/rear/output/default/250_create_lock.sh +index 49c75601..d792b036 100644 +--- a/usr/share/rear/output/default/250_create_lock.sh ++++ b/usr/share/rear/output/default/250_create_lock.sh +@@ -2,15 +2,14 @@ + # made by a previous mkrescue run when the variable KEEP_OLD_OUTPUT_COPY has been set + + # do not do this for tapes and special attention for file:///path +-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" +-local scheme=$(url_scheme ${!url}) +-local path=$(url_path ${!url}) +-local opath=$(output_path $scheme $path) ++local scheme=$( url_scheme $OUTPUT_URL ) ++local path=$( url_path $OUTPUT_URL ) + +-# if $opath is empty return silently (e.g. scheme tape) +-[ -z "$opath" ] && return 0 ++# if filesystem access to url is unsupported return silently (e.g. 
scheme tape) ++scheme_supports_filesystem $scheme || return 0 ++ ++local opath=$( output_path $scheme $path ) + + if test -d "${opath}" ; then +- > "${opath}/.lockfile" +- StopIfError "Could not create '${opath}/.lockfile'" ++ > "${opath}/.lockfile" || Error "Could not create '${opath}/.lockfile'" + fi +diff --git a/usr/share/rear/output/default/950_copy_result_files.sh b/usr/share/rear/output/default/950_copy_result_files.sh +index 545b3f7d..77f54d51 100644 +--- a/usr/share/rear/output/default/950_copy_result_files.sh ++++ b/usr/share/rear/output/default/950_copy_result_files.sh +@@ -5,16 +5,25 @@ + + # For example for "rear mkbackuponly" there are usually no result files + # that would need to be copied here to the output location: +-test "$RESULT_FILES" || return 0 ++test "${RESULT_FILES[*]:-}" || return 0 + + local scheme=$( url_scheme $OUTPUT_URL ) + local host=$( url_host $OUTPUT_URL ) + local path=$( url_path $OUTPUT_URL ) +-local opath=$( output_path $scheme $path ) + +-# if $opath is empty return silently (e.g. scheme tape) +-if [[ -z "$opath" || -z "$OUTPUT_URL" || "$scheme" == "obdr" || "$scheme" == "tape" ]] ; then +- return 0 ++if [ -z "$OUTPUT_URL" ] || ! scheme_accepts_files $scheme ; then ++ if [ "$scheme" == "null" -o -z "$OUTPUT_URL" ] ; then ++ # There are result files to copy, but OUTPUT_URL=null indicates that we are not interested in them ++ # TODO: empty OUTPUT_URL seems to be equivalent to null, should we continue to allow that, ++ # or enforce setting it explicitly? ++ return 0 ++ else ++ # There are files to copy, but schemes like tape: do not allow files to be stored. The files would be lost. ++ # Do not allow that. ++ # Schemes like obdr: that store the results themselves should clear RESULT_FILES to indicate that nothing is to be done. ++ # Is this considered a bug in ReaR (BugError), or a user misconfiguration (Error) when this happens? ++ BugError "Output scheme $scheme does not accept result files ${RESULT_FILES[*]}, use OUTPUT_URL=null if you don't want to copy them anywhere." ++ fi + fi + + LogPrint "Copying resulting files to $scheme location" +@@ -38,66 +47,76 @@ RESULT_FILES+=( "$TMP_DIR/$final_logfile_name" ) + LogPrint "Saving $RUNTIME_LOGFILE as $final_logfile_name to $scheme location" + + # The real work (actually copying resulting files to the output location): ++if scheme_supports_filesystem $scheme ; then ++ # We can access the destination as a mounted filesystem. Do nothing special, ++ # simply copy the output files there. (Covers stuff like nfs|cifs|usb|file|sshfs|ftpfs|davfs.) ++ # This won't work for iso:// , but iso can't be a OUTPUT_URL scheme, this is checked in ++ # prep/default/040_check_backup_and_output_scheme.sh ++ # This covers also unknown schemes, because mount_url() will attempt to mount them and fail if this is not possible, ++ # so if we got here, the URL had been mounted successfully. 
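# A minimal sketch of the predicate-based dispatch described in the comment above:
# scheme_supports_filesystem is expected to succeed for schemes that mount_url()
# can mount and to fail for transfer-only schemes. The function below is a
# stand-in written for illustration (its scheme list is an assumption taken from
# the old case statement further down), not the real ReaR implementation.
scheme_supports_filesystem_sketch() {
    case $1 in
        (nfs|cifs|usb|file|sshfs|ftpfs|davfs) return 0 ;;  # mounted: plain 'cp' to $opath works
        (*) return 1 ;;                                    # tape, ftp, rsync, ...: no filesystem view
    esac
}
# Usage mirrors this script: copy with 'cp' when the predicate succeeds,
# otherwise fall through to the lftp/rsync branches below.
scheme_supports_filesystem_sketch nfs  && echo "copy result files with cp"
scheme_supports_filesystem_sketch sftp || echo "transfer result files with lftp"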
++ local opath ++ opath=$( output_path $scheme $path ) ++ LogPrint "Copying result files '${RESULT_FILES[*]}' to $opath at $scheme location" ++ # Copy each result file one by one to avoid usually false error exits as in ++ # https://github.com/rear/rear/issues/1711#issuecomment-380009044 ++ # where in case of an improper RESULT_FILES array member 'cp' can error out with something like ++ # cp: will not overwrite just-created '/tmp/rear.XXX/outputfs/f121/rear-f121.log' with '/tmp/rear.XXX/tmp/rear-f121.log' ++ # See ++ # https://stackoverflow.com/questions/4669420/have-you-ever-got-this-message-when-moving-a-file-mv-will-not-overwrite-just-c ++ # which is about the same for 'mv', how to reproduce it: ++ # mkdir a b c ++ # touch a/f b/f ++ # mv a/f b/f c/ ++ # mv: will not overwrite just-created 'c/f' with 'b/f' ++ # It happens because two different files with the same name would be moved to the same place with only one command. ++ # The -f option won't help for this case, it only applies when there already is a target file that will be overwritten. ++ # Accordingly it is sufficient (even without '-f') to copy each result file one by one: ++ for result_file in "${RESULT_FILES[@]}" ; do ++ ++ # note: s390 kernel copy is only through nfs ++ # ++ # s390 optional naming override of initrd and kernel to match the s390 filesytem naming conventions ++ # on s390a there is an option to name the initrd and kernel in the form of ++ # file name on s390 are in the form of name type mode ++ # the name is the userid or vm name and the type is initrd or kernel ++ # if the vm name (cp q userid) is HOSTA then the files written will be HOSTA kernel and HOSTA initrd ++ # vars needed: ++ # ZVM_NAMING - set in local.conf, if Y then enable naming override ++ # ZVM_KERNEL_NAME - keeps track of kernel name in results array ++ # ARCH - override only if ARCH is Linux-s390 ++ # ++ # initrd name override is handled in 900_create_initramfs.sh ++ # kernel name override is handled in 400_guess_kernel.sh ++ # kernel name override is handled in 950_copy_result_files.sh ++ ++ if [[ "$ZVM_NAMING" == "Y" && "$ARCH" == "Linux-s390" ]] ; then ++ if [[ -z $opath ]] ; then ++ Error "Output path is not set, please check OUTPUT_URL in local.conf." ++ fi ++ ++ if [ "$ZVM_KERNEL_NAME" == "$result_file" ] ; then ++ VM_UID=$(vmcp q userid |awk '{ print $1 }') ++ ++ if [[ -z $VM_UID ]] ; then ++ Error "VM UID is not set, VM UID is set from call to vmcp. 
Please make sure vmcp is available and 'vmcp q userid' returns VM ID" ++ fi ++ ++ LogPrint "s390 kernel naming override: $result_file will be written as $VM_UID.kernel" ++ cp $v "$result_file" $opath/$VM_UID.kernel || Error "Could not copy result file $result_file to $opath/$VM_UID.kernel at $scheme location" ++ else ++ cp $v "$result_file" $opath/ || Error "Could not copy result file $result_file to $opath at $scheme location" ++ fi ++ else ++ cp $v "$result_file" $opath/ || Error "Could not copy result file $result_file to $opath at $scheme location" ++ fi ++ done ++ ++ return 0 ++fi ++ ++# Filesystem access to output destination not supported, use a scheme-specific tool (rsync, lftp) + case "$scheme" in +- (nfs|cifs|usb|file|sshfs|ftpfs|davfs) +- LogPrint "Copying result files '${RESULT_FILES[@]}' to $opath at $scheme location" +- # Copy each result file one by one to avoid usually false error exits as in +- # https://github.com/rear/rear/issues/1711#issuecomment-380009044 +- # where in case of an improper RESULT_FILES array member 'cp' can error out with something like +- # cp: will not overwrite just-created '/tmp/rear.XXX/outputfs/f121/rear-f121.log' with '/tmp/rear.XXX/tmp/rear-f121.log' +- # See +- # https://stackoverflow.com/questions/4669420/have-you-ever-got-this-message-when-moving-a-file-mv-will-not-overwrite-just-c +- # which is about the same for 'mv', how to reproduce it: +- # mkdir a b c +- # touch a/f b/f +- # mv a/f b/f c/ +- # mv: will not overwrite just-created 'c/f' with 'b/f' +- # It happens because two different files with the same name would be moved to the same place with only one command. +- # The -f option won't help for this case, it only applies when there already is a target file that will be overwritten. +- # Accordingly it is sufficient (even without '-f') to copy each result file one by one: +- for result_file in "${RESULT_FILES[@]}" ; do +- +- # note: s390 kernel copy is only through nfs +- # +- # s390 optional naming override of initrd and kernel to match the s390 filesytem naming conventions +- # on s390a there is an option to name the initrd and kernel in the form of +- # file name on s390 are in the form of name type mode +- # the name is the userid or vm name and the type is initrd or kernel +- # if the vm name (cp q userid) is HOSTA then the files written will be HOSTA kernel and HOSTA initrd +- # vars needed: +- # ZVM_NAMING - set in local.conf, if Y then enable naming override +- # ZVM_KERNEL_NAME - keeps track of kernel name in results array +- # ARCH - override only if ARCH is Linux-s390 +- # +- # initrd name override is handled in 900_create_initramfs.sh +- # kernel name override is handled in 400_guess_kernel.sh +- # kernel name override is handled in 950_copy_result_files.sh +- +- if [[ "$ZVM_NAMING" == "Y" && "$ARCH" == "Linux-s390" ]] ; then +- if [[ -z $opath ]] ; then +- Error "Output path is not set, please check OUTPUT_URL in local.conf." +- fi +- +- if [ "$ZVM_KERNEL_NAME" == "$result_file" ] ; then +- VM_UID=$(vmcp q userid |awk '{ print $1 }') +- +- if [[ -z $VM_UID ]] ; then +- Error "VM UID is not set, VM UID is set from call to vmcp. 
Please make sure vmcp is available and 'vmcp q userid' returns VM ID" +- fi +- +- LogPrint "s390 kernel naming override: $result_file will be written as $VM_UID.kernel" +- cp $v "$result_file" $opath/$VM_UID.kernel || Error "Could not copy result file $result_file to $opath/$VM_UID.kernel at $scheme location" +- else +- cp $v "$result_file" $opath/ || Error "Could not copy result file $result_file to $opath at $scheme location" +- fi +- else +- cp $v "$result_file" $opath/ || Error "Could not copy result file $result_file to $opath at $scheme location" +- fi +- done +- ;; + (fish|ftp|ftps|hftp|http|https|sftp) +- # FIXME: Verify if usage of $array[*] instead of "${array[@]}" is actually intended here +- # see https://github.com/rear/rear/issues/1068 + LogPrint "Copying result files '${RESULT_FILES[*]}' to $scheme location" + Log "lftp -c $OUTPUT_LFTP_OPTIONS; open $OUTPUT_URL; mput ${RESULT_FILES[*]}" + +@@ -111,12 +130,15 @@ case "$scheme" in + (rsync) + # If BACKUP = RSYNC output/RSYNC/default/900_copy_result_files.sh took care of it: + test "$BACKUP" = "RSYNC" && return 0 +- LogPrint "Copying result files '${RESULT_FILES[@]}' to $scheme location" +- Log "rsync -a $v ${RESULT_FILES[@]} ${host}:${path}" ++ LogPrint "Copying result files '${RESULT_FILES[*]}' to $scheme location" ++ Log "rsync -a $v ${RESULT_FILES[*]} ${host}:${path}" + rsync -a $v "${RESULT_FILES[@]}" "${host}:${path}" || Error "Problem transferring result files to $OUTPUT_URL" + ;; + (*) +- Error "Invalid scheme '$scheme' in '$OUTPUT_URL'." ++ # Should be unreachable, if we got here, it is a bug. ++ # Unknown schemes are handled in mount_url(), which tries to mount them and aborts if they are unsupported. ++ # If they can be mounted, they fall under the scheme_supports_filesystem branch above. ++ BugError "Invalid scheme '$scheme' in '$OUTPUT_URL'." + ;; + esac + +diff --git a/usr/share/rear/output/default/970_remove_lock.sh b/usr/share/rear/output/default/970_remove_lock.sh +index 56640839..3b1b97cc 100644 +--- a/usr/share/rear/output/default/970_remove_lock.sh ++++ b/usr/share/rear/output/default/970_remove_lock.sh +@@ -1,10 +1,11 @@ + # remove the lockfile + local scheme=$(url_scheme $OUTPUT_URL) + local path=$(url_path $OUTPUT_URL) +-local opath=$(output_path $scheme $path) + +-# if $opath is empty return silently (e.g. scheme tape) +-[ -z "$opath" ] && return 0 ++# if filesystem access to url is unsupported return silently (e.g. scheme tape) ++scheme_supports_filesystem $scheme || return 0 ++ ++local opath=$( output_path $scheme $path ) + + # when OUTPUT_URL=BACKUP_URL we keep the lockfile to avoid double moves of the directory + [[ "$OUTPUT_URL" != "$BACKUP_URL" ]] && rm -f $v "${opath}/.lockfile" >&2 +diff --git a/usr/share/rear/output/default/980_umount_output_dir.sh b/usr/share/rear/output/default/980_umount_output_dir.sh +index 9a9995bd..abf0cd53 100644 +--- a/usr/share/rear/output/default/980_umount_output_dir.sh ++++ b/usr/share/rear/output/default/980_umount_output_dir.sh +@@ -9,12 +9,3 @@ if [[ -z "$OUTPUT_URL" ]] ; then + fi + + umount_url $OUTPUT_URL $BUILD_DIR/outputfs +- +-[[ -d $BUILD_DIR/outputfs/$NETFS_PREFIX ]] && rm -rf $v $BUILD_DIR/outputfs/$NETFS_PREFIX +-[[ -d $BUILD_DIR/outputfs/$RSYNC_PREFIX ]] && rm -rf $v $BUILD_DIR/outputfs/$RSYNC_PREFIX +- +-rmdir $v $BUILD_DIR/outputfs >&2 +-if [[ $? 
-eq 0 ]] ; then +- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask +- RemoveExitTask "rm -Rf $v $BUILD_DIR/outputfs >&2" +-fi +diff --git a/usr/share/rear/prep/BORG/default/250_mount_usb.sh b/usr/share/rear/prep/BORG/default/250_mount_usb.sh +index c13fd088..05be0179 100644 +--- a/usr/share/rear/prep/BORG/default/250_mount_usb.sh ++++ b/usr/share/rear/prep/BORG/default/250_mount_usb.sh +@@ -8,10 +8,5 @@ + # When BORGBACKUP_HOST is set, we don't need to mount anything as SSH + # backup destination will be handled internally by Borg it self. + if [[ -z $BORGBACKUP_HOST ]]; then +- # Has to be $verbose, not "$verbose", since it's used as option. +- # shellcheck disable=SC2086,SC2154 +- mkdir -p $verbose "$borg_dst_dev" >&2 +- StopIfError "Could not mkdir '$borg_dst_dev'" +- + mount_url "usb://$USB_DEVICE" "$borg_dst_dev" + fi +diff --git a/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh b/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh +deleted file mode 100644 +index 2fbcc6cd..00000000 +--- a/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh ++++ /dev/null +@@ -1,300 +0,0 @@ +-# Copied from ../../NETFS/default/070_set_backup_archive.sh for YUM +-### Determine the name of the backup archive +-### This needs to be after we special case USB devices. +- +-# FIXME: backuparchive is no local variable (regardless that it is lowercased) +- +-# If TAPE_DEVICE is specified, use that: +-if test "$TAPE_DEVICE" ; then +- backuparchive="$TAPE_DEVICE" +- LogPrint "Using backup archive '$backuparchive'" +- return +-fi +- +-local backup_file_suffix="$BACKUP_PROG_SUFFIX$BACKUP_PROG_COMPRESS_SUFFIX" +-local backup_file_name="$BACKUP_PROG_ARCHIVE$backup_file_suffix" +- +-local scheme=$( url_scheme $BACKUP_URL ) +-local path=$( url_path $BACKUP_URL ) +-case "$scheme" in +- (file|iso) +- # Define the output path according to the scheme +- local outputpath=$( backup_path $scheme $path ) +- backuparchive="$outputpath/$backup_file_name" +- LogPrint "Using backup archive '$backuparchive'" +- return +- ;; +- (tape) +- # TODO: Check if that case is really needed. +- # Perhaps prep/default/030_translate_tape.sh does already all what is needed. +- backuparchive=$path +- LogPrint "Using backup archive '$backuparchive'" +- return +- ;; +-esac +- +-local backup_directory=$BUILD_DIR/outputfs/$NETFS_PREFIX +- +-# Normal (i.e. non-incremental/non-differential) backup: +-if ! test "incremental" = "$BACKUP_TYPE" -o "differential" = "$BACKUP_TYPE" ; then +- # In case of normal (i.e. non-incremental) backup there is only one restore archive +- # and its name is the same as the backup archive (usually 'backup.tar.gz'): +- backuparchive="$backup_directory/$backup_file_name" +- LogPrint "Using backup archive '$backuparchive'" +- # This script is also run during "rear recover/restoreonly" where RESTORE_ARCHIVES must be set. +- local backup_restore_workflows=( "recover" "restoreonly" ) +- if IsInArray $WORKFLOW ${backup_restore_workflows[@]} ; then +- # Only set RESTORE_ARCHIVES the backup archive is actually accessible +- # cf. https://github.com/rear/rear/issues/1166 +- if test -r "$backuparchive" ; then +- RESTORE_ARCHIVES=( "$backuparchive" ) +- else +- # In case of USB backup there is the subsequent 540_choose_backup_archive.sh script +- # that shows a backup selection dialog when RESTORE_ARCHIVES is not already set. +- if test "usb" = "$scheme" ; then +- LogPrint "Backup archive '$backuparchive' not readable. Need to select another one." 
+- else +- Error "Backup archive '$backuparchive' not readable." +- fi +- fi +- fi +- return +-fi +- +-# Incremental or differential backup: +-set -e -u -o pipefail +-# Incremental or differential backup only works for the NETFS backup method +-# and only with the 'tar' backup program: +-if ! test "NETFS" = "$BACKUP" -a "tar" = "$BACKUP_PROG" ; then +- Error "BACKUP_TYPE incremental or differential only works with BACKUP=NETFS and BACKUP_PROG=tar" +-fi +-# Incremental or differential backup is currently only known to work with BACKUP_URL=nfs://. +-# Other BACKUP_URL schemes may work and at least BACKUP_URL=usb:///... needs special setup +-# to work with incremental or differential backup (see https://github.com/rear/rear/issues/1145): +-if test "usb" = "$scheme" ; then +- # When USB_SUFFIX is set the compliance mode is used where +- # backup on USB works in compliance with backup on NFS which means +- # a fixed backup directory where incremental or differential backups work. +- # Use plain $USB_SUFFIX and not "$USB_SUFFIX" because when USB_SUFFIX contains only blanks +- # test "$USB_SUFFIX" would result true because test " " results true: +- test $USB_SUFFIX || Error "BACKUP_TYPE incremental or differential requires USB_SUFFIX for BACKUP_URL=usb" +-fi +-# Incremental or differential backup and keeping old backup contradict each other (mutual exclusive) +-# so that NETFS_KEEP_OLD_BACKUP_COPY must not be 'true' in case of incremental or differential backup: +-if test "$NETFS_KEEP_OLD_BACKUP_COPY" ; then +- NETFS_KEEP_OLD_BACKUP_COPY="" +- LogPrint "Disabled NETFS_KEEP_OLD_BACKUP_COPY because BACKUP_TYPE incremental or differential does not work with that" +-fi +-# For incremental or differential backup some date values (weekday, YYYY-MM-DD, HHMM) are needed +-# that must be consistent for one single point of the current time which means +-# one cannot call the 'date' command several times because then there would be +-# a small probability that e.g. weekday, YYYY-MM-DD, HHMM do not match +-# one single point in time (in particular when midnight passes in between). +-# Therefore the output of one single 'date' call is storend in an array and +-# the array elements are then assinged to individual variables as needed: +-local current_date_output=( $( date '+%a %Y-%m-%d %H%M' ) ) +-local current_weekday="${current_date_output[0]}" +-local current_yyyy_mm_dd="${current_date_output[1]}" +-local current_hhmm="${current_date_output[2]}" +-# The date FULLBACKUP_OUTDATED_DAYS ago is needed to check if the latest full backup is too old. +-# When the latest full backup is more than FULLBACKUP_OUTDATED_DAYS ago a new full backup is made. +-# This separated call of the 'date' command which is technically needed because it is +-# for another point in time (e.g. 7 days ago) is run after the above call of the 'date' +-# command for the current time to be on the safe side when midnight passes in between +-# both 'date' commands which would then result that a new full backup is made +-# when the latest full backup is basically right now FULLBACKUP_OUTDATED_DAYS ago because +-# the stored date of the latest full backup is the current date at the time when it was made. +-# Example (assuming FULLBACKUP_OUTDATED_DAYS=7 ): +-# The latest full backup was made on Sunday January 10 in 2016 (just before midnight). 
+-# One week later this script runs again while midnight passes between the two 'date' calls +-# so that current_date_output[@]="Sun 2016-01-17 0000" (still Sunday January 17 in 2016) +-# and yyyymmdd_max_days_ago=20160111 (already Monday January 11 in 2016), then +-# Sunday January 10 is older than Monday January 11 so that a new full backup is made: +-test "$FULLBACKUP_OUTDATED_DAYS" || FULLBACKUP_OUTDATED_DAYS="7" +-local yyyymmdd_max_days_ago=$( date '+%Y%m%d' --date="$FULLBACKUP_OUTDATED_DAYS days ago" ) +-# Full backup file names are of the form YYYY-MM-DD-HHMM-F.tar.gz +-# where the 'F' denotes a full backup: +-local full_backup_marker="F" +-# Incremental backup file names are of the form YYYY-MM-DD-HHMM-I.tar.gz +-# where the 'I' denotes an incremental backup: +-local incremental_backup_marker="I" +-# Differential backup file names are of the form YYYY-MM-DD-HHMM-D.tar.gz +-# where the last 'D' denotes a differential backup: +-local differential_backup_marker="D" +-# In case of incremental or differential backup the RESTORE_ARCHIVES contains +-# first the latest full backup file. +-# In case of incremental backup the RESTORE_ARCHIVES contains +-# after the latest full backup file each incremental backup +-# in the ordering how they must be restored. +-# For example when the latest full backup was made on Sunday +-# plus each subsequent weekday a separated incremental backup was made, +-# then during a "rear recover" on Wednesday morning +-# first the full backup from Sunday has to be restored, +-# then the incremental backup from Monday, and +-# finally the incremental backup from Tuesday. +-# In case of differential backup the RESTORE_ARCHIVES contains +-# after the latest full backup file the latest differential backup. +-# For example when the latest full backup was made on Sunday +-# plus each subsequent weekday a separated differential backup was made, +-# then during a "rear recover" on Wednesday morning +-# first the full backup from Sunday has to be restored, +-# and finally the differential backup from Tuesday +-# (i.e. the differential backup from Monday is skipped). +-# The date format YYYY-MM-DD that is used here is crucial. +-# It is the ISO 8601 format 'year-month-day' to specify a day of a year +-# that is accepted by 'tar' for the '--newer' option, +-# see the GNU tar manual section "Operating Only on New Files" +-# at https://www.gnu.org/software/tar/manual/html_node/after.html +-# and the GNU tar manual section "Calendar date items" +-# at https://www.gnu.org/software/tar/manual/html_node/Calendar-date-items.html#SEC124 +-local date_glob_regex="[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]" +-local date_time_glob_regex="$date_glob_regex-[0-9][0-9][0-9][0-9]" +-# Determine what kind of backup must be created, 'full' or 'incremental' or 'differential' +-# (the empty default means it is undecided what kind of backup must be created): +-local create_backup_type="" +-# Code regarding creating a backup is useless during "rear recover" and +-# messages about creating a backup are misleading during "rear recover": +-local recovery_workflows=( "recover" "layoutonly" "restoreonly" ) +-if ! 
IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # When today is a specified full backup day, do a full backup in any case +- # (regardless if there is already a full backup of this day): +- if IsInArray "$current_weekday" "${FULLBACKUPDAY[@]}" ; then +- create_backup_type="full" +- LogPrint "Today's weekday ('$current_weekday') is a full backup day that triggers a new full backup in any case" +- fi +-fi +-# Get the latest full backup (if exists): +-local full_backup_glob_regex="$date_time_glob_regex-$full_backup_marker$backup_file_suffix" +-# Here things like 'find /path/to/dir -name '*.tar.gz' | sort' are used because +-# one cannot use bash globbing via commands like 'ls /path/to/dir/*.tar.gz' +-# because /usr/sbin/rear sets the nullglob bash option which leads to plain 'ls' +-# when '/path/to/dir/*.tar.gz' matches nothing (i.e. when no backup file exists) +-# so that then plain 'ls' would result nonsense. +-local latest_full_backup=$( find $backup_directory -name "$full_backup_glob_regex" | sort | tail -n1 ) +-# A latest full backup is found: +-if test "$latest_full_backup" ; then +- local latest_full_backup_file_name=$( basename "$latest_full_backup" ) +- # The full_or_incremental_backup_glob_regex is also needed below for non-"recover" WORKFLOWs +- # to set the right variables for creating an incremental backup: +- local full_or_incremental_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$incremental_backup_marker]$backup_file_suffix" +- # Code regarding creating a backup is useless during "rear recover" and +- # messages about creating a backup are misleading during "rear recover": +- if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # There is nothing to do here if it is already decided that +- # a full backup must be created (see "full backup day" above"): +- if ! test "full" = "$create_backup_type" ; then +- local latest_full_backup_date=$( echo $latest_full_backup_file_name | grep -o "$date_glob_regex" ) +- local yyyymmdd_latest_full_backup=$( echo $latest_full_backup_date | tr -d '-' ) +- # Check if the latest full backup is too old: +- if test $yyyymmdd_latest_full_backup -lt $yyyymmdd_max_days_ago ; then +- create_backup_type="full" +- LogPrint "Latest full backup date '$latest_full_backup_date' too old (more than $FULLBACKUP_OUTDATED_DAYS days ago) triggers new full backup" +- else +- # When a latest full backup is found that is not too old +- # a BACKUP_TYPE (incremental or differential) backup will be created: +- create_backup_type="$BACKUP_TYPE" +- LogPrint "Latest full backup found ($latest_full_backup_file_name) triggers $BACKUP_TYPE backup" +- fi +- fi +- else +- # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set: +- case "$BACKUP_TYPE" in +- (incremental) +- # When a latest full backup is found use that plus all later incremental backups for restore: +- # The following command is a bit tricky: +- # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-I.tar.gz files in the backup directory and sorts them +- # and finally it outputs only those that match the latest full backup file name and incremental backups that got sorted after that +- # where it is mandatory that the backup file names sort by date (i.e. 
date must be the leading part of the backup file names): +- RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" ) ) +- ;; +- (differential) +- # For differential backup use the latest full backup plus the one latest differential backup for restore: +- # The following command is a bit tricky: +- # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-D.tar.gz files in the backup directory and sorts them +- # then it outputs only those that match the latest full backup file name and all differential backups that got sorted after that +- # and then it outputs only the first line (i.e. the full backup) and the last line (i.e. the latest differential backup) +- # but when no differential backup exists (i.e. when only the full backup exists) the first line is also the last line +- # so that "sed -n -e '1p;$p'" outputs the full backup twice which is corrected by the final "sort -u": +- local full_or_differential_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$differential_backup_marker]$backup_file_suffix" +- RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_differential_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | sed -n -e '1p;$p' | sort -u ) ) +- ;; +- (*) +- BugError "Unexpected BACKUP_TYPE '$BACKUP_TYPE'" +- ;; +- esac +- # Tell the user what will be restored: +- local restore_archives_file_names="" +- for restore_archive in "${RESTORE_ARCHIVES[@]}" ; do +- restore_archives_file_names="$restore_archives_file_names $( basename "$restore_archive" )" +- done +- LogPrint "For backup restore using $restore_archives_file_names" +- fi +-# No latest full backup is found: +-else +- # Code regarding creating a backup is useless during "rear recover" and +- # messages about creating a backup are misleading during "rear recover": +- if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # If no latest full backup is found create one during "rear mkbackup": +- create_backup_type="full" +- LogPrint "No full backup found (YYYY-MM-DD-HHMM-F.tar.gz) triggers full backup" +- else +- # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set: +- # If no latest full backup is found (i.e. no file name matches the YYYY-MM-DD-HHMM-F.tar.gz form) +- # fall back to what is done in case of normal (i.e. non-incremental/non-differential) backup +- # and hope for the best (i.e. that a backup_directory/backup_file_name actually exists). +- # In case of normal (i.e. non-incremental/non-differential) backup there is only one restore archive +- # and its name is the same as the backup archive (usually 'backup.tar.gz'). +- # This is only a fallback setting to be more on the safe side for "rear recover". +- # Initially for the very fist run of incremental backup during "rear mkbackup" +- # a full backup file of the YYYY-MM-DD-HHMM-F.tar.gz form will be created. +- RESTORE_ARCHIVES=( "$backup_directory/$backup_file_name" ) +- LogPrint "Using $backup_file_name for backup restore" +- fi +-fi +-# Code regarding creating a backup is useless during "rear recover" and +-# messages about creating a backup are misleading during "rear recover": +-if ! 
IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # Set the right variables for creating a backup (but do not actually do anything at this point): +- case "$create_backup_type" in +- (full) +- local new_full_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$full_backup_marker$backup_file_suffix" +- backuparchive="$backup_directory/$new_full_backup_file_name" +- BACKUP_PROG_CREATE_NEWER_OPTIONS="-V $new_full_backup_file_name" +- LogPrint "Performing full backup using backup archive '$new_full_backup_file_name'" +- ;; +- (incremental) +- local new_incremental_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$incremental_backup_marker$backup_file_suffix" +- backuparchive="$backup_directory/$new_incremental_backup_file_name" +- # Get the latest latest incremental backup that is based on the latest full backup (if exists): +- local incremental_backup_glob_regex="$date_time_glob_regex-$incremental_backup_marker$backup_file_suffix" +- # First get the latest full backup plus all later incremental backups (cf. how RESTORE_ARCHIVES is set in case of incremental backup) +- # then grep only the incremental backups and from the incremental backups use only the last one (if exists): +- local latest_incremental_backup=$( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | grep "$incremental_backup_glob_regex" | tail -n1 ) +- if test "$latest_incremental_backup" ; then +- # A latest incremental backup that is based on the latest full backup is found: +- local latest_incremental_backup_file_name=$( basename $latest_incremental_backup ) +- LogPrint "Latest incremental backup found ($latest_incremental_backup_file_name) that is newer than the latest full backup" +- local latest_incremental_backup_date=$( echo $latest_incremental_backup_file_name | grep -o "$date_glob_regex" ) +- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_incremental_backup_date -V $latest_incremental_backup_file_name" +- LogPrint "Performing incremental backup for files newer than $latest_incremental_backup_date using backup archive '$new_incremental_backup_file_name'" +- else +- # When there is not yet an incremental backup that is based on the latest full backup +- # the new created incremental backup must be based on the latest full backup: +- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name" +- LogPrint "Performing incremental backup for files newer than $latest_full_backup_date using backup archive '$new_incremental_backup_file_name'" +- fi +- ;; +- (differential) +- local new_differential_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$differential_backup_marker$backup_file_suffix" +- backuparchive="$backup_directory/$new_differential_backup_file_name" +- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name" +- LogPrint "Performing differential backup for files newer than $latest_full_backup_date using backup archive '$new_differential_backup_file_name'" +- ;; +- (*) +- BugError "Unexpected create_backup_type '$create_backup_type'" +- ;; +- esac +-fi +-# Go back from "set -e -u -o pipefail" to the defaults: +-apply_bash_flags_and_options_commands "$DEFAULT_BASH_FLAGS_AND_OPTIONS_COMMANDS" +- +diff --git a/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh b/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh +new file mode 120000 +index 00000000..cdbdc31f +--- /dev/null ++++ 
b/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh +@@ -0,0 +1 @@ ++../../NETFS/default/070_set_backup_archive.sh +\ No newline at end of file +diff --git a/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh +deleted file mode 100644 +index 64b7a792..00000000 +--- a/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh ++++ /dev/null +@@ -1,15 +0,0 @@ +-# create mount point +-if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]; then +- mkdir -p $v "$BUILD_DIR/outputfs" >&2 +- StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" +- +- AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- +- if [[ "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]] ; then +- BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_MOUNTCMD" +- fi +- +- mount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs $BACKUP_DUPLICITY_NETFS_OPTIONS +- +- BACKUP_DUPLICITY_URL="file://$BUILD_DIR/outputfs" +-fi +diff --git a/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh +new file mode 120000 +index 00000000..7f558c5d +--- /dev/null ++++ b/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh +@@ -0,0 +1 @@ ++../../../backup/DUPLICITY/default/100_mount_duplicity_path.sh +\ No newline at end of file +diff --git a/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh +deleted file mode 100644 +index 60aa811e..00000000 +--- a/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh ++++ /dev/null +@@ -1,15 +0,0 @@ +-# umount mountpoint +-if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]; then +- +- if [[ "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]] ; then +- BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_UMOUNTCMD" +- fi +- +- umount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs +- +- rmdir $v $BUILD_DIR/outputfs >&2 +- if [[ $? -eq 0 ]] ; then +- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask +- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- fi +-fi +diff --git a/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh +new file mode 120000 +index 00000000..b7e47be1 +--- /dev/null ++++ b/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh +@@ -0,0 +1 @@ ++../../../backup/DUPLICITY/default/980_unmount_duplicity_path.sh +\ No newline at end of file +diff --git a/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh b/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh +deleted file mode 100644 +index 7de92af4..00000000 +--- a/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh ++++ /dev/null +@@ -1,13 +0,0 @@ +-# Copied from ../../NETFS/default/100_mount_NETFS_path.sh a.k.a. 
../../../backup/NETFS/default/100_mount_NETFS_path.sh for YUM +- +-# create mount point +-mkdir -p $v "$BUILD_DIR/outputfs" >&2 +-StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" +- +-AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- +-if [[ "$BACKUP_MOUNTCMD" ]] ; then +- BACKUP_URL="var://BACKUP_MOUNTCMD" +-fi +- +-mount_url $BACKUP_URL $BUILD_DIR/outputfs $BACKUP_OPTIONS +diff --git a/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh b/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh +new file mode 120000 +index 00000000..60e0f83f +--- /dev/null ++++ b/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh +@@ -0,0 +1 @@ ++../../NETFS/default/100_mount_NETFS_path.sh +\ No newline at end of file +diff --git a/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh +deleted file mode 100644 +index d02dcf34..00000000 +--- a/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh ++++ /dev/null +@@ -1,15 +0,0 @@ +-# Copied from ../../../backup/NETFS/default/980_umount_NETFS_dir.sh for YUM +- +-# umount NETFS mountpoint +- +-if [[ "$BACKUP_UMOUNTCMD" ]] ; then +- BACKUP_URL="var://BACKUP_UMOUNTCMD" +-fi +- +-umount_url $BACKUP_URL $BUILD_DIR/outputfs +- +-rmdir $v $BUILD_DIR/outputfs >&2 +-if [[ $? -eq 0 ]] ; then +- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask +- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +-fi +diff --git a/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh +new file mode 120000 +index 00000000..2c29cb57 +--- /dev/null ++++ b/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh +@@ -0,0 +1 @@ ++../../NETFS/default/980_umount_NETFS_dir.sh +\ No newline at end of file +diff --git a/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh +deleted file mode 100644 +index 64b7a792..00000000 +--- a/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh ++++ /dev/null +@@ -1,15 +0,0 @@ +-# create mount point +-if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]; then +- mkdir -p $v "$BUILD_DIR/outputfs" >&2 +- StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" +- +- AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- +- if [[ "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]] ; then +- BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_MOUNTCMD" +- fi +- +- mount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs $BACKUP_DUPLICITY_NETFS_OPTIONS +- +- BACKUP_DUPLICITY_URL="file://$BUILD_DIR/outputfs" +-fi +diff --git a/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh +new file mode 120000 +index 00000000..7f558c5d +--- /dev/null ++++ b/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh +@@ -0,0 +1 @@ ++../../../backup/DUPLICITY/default/100_mount_duplicity_path.sh +\ No newline at end of file +diff --git a/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh +deleted file mode 100644 +index 60aa811e..00000000 +--- a/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh ++++ /dev/null +@@ -1,15 +0,0 @@ +-# umount mountpoint +-if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]; then +- +- if [[ "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]] ; then 
+- BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_UMOUNTCMD" +- fi +- +- umount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs +- +- rmdir $v $BUILD_DIR/outputfs >&2 +- if [[ $? -eq 0 ]] ; then +- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask +- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- fi +-fi +diff --git a/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh +new file mode 120000 +index 00000000..b7e47be1 +--- /dev/null ++++ b/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh +@@ -0,0 +1 @@ ++../../../backup/DUPLICITY/default/980_unmount_duplicity_path.sh +\ No newline at end of file +diff --git a/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh b/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh +deleted file mode 100644 +index cfd70026..00000000 +--- a/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh ++++ /dev/null +@@ -1,116 +0,0 @@ +-# Copied from ../../../prep/NETFS/default/050_check_NETFS_requirements.sh for YUM +-# BACKUP_URL=[proto]://[host]/[share] +-# example: nfs://lucky/temp/backup +-# example: cifs://lucky/temp +-# example: usb:///dev/sdb1 +-# example: tape:///dev/nst0 +-# example: file:///path +-# example: iso://backup/ +-# example: sshfs://user@host/G/rear/ +-# example: ftpfs://user:password@host/rear/ (the password part is optional) +- +-[[ "$BACKUP_URL" || "$BACKUP_MOUNTCMD" ]] +-# FIXME: The above test does not match the error message below. +-# To match the the error message the test should be +-# [[ "$BACKUP_URL" || ( "$BACKUP_MOUNTCMD" && "$BACKUP_UMOUNTCMD" ) ]] +-# but I cannot decide if there is a subtle reason for the omission. +-StopIfError "You must specify either BACKUP_URL or BACKUP_MOUNTCMD and BACKUP_UMOUNTCMD !" +- +-if [[ "$BACKUP_URL" ]] ; then +- local scheme=$( url_scheme $BACKUP_URL ) +- local hostname=$( url_hostname $BACKUP_URL ) +- local path=$( url_path $BACKUP_URL ) +- +- ### check for vaild BACKUP_URL schemes +- ### see https://github.com/rear/rear/issues/842 +- case $scheme in +- (nfs|cifs|usb|tape|file|iso|sshfs|ftpfs) +- # do nothing for vaild BACKUP_URL schemes +- : +- ;; +- (*) +- Error "Invalid scheme '$scheme' in BACKUP_URL '$BACKUP_URL' valid schemes: nfs cifs usb tape file iso sshfs ftpfs" +- ;; +- esac +- +- ### set other variables from BACKUP_URL +- if [[ "usb" = "$scheme" ]] ; then +- # if USB_DEVICE is not explicitly specified it is the path from BACKUP_URL +- [[ -z "$USB_DEVICE" ]] && USB_DEVICE="$path" +- fi +- +- ### check if host is reachable +- if [[ "$PING" && "$hostname" ]] ; then +- # Only LogPrintIfError but no StopIfError because it is not a fatal error +- # (i.e. not a reason to abort) when a host does not respond to a 'ping' +- # because hosts can be accessible via certain ports but do not respond to a 'ping' +- # cf. https://bugzilla.opensuse.org/show_bug.cgi?id=616706 +- # TODO: it would be better to test if it is accessible via the actually needed port(s) +- ping -c 2 "$hostname" >/dev/null +- LogPrintIfError "Host '$hostname' in BACKUP_URL '$BACKUP_URL' does not respond to a 'ping'." 
+- else +- Log "Skipping 'ping' test for host '$hostname' in BACKUP_URL '$BACKUP_URL'" +- fi +- +-fi +- +-# some backup progs require a different backuparchive name +-case "$(basename $BACKUP_PROG)" in +- (rsync) +- # rsync creates a target directory instead of a file +- BACKUP_PROG_SUFFIX= +- BACKUP_PROG_COMPRESS_SUFFIX= +- ;; +- (*) +- : +- ;; +-esac +- +-# include required programs +-# the code below includes mount.* and umount.* programs for all non-empty schemes +-# (i.e. for any non-empty BACKUP_URL like usb tape file sshfs ftpfs) +-# and it includes 'mount.' for empty schemes (e.g. if BACKUP_URL is not set) +-# which is o.k. because it is a catch all rule so we do not miss any +-# important executable needed a certain scheme and it does not hurt +-# see https://github.com/rear/rear/pull/859 +-PROGS+=( +-showmount +-mount.$(url_scheme $BACKUP_URL) +-umount.$(url_scheme $BACKUP_URL) +-$( test "$BACKUP_MOUNTCMD" && echo "${BACKUP_MOUNTCMD%% *}" ) +-$( test "$BACKUP_UMOUNTCMD" && echo "${BACKUP_UMOUNTCMD%% *}" ) +-$BACKUP_PROG +-gzip +-bzip2 +-xz +-) +- +-# include required stuff for sshfs or ftpfs (via CurlFtpFS) +-if [[ "sshfs" = "$scheme" || "ftpfs" = "$scheme" ]] ; then +- # both sshfs and ftpfs (via CurlFtpFS) are based on FUSE +- PROGS+=( fusermount mount.fuse ) +- MODULES+=( fuse ) +- MODULES_LOAD+=( fuse ) +- COPY_AS_IS+=( /etc/fuse.conf ) +- # include what is specific for sshfs +- if [[ "sshfs" = "$scheme" ]] ; then +- # see http://sourceforge.net/apps/mediawiki/fuse/index.php?title=SshfsFaq +- REQUIRED_PROGS+=( sshfs ssh ) +- # relying on 500_ssh.sh to take a long the SSH related files +- fi +- # include what is specific for ftpfs +- if [[ "ftpfs" = "$scheme" ]] ; then +- # see http://curlftpfs.sourceforge.net/ +- # and https://github.com/rear/rear/issues/845 +- REQUIRED_PROGS+=( curlftpfs ) +- fi +-fi +- +-# include required modules, like nfs cifs ... +-# the code below includes modules for all non-empty schemes +-# (i.e. for any non-empty BACKUP_URL like usb tape file sshfs ftpfs) +-# which is o.k. 
because this must been seen as a catch all rule +-# (one never knows what one could miss) +-# see https://github.com/rear/rear/pull/859 +-MODULES+=( $(url_scheme $BACKUP_URL) ) +- +diff --git a/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh b/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh +new file mode 120000 +index 00000000..af1512d6 +--- /dev/null ++++ b/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh +@@ -0,0 +1 @@ ++../../NETFS/default/050_check_NETFS_requirements.sh +\ No newline at end of file +diff --git a/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh b/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh +deleted file mode 100644 +index f7e31ed6..00000000 +--- a/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh ++++ /dev/null +@@ -1,12 +0,0 @@ +-# Copied from ../../../backup/NETFS/default/100_mount_NETFS_path.sh for YUM +-# create mount point +-mkdir -p $v "$BUILD_DIR/outputfs" >&2 +-StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" +- +-AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- +-if [[ "$BACKUP_MOUNTCMD" ]] ; then +- BACKUP_URL="var://BACKUP_MOUNTCMD" +-fi +- +-mount_url $BACKUP_URL $BUILD_DIR/outputfs $BACKUP_OPTIONS +diff --git a/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh b/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh +new file mode 120000 +index 00000000..73dd4697 +--- /dev/null ++++ b/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh +@@ -0,0 +1 @@ ++../../../restore/YUM/default/100_mount_YUM_path.sh +\ No newline at end of file +diff --git a/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh b/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh +deleted file mode 100644 +index 86d1708d..00000000 +--- a/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh ++++ /dev/null +@@ -1,300 +0,0 @@ +-# Copied from ../../../prep/NETFS/default/070_set_backup_archive.sh for YUM +-### Determine the name of the backup archive +-### This needs to be after we special case USB devices. +- +-# FIXME: backuparchive is no local variable (regardless that it is lowercased) +- +-# If TAPE_DEVICE is specified, use that: +-if test "$TAPE_DEVICE" ; then +- backuparchive="$TAPE_DEVICE" +- LogPrint "Using backup archive '$backuparchive'" +- return +-fi +- +-local backup_file_suffix="$BACKUP_PROG_SUFFIX$BACKUP_PROG_COMPRESS_SUFFIX" +-local backup_file_name="$BACKUP_PROG_ARCHIVE$backup_file_suffix" +- +-local scheme=$( url_scheme $BACKUP_URL ) +-local path=$( url_path $BACKUP_URL ) +-case "$scheme" in +- (file|iso) +- # Define the output path according to the scheme +- local outputpath=$( backup_path $scheme $path ) +- backuparchive="$outputpath/$backup_file_name" +- LogPrint "Using backup archive '$backuparchive'" +- return +- ;; +- (tape) +- # TODO: Check if that case is really needed. +- # Perhaps prep/default/030_translate_tape.sh does already all what is needed. +- backuparchive=$path +- LogPrint "Using backup archive '$backuparchive'" +- return +- ;; +-esac +- +-local backup_directory=$BUILD_DIR/outputfs/$NETFS_PREFIX +- +-# Normal (i.e. non-incremental/non-differential) backup: +-if ! test "incremental" = "$BACKUP_TYPE" -o "differential" = "$BACKUP_TYPE" ; then +- # In case of normal (i.e. 
non-incremental) backup there is only one restore archive +- # and its name is the same as the backup archive (usually 'backup.tar.gz'): +- backuparchive="$backup_directory/$backup_file_name" +- LogPrint "Using backup archive '$backuparchive'" +- # This script is also run during "rear recover/restoreonly" where RESTORE_ARCHIVES must be set. +- local backup_restore_workflows=( "recover" "restoreonly" ) +- if IsInArray $WORKFLOW ${backup_restore_workflows[@]} ; then +- # Only set RESTORE_ARCHIVES the backup archive is actually accessible +- # cf. https://github.com/rear/rear/issues/1166 +- if test -r "$backuparchive" ; then +- RESTORE_ARCHIVES=( "$backuparchive" ) +- else +- # In case of USB backup there is the subsequent 540_choose_backup_archive.sh script +- # that shows a backup selection dialog when RESTORE_ARCHIVES is not already set. +- if test "usb" = "$scheme" ; then +- LogPrint "Backup archive '$backuparchive' not readable. Need to select another one." +- else +- Error "Backup archive '$backuparchive' not readable." +- fi +- fi +- fi +- return +-fi +- +-# Incremental or differential backup: +-set -e -u -o pipefail +-# Incremental or differential backup only works for the NETFS backup method +-# and only with the 'tar' backup program: +-if ! test "NETFS" = "$BACKUP" -a "tar" = "$BACKUP_PROG" ; then +- Error "BACKUP_TYPE incremental or differential only works with BACKUP=NETFS and BACKUP_PROG=tar" +-fi +-# Incremental or differential backup is currently only known to work with BACKUP_URL=nfs://. +-# Other BACKUP_URL schemes may work and at least BACKUP_URL=usb:///... needs special setup +-# to work with incremental or differential backup (see https://github.com/rear/rear/issues/1145): +-if test "usb" = "$scheme" ; then +- # When USB_SUFFIX is set the compliance mode is used where +- # backup on USB works in compliance with backup on NFS which means +- # a fixed backup directory where incremental or differential backups work. +- # Use plain $USB_SUFFIX and not "$USB_SUFFIX" because when USB_SUFFIX contains only blanks +- # test "$USB_SUFFIX" would result true because test " " results true: +- test $USB_SUFFIX || Error "BACKUP_TYPE incremental or differential requires USB_SUFFIX for BACKUP_URL=usb" +-fi +-# Incremental or differential backup and keeping old backup contradict each other (mutual exclusive) +-# so that NETFS_KEEP_OLD_BACKUP_COPY must not be 'true' in case of incremental or differential backup: +-if test "$NETFS_KEEP_OLD_BACKUP_COPY" ; then +- NETFS_KEEP_OLD_BACKUP_COPY="" +- LogPrint "Disabled NETFS_KEEP_OLD_BACKUP_COPY because BACKUP_TYPE incremental or differential does not work with that" +-fi +-# For incremental or differential backup some date values (weekday, YYYY-MM-DD, HHMM) are needed +-# that must be consistent for one single point of the current time which means +-# one cannot call the 'date' command several times because then there would be +-# a small probability that e.g. weekday, YYYY-MM-DD, HHMM do not match +-# one single point in time (in particular when midnight passes in between). +-# Therefore the output of one single 'date' call is storend in an array and +-# the array elements are then assinged to individual variables as needed: +-local current_date_output=( $( date '+%a %Y-%m-%d %H%M' ) ) +-local current_weekday="${current_date_output[0]}" +-local current_yyyy_mm_dd="${current_date_output[1]}" +-local current_hhmm="${current_date_output[2]}" +-# The date FULLBACKUP_OUTDATED_DAYS ago is needed to check if the latest full backup is too old. 
+-# When the latest full backup is more than FULLBACKUP_OUTDATED_DAYS ago a new full backup is made. +-# This separated call of the 'date' command which is technically needed because it is +-# for another point in time (e.g. 7 days ago) is run after the above call of the 'date' +-# command for the current time to be on the safe side when midnight passes in between +-# both 'date' commands which would then result that a new full backup is made +-# when the latest full backup is basically right now FULLBACKUP_OUTDATED_DAYS ago because +-# the stored date of the latest full backup is the current date at the time when it was made. +-# Example (assuming FULLBACKUP_OUTDATED_DAYS=7 ): +-# The latest full backup was made on Sunday January 10 in 2016 (just before midnight). +-# One week later this script runs again while midnight passes between the two 'date' calls +-# so that current_date_output[@]="Sun 2016-01-17 0000" (still Sunday January 17 in 2016) +-# and yyyymmdd_max_days_ago=20160111 (already Monday January 11 in 2016), then +-# Sunday January 10 is older than Monday January 11 so that a new full backup is made: +-test "$FULLBACKUP_OUTDATED_DAYS" || FULLBACKUP_OUTDATED_DAYS="7" +-local yyyymmdd_max_days_ago=$( date '+%Y%m%d' --date="$FULLBACKUP_OUTDATED_DAYS days ago" ) +-# Full backup file names are of the form YYYY-MM-DD-HHMM-F.tar.gz +-# where the 'F' denotes a full backup: +-local full_backup_marker="F" +-# Incremental backup file names are of the form YYYY-MM-DD-HHMM-I.tar.gz +-# where the 'I' denotes an incremental backup: +-local incremental_backup_marker="I" +-# Differential backup file names are of the form YYYY-MM-DD-HHMM-D.tar.gz +-# where the last 'D' denotes a differential backup: +-local differential_backup_marker="D" +-# In case of incremental or differential backup the RESTORE_ARCHIVES contains +-# first the latest full backup file. +-# In case of incremental backup the RESTORE_ARCHIVES contains +-# after the latest full backup file each incremental backup +-# in the ordering how they must be restored. +-# For example when the latest full backup was made on Sunday +-# plus each subsequent weekday a separated incremental backup was made, +-# then during a "rear recover" on Wednesday morning +-# first the full backup from Sunday has to be restored, +-# then the incremental backup from Monday, and +-# finally the incremental backup from Tuesday. +-# In case of differential backup the RESTORE_ARCHIVES contains +-# after the latest full backup file the latest differential backup. +-# For example when the latest full backup was made on Sunday +-# plus each subsequent weekday a separated differential backup was made, +-# then during a "rear recover" on Wednesday morning +-# first the full backup from Sunday has to be restored, +-# and finally the differential backup from Tuesday +-# (i.e. the differential backup from Monday is skipped). +-# The date format YYYY-MM-DD that is used here is crucial. 
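As a concrete illustration of that naming and ordering, a backup directory populated according to the scheme above might contain (hypothetical file names):

    2016-01-10-2000-F.tar.gz   # full backup made on Sunday
    2016-01-11-2000-I.tar.gz   # incremental backup made on Monday
    2016-01-12-2000-I.tar.gz   # incremental backup made on Tuesday

For a "rear recover" on Wednesday morning RESTORE_ARCHIVES would then list all three files in exactly this order, whereas with BACKUP_TYPE=differential only the full backup plus the latest -D archive would be used.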
+-# It is the ISO 8601 format 'year-month-day' to specify a day of a year +-# that is accepted by 'tar' for the '--newer' option, +-# see the GNU tar manual section "Operating Only on New Files" +-# at https://www.gnu.org/software/tar/manual/html_node/after.html +-# and the GNU tar manual section "Calendar date items" +-# at https://www.gnu.org/software/tar/manual/html_node/Calendar-date-items.html#SEC124 +-local date_glob_regex="[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]" +-local date_time_glob_regex="$date_glob_regex-[0-9][0-9][0-9][0-9]" +-# Determine what kind of backup must be created, 'full' or 'incremental' or 'differential' +-# (the empty default means it is undecided what kind of backup must be created): +-local create_backup_type="" +-# Code regarding creating a backup is useless during "rear recover" and +-# messages about creating a backup are misleading during "rear recover": +-local recovery_workflows=( "recover" "layoutonly" "restoreonly" ) +-if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # When today is a specified full backup day, do a full backup in any case +- # (regardless if there is already a full backup of this day): +- if IsInArray "$current_weekday" "${FULLBACKUPDAY[@]}" ; then +- create_backup_type="full" +- LogPrint "Today's weekday ('$current_weekday') is a full backup day that triggers a new full backup in any case" +- fi +-fi +-# Get the latest full backup (if exists): +-local full_backup_glob_regex="$date_time_glob_regex-$full_backup_marker$backup_file_suffix" +-# Here things like 'find /path/to/dir -name '*.tar.gz' | sort' are used because +-# one cannot use bash globbing via commands like 'ls /path/to/dir/*.tar.gz' +-# because /usr/sbin/rear sets the nullglob bash option which leads to plain 'ls' +-# when '/path/to/dir/*.tar.gz' matches nothing (i.e. when no backup file exists) +-# so that then plain 'ls' would result nonsense. +-local latest_full_backup=$( find $backup_directory -name "$full_backup_glob_regex" | sort | tail -n1 ) +-# A latest full backup is found: +-if test "$latest_full_backup" ; then +- local latest_full_backup_file_name=$( basename "$latest_full_backup" ) +- # The full_or_incremental_backup_glob_regex is also needed below for non-"recover" WORKFLOWs +- # to set the right variables for creating an incremental backup: +- local full_or_incremental_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$incremental_backup_marker]$backup_file_suffix" +- # Code regarding creating a backup is useless during "rear recover" and +- # messages about creating a backup are misleading during "rear recover": +- if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # There is nothing to do here if it is already decided that +- # a full backup must be created (see "full backup day" above"): +- if ! 
test "full" = "$create_backup_type" ; then +- local latest_full_backup_date=$( echo $latest_full_backup_file_name | grep -o "$date_glob_regex" ) +- local yyyymmdd_latest_full_backup=$( echo $latest_full_backup_date | tr -d '-' ) +- # Check if the latest full backup is too old: +- if test $yyyymmdd_latest_full_backup -lt $yyyymmdd_max_days_ago ; then +- create_backup_type="full" +- LogPrint "Latest full backup date '$latest_full_backup_date' too old (more than $FULLBACKUP_OUTDATED_DAYS days ago) triggers new full backup" +- else +- # When a latest full backup is found that is not too old +- # a BACKUP_TYPE (incremental or differential) backup will be created: +- create_backup_type="$BACKUP_TYPE" +- LogPrint "Latest full backup found ($latest_full_backup_file_name) triggers $BACKUP_TYPE backup" +- fi +- fi +- else +- # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set: +- case "$BACKUP_TYPE" in +- (incremental) +- # When a latest full backup is found use that plus all later incremental backups for restore: +- # The following command is a bit tricky: +- # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-I.tar.gz files in the backup directory and sorts them +- # and finally it outputs only those that match the latest full backup file name and incremental backups that got sorted after that +- # where it is mandatory that the backup file names sort by date (i.e. date must be the leading part of the backup file names): +- RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" ) ) +- ;; +- (differential) +- # For differential backup use the latest full backup plus the one latest differential backup for restore: +- # The following command is a bit tricky: +- # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-D.tar.gz files in the backup directory and sorts them +- # then it outputs only those that match the latest full backup file name and all differential backups that got sorted after that +- # and then it outputs only the first line (i.e. the full backup) and the last line (i.e. the latest differential backup) +- # but when no differential backup exists (i.e. when only the full backup exists) the first line is also the last line +- # so that "sed -n -e '1p;$p'" outputs the full backup twice which is corrected by the final "sort -u": +- local full_or_differential_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$differential_backup_marker]$backup_file_suffix" +- RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_differential_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | sed -n -e '1p;$p' | sort -u ) ) +- ;; +- (*) +- BugError "Unexpected BACKUP_TYPE '$BACKUP_TYPE'" +- ;; +- esac +- # Tell the user what will be restored: +- local restore_archives_file_names="" +- for restore_archive in "${RESTORE_ARCHIVES[@]}" ; do +- restore_archives_file_names="$restore_archives_file_names $( basename "$restore_archive" )" +- done +- LogPrint "For backup restore using $restore_archives_file_names" +- fi +-# No latest full backup is found: +-else +- # Code regarding creating a backup is useless during "rear recover" and +- # messages about creating a backup are misleading during "rear recover": +- if ! 
IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # If no latest full backup is found create one during "rear mkbackup": +- create_backup_type="full" +- LogPrint "No full backup found (YYYY-MM-DD-HHMM-F.tar.gz) triggers full backup" +- else +- # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set: +- # If no latest full backup is found (i.e. no file name matches the YYYY-MM-DD-HHMM-F.tar.gz form) +- # fall back to what is done in case of normal (i.e. non-incremental/non-differential) backup +- # and hope for the best (i.e. that a backup_directory/backup_file_name actually exists). +- # In case of normal (i.e. non-incremental/non-differential) backup there is only one restore archive +- # and its name is the same as the backup archive (usually 'backup.tar.gz'). +- # This is only a fallback setting to be more on the safe side for "rear recover". +- # Initially for the very fist run of incremental backup during "rear mkbackup" +- # a full backup file of the YYYY-MM-DD-HHMM-F.tar.gz form will be created. +- RESTORE_ARCHIVES=( "$backup_directory/$backup_file_name" ) +- LogPrint "Using $backup_file_name for backup restore" +- fi +-fi +-# Code regarding creating a backup is useless during "rear recover" and +-# messages about creating a backup are misleading during "rear recover": +-if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # Set the right variables for creating a backup (but do not actually do anything at this point): +- case "$create_backup_type" in +- (full) +- local new_full_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$full_backup_marker$backup_file_suffix" +- backuparchive="$backup_directory/$new_full_backup_file_name" +- BACKUP_PROG_CREATE_NEWER_OPTIONS="-V $new_full_backup_file_name" +- LogPrint "Performing full backup using backup archive '$new_full_backup_file_name'" +- ;; +- (incremental) +- local new_incremental_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$incremental_backup_marker$backup_file_suffix" +- backuparchive="$backup_directory/$new_incremental_backup_file_name" +- # Get the latest latest incremental backup that is based on the latest full backup (if exists): +- local incremental_backup_glob_regex="$date_time_glob_regex-$incremental_backup_marker$backup_file_suffix" +- # First get the latest full backup plus all later incremental backups (cf. 
how RESTORE_ARCHIVES is set in case of incremental backup) +- # then grep only the incremental backups and from the incremental backups use only the last one (if exists): +- local latest_incremental_backup=$( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | grep "$incremental_backup_glob_regex" | tail -n1 ) +- if test "$latest_incremental_backup" ; then +- # A latest incremental backup that is based on the latest full backup is found: +- local latest_incremental_backup_file_name=$( basename $latest_incremental_backup ) +- LogPrint "Latest incremental backup found ($latest_incremental_backup_file_name) that is newer than the latest full backup" +- local latest_incremental_backup_date=$( echo $latest_incremental_backup_file_name | grep -o "$date_glob_regex" ) +- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_incremental_backup_date -V $latest_incremental_backup_file_name" +- LogPrint "Performing incremental backup for files newer than $latest_incremental_backup_date using backup archive '$new_incremental_backup_file_name'" +- else +- # When there is not yet an incremental backup that is based on the latest full backup +- # the new created incremental backup must be based on the latest full backup: +- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name" +- LogPrint "Performing incremental backup for files newer than $latest_full_backup_date using backup archive '$new_incremental_backup_file_name'" +- fi +- ;; +- (differential) +- local new_differential_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$differential_backup_marker$backup_file_suffix" +- backuparchive="$backup_directory/$new_differential_backup_file_name" +- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name" +- LogPrint "Performing differential backup for files newer than $latest_full_backup_date using backup archive '$new_differential_backup_file_name'" +- ;; +- (*) +- BugError "Unexpected create_backup_type '$create_backup_type'" +- ;; +- esac +-fi +-# Go back from "set -e -u -o pipefail" to the defaults: +-apply_bash_flags_and_options_commands "$DEFAULT_BASH_FLAGS_AND_OPTIONS_COMMANDS" +- +diff --git a/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh b/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh +new file mode 120000 +index 00000000..b8de3d9e +--- /dev/null ++++ b/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh +@@ -0,0 +1 @@ ++../../../prep/YUM/default/070_set_backup_archive.sh +\ No newline at end of file +diff --git a/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh +deleted file mode 100644 +index dc719e38..00000000 +--- a/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh ++++ /dev/null +@@ -1,14 +0,0 @@ +-# Copied from ../../../backup/NETFS/default/980_umount_NETFS_dir.sh for YUM +-# umount NETFS mountpoint +- +-if [[ "$BACKUP_UMOUNTCMD" ]] ; then +- BACKUP_URL="var://BACKUP_UMOUNTCMD" +-fi +- +-umount_url $BACKUP_URL $BUILD_DIR/outputfs +- +-rmdir $v $BUILD_DIR/outputfs >&2 +-if [[ $? 
-eq 0 ]] ; then +- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask +- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +-fi +diff --git a/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh +new file mode 120000 +index 00000000..ada5ea50 +--- /dev/null ++++ b/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh +@@ -0,0 +1 @@ ++../../../restore/YUM/default/980_umount_YUM_dir.sh +\ No newline at end of file diff --git a/SOURCES/rear-bz1983013.patch b/SOURCES/rear-bz1983013.patch new file mode 100644 index 0000000..f8032bb --- /dev/null +++ b/SOURCES/rear-bz1983013.patch @@ -0,0 +1,68 @@ +diff --git a/usr/share/rear/conf/Linux-ppc64.conf b/usr/share/rear/conf/Linux-ppc64.conf +index 7e20ddc7..d7774062 100644 +--- a/usr/share/rear/conf/Linux-ppc64.conf ++++ b/usr/share/rear/conf/Linux-ppc64.conf +@@ -1,18 +1,26 @@ +-REQUIRED_PROGS+=( sfdisk ) ++REQUIRED_PROGS+=( sfdisk ofpathname ) + + PROGS+=( + mkofboot + ofpath + ybin + yabootconfig +-bootlist + pseries_platform + nvram +-ofpathname + bc + agetty + ) + ++if grep -q "emulated by qemu" /proc/cpuinfo ; then ++ # Qemu/KVM virtual machines don't need bootlist - don't complain if ++ # it is missing ++ PROGS+=( bootlist ) ++else ++ # PowerVM environment, we need to run bootlist, otherwise ++ # we can't make the system bpotable. Be strict about requiring it ++ REQUIRED_PROGS+=( bootlist ) ++fi ++ + COPY_AS_IS+=( + /usr/lib/yaboot/yaboot + /usr/lib/yaboot/ofboot +diff --git a/usr/share/rear/conf/Linux-ppc64le.conf b/usr/share/rear/conf/Linux-ppc64le.conf +index d00154a2..df8066ea 100644 +--- a/usr/share/rear/conf/Linux-ppc64le.conf ++++ b/usr/share/rear/conf/Linux-ppc64le.conf +@@ -1,10 +1,8 @@ + REQUIRED_PROGS+=( sfdisk ) + + PROGS+=( +-bootlist + pseries_platform + nvram +-ofpathname + bc + agetty + ) +@@ -17,4 +15,18 @@ agetty + if [[ $(awk '/platform/ {print $NF}' < /proc/cpuinfo) != PowerNV ]] ; then + # No firmware files when ppc64le Linux is not run in BareMetal Mode (PowerNV): + test "${FIRMWARE_FILES[*]}" || FIRMWARE_FILES=( 'no' ) ++ # grub2-install for powerpc-ieee1275 calls ofpathname, so without it, ++ # the rescue system can't make the recovered system bootable ++ REQUIRED_PROGS+=( ofpathname ) ++ if grep -q "emulated by qemu" /proc/cpuinfo ; then ++ # Qemu/KVM virtual machines don't need bootlist - don't complain if ++ # it is missing ++ PROGS+=( bootlist ) ++ else ++ # PowerVM environment, we need to run bootlist, otherwise ++ # we can't make the system bpotable. Be strict about requiring it ++ REQUIRED_PROGS+=( bootlist ) ++ fi ++else ++ PROGS+=( ofpathname bootlist ) + fi diff --git a/SOURCES/rear-bz1993296.patch b/SOURCES/rear-bz1993296.patch new file mode 100644 index 0000000..15e65a2 --- /dev/null +++ b/SOURCES/rear-bz1993296.patch @@ -0,0 +1,34 @@ +From 4233fe30b315737ac8c4d857e2b04e021c2e2886 Mon Sep 17 00:00:00 2001 +From: Pavel Cahyna +Date: Mon, 16 Aug 2021 10:10:38 +0300 +Subject: [PATCH] Revert the main part of PR #2299 + +multipath -l is very slow with many multipath devices. As it will be +called for every multipath device, it leads to quadratic time complexity +in the number of multipath devices. For thousands of devices, ReaR can +take hours to scan and exclude them. We therefore have to comment +multipath -l out, as it is a huge performance regression, and find +another solution to bug #2298. 
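A rough sketch of the call pattern that caused the quadratic behaviour (illustrative only; the real per-device test is the is_multipath_path function patched below, and the device loop here is hypothetical):

    for dev in /sys/block/sd* ; do
        # Each candidate device used to trigger a fresh "multipath -l" listing,
        # whose output grows with the number of multipath devices,
        # so n devices times O(n) listing work means O(n^2) in total:
        multipath -l | grep -q '[[:alnum:]]' || continue
        # The cheap per-device check that remains after this change:
        multipath -c "/dev/${dev##*/}" &>/dev/null && echo "${dev##*/} is a multipath path"
    done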
+--- + usr/share/rear/lib/layout-functions.sh | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh +index cdd81a14..8c8be74b 100644 +--- a/usr/share/rear/lib/layout-functions.sh ++++ b/usr/share/rear/lib/layout-functions.sh +@@ -771,7 +771,10 @@ function is_multipath_path { + # so that no "multipath -l" output could clutter the log (the "multipath -l" output is irrelevant here) + # in contrast to e.g. test "$( multipath -l )" that would falsely succeed with blank output + # and the output would appear in the log in 'set -x' debugscript mode: +- multipath -l | grep -q '[[:alnum:]]' || return 1 ++ # ++ # Unfortunately, multipat -l is quite slow with many multipath devices ++ # and becomes a performance bottleneck, so we must comment it out for now. ++ #multipath -l | grep -q '[[:alnum:]]' || return 1 + # Check if a block device should be a path in a multipath device: + multipath -c /dev/$1 &>/dev/null + } +-- +2.26.3 + diff --git a/SOURCES/rear-bz2035939.patch b/SOURCES/rear-bz2035939.patch new file mode 100644 index 0000000..30771c9 --- /dev/null +++ b/SOURCES/rear-bz2035939.patch @@ -0,0 +1,56 @@ +diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf +index 0c230f38..f231bf3d 100644 +--- a/usr/share/rear/conf/default.conf ++++ b/usr/share/rear/conf/default.conf +@@ -2707,6 +2707,15 @@ WARN_MISSING_VOL_ID=1 + USE_CFG2HTML= + # The SKIP_CFG2HTML variable is no longer supported since ReaR 1.18 + ++# IP addresses that are present on the system but must be excluded when ++# building the network configuration used in recovery mode; this is typically ++# used when floating IP addresses are used on the system ++EXCLUDE_IP_ADDRESSES=() ++ ++# Network interfaces that are present on the system but must be excluded when ++# building the network configuration used in recovery mode ++EXCLUDE_NETWORK_INTERFACES=() ++ + # Simplify bonding setups by configuring always the first active device of a + # bond, except when mode is 4 (IEEE 802.3ad policy) + SIMPLIFY_BONDING=no +diff --git a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh +index f806bfbf..2385f5b6 100644 +--- a/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh ++++ b/usr/share/rear/rescue/GNU/Linux/310_network_devices.sh +@@ -355,6 +355,11 @@ function is_interface_up () { + local network_interface=$1 + local sysfspath=/sys/class/net/$network_interface + ++ if IsInArray "$network_interface" "${EXCLUDE_NETWORK_INTERFACES[@]}"; then ++ LogPrint "Excluding '$network_interface' per EXCLUDE_NETWORK_INTERFACES directive." ++ return 1 ++ fi ++ + local state=$( cat $sysfspath/operstate ) + if [ "$state" = "down" ] ; then + return 1 +@@ -403,11 +408,19 @@ function ipaddr_setup () { + if [ -n "$ipaddrs" ] ; then + # If some IP is found for the network interface, then use them + for ipaddr in $ipaddrs ; do ++ if IsInArray "${ipaddr%%/*}" "${EXCLUDE_IP_ADDRESSES[@]}"; then ++ LogPrint "Excluding IP address '$ipaddr' per EXCLUDE_IP_ADDRESSES directive even through it's defined in mapping file '$CONFIG_DIR/mappings/ip_addresses'." 
++ continue ++ fi + echo "ip addr add $ipaddr dev $mapped_as" + done + else + # Otherwise, collect IP addresses for the network interface on the system + for ipaddr in $( ip a show dev $network_interface scope global | grep "inet.*\ " | tr -s " " | cut -d " " -f 3 ) ; do ++ if IsInArray "${ipaddr%%/*}" "${EXCLUDE_IP_ADDRESSES[@]}"; then ++ LogPrint "Excluding IP address '$ipaddr' per EXCLUDE_IP_ADDRESSES directive." ++ continue ++ fi + echo "ip addr add $ipaddr dev $mapped_as" + done + fi diff --git a/SOURCES/rear-bz2048454.patch b/SOURCES/rear-bz2048454.patch new file mode 100644 index 0000000..428505e --- /dev/null +++ b/SOURCES/rear-bz2048454.patch @@ -0,0 +1,78 @@ +diff --git a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh +index 35be1721..d3c9ae86 100644 +--- a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh ++++ b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh +@@ -103,12 +103,7 @@ local lvs_exit_code + pdev=$( get_device_name $pdev ) + + # Output lvmdev entry to DISKLAYOUT_FILE: +- # With the above example the output is: +- # lvmdev /dev/system /dev/sda1 7wwpcO-KmNN-qsTE-7sp7-JBJS-vBdC-Zyt1W7 41940992 +- echo "lvmdev /dev/$vgrp $pdev $uuid $size" +- +- # After the 'lvmdev' line was written to disklayout.conf so that the user can inspect it +- # check that the required positional parameters in the 'lvmdev' line are non-empty ++ # Check that the required positional parameters in the 'lvmdev' line are non-empty + # because an empty positional parameter would result an invalid 'lvmdev' line + # which would cause invalid parameters are 'read' as input during "rear recover" + # cf. "Verifying ... 'lvm...' entries" in layout/save/default/950_verify_disklayout_file.sh +@@ -117,13 +112,24 @@ local lvs_exit_code + # so that this also checks that the variables do not contain blanks or more than one word + # because blanks (actually $IFS characters) are used as field separators in disklayout.conf + # which means the positional parameter values must be exactly one non-empty word. +- # Two separated simple 'test $vgrp && test $pdev' commands are used here because +- # 'test $vgrp -a $pdev' does not work when $vgrp is empty or only blanks +- # because '-a' has two different meanings: "EXPR1 -a EXPR2" and "-a FILE" (see "help test") +- # so that when $vgrp is empty 'test $vgrp -a $pdev' tests if file $pdev exists +- # which is usually true because $pdev is usually a partition device node (e.g. /dev/sda1) +- # so that when $vgrp is empty 'test $vgrp -a $pdev' would falsely succeed: +- test $vgrp && test $pdev || Error "LVM 'lvmdev' entry in $DISKLAYOUT_FILE where volume_group or device is empty or more than one word" ++ test $pdev || Error "Cannot make 'lvmdev' entry in disklayout.conf (PV device '$pdev' empty or more than one word)" ++ if ! test $vgrp ; then ++ # Valid $pdev but invalid $vgrp (empty or more than one word): ++ # When $vgrp is empty it means it is a PV that is not part of a VG so the PV exists but it is not used. ++ # PVs that are not part of a VG are documented as comment in disklayout.conf but they are not recreated ++ # because they were not used on the original system so there is no need to recreate them by "rear recover" ++ # (the user can manually recreate them later in his recreated system when needed) ++ # cf. 
https://github.com/rear/rear/issues/2596 ++ DebugPrint "Skipping PV $pdev that is not part of a valid VG (VG '$vgrp' empty or more than one word)" ++ echo "# Skipping PV $pdev that is not part of a valid VG (VG '$vgrp' empty or more than one word):" ++ contains_visible_char "$vgrp" || vgrp='' ++ echo "# lvmdev /dev/$vgrp $pdev $uuid $size" ++ # Continue with the next line in the output of "lvm pvdisplay -c" ++ continue ++ fi ++ # With the above example the output is: ++ # lvmdev /dev/system /dev/sda1 7wwpcO-KmNN-qsTE-7sp7-JBJS-vBdC-Zyt1W7 41940992 ++ echo "lvmdev /dev/$vgrp $pdev $uuid $size" + + done + # Check the exit code of "lvm pvdisplay -c" +@@ -161,8 +167,15 @@ local lvs_exit_code + # lvmgrp /dev/system 4096 5119 20967424 + echo "lvmgrp /dev/$vgrp $extentsize $nrextents $size" + +- # Check that the required positional parameters in the 'lvmgrp' line are non-empty +- # cf. the code above to "check that the required positional parameters in the 'lvmdev' line are non-empty": ++ # Check that the required positional parameters in the 'lvmgrp' line are non-empty. ++ # The tested variables are intentionally not quoted here, cf. the code above to ++ # "check that the required positional parameters in the 'lvmdev' line are non-empty". ++ # Two separated simple 'test $vgrp && test $extentsize' commands are used here because ++ # 'test $vgrp -a $extentsize' does not work when $vgrp is empty or only blanks ++ # because '-a' has two different meanings: "EXPR1 -a EXPR2" and "-a FILE" (see "help test") ++ # so with empty $vgrp it becomes 'test -a $extentsize' that tests if a file $extentsize exists ++ # which is unlikely to be true but it is not impossible that a file $extentsize exists ++ # so when $vgrp is empty (or blanks) 'test $vgrp -a $extentsize' might falsely succeed: + test $vgrp && test $extentsize || Error "LVM 'lvmgrp' entry in $DISKLAYOUT_FILE where volume_group or extentsize is empty or more than one word" + + done +@@ -305,7 +318,8 @@ local lvs_exit_code + fi + already_processed_lvs+=( "$vg/$lv" ) + # Check that the required positional parameters in the 'lvmvol' line are non-empty +- # cf. the code above to "check that the required positional parameters in the 'lvmdev' line are non-empty": ++ # cf. 
the code above to "check that the required positional parameters in the 'lvmdev' line are non-empty" ++ # and the code above to "check that the required positional parameters in the 'lvmgrp' line are non-empty": + test $vg && test $lv && test $size && test $layout || Error "LVM 'lvmvol' entry in $DISKLAYOUT_FILE where volume_group or name or size or layout is empty or more than one word" + fi + diff --git a/SOURCES/rear-bz2049091.patch b/SOURCES/rear-bz2049091.patch new file mode 100644 index 0000000..9f5e12d --- /dev/null +++ b/SOURCES/rear-bz2049091.patch @@ -0,0 +1,25 @@ +diff --git a/usr/share/rear/layout/save/default/335_remove_excluded_multipath_vgs.sh b/usr/share/rear/layout/save/default/335_remove_excluded_multipath_vgs.sh +index 040e9eec..e731c994 100644 +--- a/usr/share/rear/layout/save/default/335_remove_excluded_multipath_vgs.sh ++++ b/usr/share/rear/layout/save/default/335_remove_excluded_multipath_vgs.sh +@@ -19,9 +19,9 @@ while read lvmdev name mpdev junk ; do + # Remember, multipath devices from a volume group that is "excluded" should be 'commented out' + device=$(echo $mpdev | cut -c1-45) + while read LINE ; do +- # Now we need to comment all lines that contain "$devices" in the LAYOUT_FILE ++ # Now we need to comment all lines that contain "$device" in the LAYOUT_FILE + sed -i "s|^$LINE|\#$LINE|" "$LAYOUT_FILE" +- done < <(grep "$device" $LAYOUT_FILE | grep -v "^#") ++ done < <(grep " $device " $LAYOUT_FILE | grep -v "^#") + Log "Excluding multipath device $device" + done < <(grep "^#lvmdev" $LAYOUT_FILE) + +@@ -31,7 +31,7 @@ done < <(grep "^#lvmdev" $LAYOUT_FILE) + while read LINE ; do + # multipath /dev/mapper/360060e8007e2e3000030e2e300002065 /dev/sdae,/dev/sdat,/dev/sdbi,/dev/sdp + device=$(echo $LINE | awk '{print $2}' | cut -c1-45) +- num=$(grep "$device" $LAYOUT_FILE | grep -v "^#" | wc -l) ++ num=$(grep " $device " $LAYOUT_FILE | grep -v "^#" | wc -l) + if [ $num -lt 2 ] ; then + # If the $device is only seen once (in a uncommented line) then the multipath is not in use + sed -i "s|^$LINE|\#$LINE|" "$LAYOUT_FILE" diff --git a/SOURCES/rear-bz2083272.patch b/SOURCES/rear-bz2083272.patch new file mode 100644 index 0000000..03c8a8a --- /dev/null +++ b/SOURCES/rear-bz2083272.patch @@ -0,0 +1,171 @@ +commit 3d1bcf1b50ca8201a3805bc7cab6ca69c14951a1 +Author: pcahyna +Date: Thu May 5 12:11:55 2022 +0200 + + Merge pull request #2795 from pcahyna/recover-check-sums + + Verify file hashes at the end of recover after file restore from backup + +diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf +index f231bf3d..881a0af0 100644 +--- a/usr/share/rear/conf/default.conf ++++ b/usr/share/rear/conf/default.conf +@@ -313,8 +313,30 @@ CDROM_SIZE=20 + # which exits with non-zero exit code when the disk layout or those files changed + # (cf. https://github.com/rear/rear/issues/1134) but the checklayout workflow + # does not automatically recreate the rescue/recovery system. ++# Files matching FILES_TO_PATCH_PATTERNS are added to this list automatically. + CHECK_CONFIG_FILES=( '/etc/drbd/' '/etc/drbd.conf' '/etc/lvm/lvm.conf' '/etc/multipath.conf' '/etc/rear/' '/etc/udev/udev.conf' ) + ++# FILES_TO_PATCH_PATTERNS is a space-separated list of shell glob patterns. ++# Files that match are eligible for a final migration of UUIDs and other ++# identifiers after recovery (if the layout recreation process has led ++# to a change of an UUID or a device name and a corresponding change needs ++# to be performed on restored configuration files ). 
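Because the value is one space-separated string of glob patterns rather than a bash array, a site could add its own file to be patched via /etc/rear/local.conf roughly like this (the path is a placeholder; note the leading space, and the [e] bracket that lets nullglob drop the pattern entirely when the file does not exist):

    FILES_TO_PATCH_PATTERNS+=" [e]tc/my-bootloader.conf"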
++# See finalize/GNU/Linux/280_migrate_uuid_tags.sh ++# The [] around the first letter make sure that shopt -s nullglob removes this file from the list if it does not exist ++ ++FILES_TO_PATCH_PATTERNS="[b]oot/{grub.conf,menu.lst,device.map} [e]tc/grub.* \ ++ [b]oot/grub/{grub.conf,grub.cfg,menu.lst,device.map} \ ++ [b]oot/grub2/{grub.conf,grub.cfg,menu.lst,device.map} \ ++ [e]tc/sysconfig/grub [e]tc/sysconfig/bootloader \ ++ [e]tc/lilo.conf [e]tc/elilo.conf \ ++ [e]tc/yaboot.conf \ ++ [e]tc/mtab [e]tc/fstab \ ++ [e]tc/mtools.conf \ ++ [e]tc/smartd.conf [e]tc/sysconfig/smartmontools \ ++ [e]tc/sysconfig/rawdevices \ ++ [e]tc/security/pam_mount.conf.xml \ ++ [b]oot/efi/*/*/grub.cfg" ++ + ## + # Relax-and-Recover recovery system update during "rear recover" + # +diff --git a/usr/share/rear/finalize/GNU/Linux/250_migrate_disk_devices_layout.sh b/usr/share/rear/finalize/GNU/Linux/250_migrate_disk_devices_layout.sh +index 1a91a0e3..e869e5e9 100644 +--- a/usr/share/rear/finalize/GNU/Linux/250_migrate_disk_devices_layout.sh ++++ b/usr/share/rear/finalize/GNU/Linux/250_migrate_disk_devices_layout.sh +@@ -29,19 +29,9 @@ LogPrint "The original restored files get saved in $save_original_file_dir (in $ + + local symlink_target="" + local restored_file="" +-# the funny [] around the first letter make sure that shopt -s nullglob removes this file from the list if it does not exist +-# the files without a [] are mandatory, like fstab FIXME: but below there is [e]tc/fstab not etc/fstab - why? +- +-for restored_file in [b]oot/{grub.conf,menu.lst,device.map} [e]tc/grub.* [b]oot/grub/{grub.conf,menu.lst,device.map} \ +- [b]oot/grub2/{grub.conf,grub.cfg,menu.lst,device.map} \ +- [e]tc/sysconfig/grub [e]tc/sysconfig/bootloader \ +- [e]tc/lilo.conf \ +- [e]tc/yaboot.conf \ +- [e]tc/mtab [e]tc/fstab \ +- [e]tc/mtools.conf \ +- [e]tc/smartd.conf [e]tc/sysconfig/smartmontools \ +- [e]tc/sysconfig/rawdevices \ +- [e]tc/security/pam_mount.conf.xml [b]oot/efi/*/*/grub.cfg ++# The variable expansion is deliberately not quoted in order to perform ++# pathname expansion on the variable value. ++for restored_file in $FILES_TO_PATCH_PATTERNS + do + # Silently skip directories and file not found: + test -f "$restored_file" || continue +diff --git a/usr/share/rear/finalize/GNU/Linux/280_migrate_uuid_tags.sh b/usr/share/rear/finalize/GNU/Linux/280_migrate_uuid_tags.sh +index 074689a1..d994ce8e 100644 +--- a/usr/share/rear/finalize/GNU/Linux/280_migrate_uuid_tags.sh ++++ b/usr/share/rear/finalize/GNU/Linux/280_migrate_uuid_tags.sh +@@ -23,18 +23,9 @@ LogPrint "Migrating filesystem UUIDs in certain restored files in $TARGET_FS_ROO + + local symlink_target="" + local restored_file="" +-# the funny [] around the first letter make sure that shopt -s nullglob removes this file from the list if it does not exist +-# the files without a [] are mandatory, like fstab FIXME: but below there is [e]tc/fstab not etc/fstab - why? +-for restored_file in [b]oot/{grub.conf,menu.lst,device.map} [e]tc/grub.* \ +- [b]oot/grub/{grub.conf,grub.cfg,menu.lst,device.map} \ +- [b]oot/grub2/{grub.conf,grub.cfg,menu.lst,device.map} \ +- [e]tc/sysconfig/grub [e]tc/sysconfig/bootloader \ +- [e]tc/lilo.conf [e]tc/elilo.conf \ +- [e]tc/mtab [e]tc/fstab \ +- [e]tc/mtools.conf \ +- [e]tc/smartd.conf [e]tc/sysconfig/smartmontools \ +- [e]tc/sysconfig/rawdevices \ +- [e]tc/security/pam_mount.conf.xml [b]oot/efi/*/*/grub.cfg ++# The variable expansion is deliberately not quoted in order to perform ++# pathname expansion on the variable value. 
++for restored_file in $FILES_TO_PATCH_PATTERNS + do + # Silently skip directories and file not found: + test -f "$restored_file" || continue +diff --git a/usr/share/rear/finalize/default/060_compare_files.sh b/usr/share/rear/finalize/default/060_compare_files.sh +new file mode 100644 +index 00000000..6947fda9 +--- /dev/null ++++ b/usr/share/rear/finalize/default/060_compare_files.sh +@@ -0,0 +1,6 @@ ++if [ -e $VAR_DIR/layout/config/files.md5sum ] ; then ++ if ! chroot $TARGET_FS_ROOT md5sum -c --quiet < $VAR_DIR/layout/config/files.md5sum 1>> >( tee -a "$RUNTIME_LOGFILE" 1>&7 ) 2>> >( tee -a "$RUNTIME_LOGFILE" 1>&8 ) ; then ++ LogPrintError "Error: Restored files do not match the recreated system in $TARGET_FS_ROOT" ++ return 1 ++ fi ++fi +diff --git a/usr/share/rear/layout/save/default/490_check_files_to_patch.sh b/usr/share/rear/layout/save/default/490_check_files_to_patch.sh +new file mode 100644 +index 00000000..ee717063 +--- /dev/null ++++ b/usr/share/rear/layout/save/default/490_check_files_to_patch.sh +@@ -0,0 +1,43 @@ ++# FILES_TO_PATCH_PATTERNS is a space-separated list of shell glob patterns. ++# Files that match are eligible for a final migration of UUIDs and other ++# identifiers after recovery (if the layout recreation process has led ++# to a change of an UUID or a device name and a corresponding change needs ++# to be performed on restored configuration files ). ++# See finalize/GNU/Linux/280_migrate_uuid_tags.sh ++# We should add all such files to CHECK_CONFIG_FILES - if they change, ++# we risk inconsistencies between the restored files and recreated layout, ++# or failures of UUID migration. ++ ++local file final_file symlink_target ++ ++# The patterns are relative to /, change directory there ++# so that the shell finds the files during pathname expansion ++pushd / >/dev/null ++# The variable expansion is deliberately not quoted in order to perform ++# pathname expansion on the variable value. ++for file in $FILES_TO_PATCH_PATTERNS ; do ++ final_file="/$file" ++ IsInArray "$final_file" "${CHECK_CONFIG_FILES[@]}" && continue ++ # Symlink handling (partially from 280_migrate_uuid_tags.sh): ++ # avoid dead symlinks, and symlinks to files on dynamic filesystems ++ # ( /proc etc.) - they are expected to change and validating ++ # their checksums has no sense ++ if test -L "$final_file" ; then ++ if symlink_target="$( readlink -e "$final_file" )" ; then ++ # If the symlink target contains /proc/ /sys/ /dev/ or /run/ we skip it because then ++ # the symlink target is considered to not be a restored file that needs to be patched ++ # and thus we don't need to generate and check its hash, either ++ # cf. 
https://github.com/rear/rear/pull/2047#issuecomment-464846777 ++ if echo $symlink_target | egrep -q '/proc/|/sys/|/dev/|/run/' ; then ++ Log "Skip adding symlink $final_file target $symlink_target on /proc/ /sys/ /dev/ or /run/ to CHECK_CONFIG_FILES" ++ continue ++ fi ++ Debug "Adding symlink $final_file with target $symlink_target to CHECK_CONFIG_FILES" ++ else ++ LogPrint "Skip adding dead symlink $final_file to CHECK_CONFIG_FILES" ++ continue ++ fi ++ fi ++ CHECK_CONFIG_FILES+=( "$final_file" ) ++done ++popd >/dev/null +diff --git a/usr/share/rear/layout/save/default/600_snapshot_files.sh b/usr/share/rear/layout/save/default/600_snapshot_files.sh +index 0ebf197c..3ac6b07e 100644 +--- a/usr/share/rear/layout/save/default/600_snapshot_files.sh ++++ b/usr/share/rear/layout/save/default/600_snapshot_files.sh +@@ -3,7 +3,8 @@ if [ "$WORKFLOW" = "checklayout" ] ; then + return 0 + fi + +-config_files=() ++local obj ++local config_files=() + for obj in "${CHECK_CONFIG_FILES[@]}" ; do + if [ -d "$obj" ] ; then + config_files+=( $( find "$obj" -type f ) ) diff --git a/SOURCES/rear-bz2091163.patch b/SOURCES/rear-bz2091163.patch new file mode 100644 index 0000000..3a68a34 --- /dev/null +++ b/SOURCES/rear-bz2091163.patch @@ -0,0 +1,46 @@ +diff --git a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh +index d3c9ae86..f21845df 100644 +--- a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh ++++ b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh +@@ -70,14 +70,20 @@ local lvs_exit_code + # Get physical_device configuration. + # Format: lvmdev [] [] + header_printed="no" +- # Example output of "lvm pvdisplay -c": +- # /dev/sda1:system:41940992:-1:8:8:-1:4096:5119:2:5117:7wwpcO-KmNN-qsTE-7sp7-JBJS-vBdC-Zyt1W7 ++ # Set pvdisplay separator to '|' to prevent issues with a colon in the path under /dev/disk/by-path ++ # that contains a ':' in the SCSI slot name. ++ # Example output of "lvm pvdisplay -C --separator '|' --noheadings --nosuffix --units=b -o pv_name,vg_name,pv_size,pv_uuid" ++ # on a system where LVM is configured to show the /dev/disk/by-path device names instead of the usual ++ # /dev/sda etc. (by using a setting like ++ # filter = [ "r|/dev/disk/by-path/.*-usb-|", "a|/dev/disk/by-path/pci-.*-nvme-|", "a|/dev/disk/by-path/pci-.*-scsi-|", "a|/dev/disk/by-path/pci-.*-ata-|", "a|/dev/disk/by-path/pci-.*-sas-|", "a|loop|", "r|.*|" ] ++ # in /etc/lvm/lvm.conf): ++ # /dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:1:0-part1|system|107340627968|7wwpcO-KmNN-qsTE-7sp7-JBJS-vBdC-Zyt1W7 + # There are two leading blanks in the output (at least on SLES12-SP4 with LVM 2.02.180). +- lvm pvdisplay -c | while read line ; do ++ lvm pvdisplay -C --separator '|' --noheadings --nosuffix --units=b -o pv_name,vg_name,pv_size,pv_uuid | while read line ; do + +- # With the above example pdev=/dev/sda1 ++ # With the above example pdev=/dev/disk/by-path/pci-0000:03:00.0-scsi-0:0:1:0-part1 + # (the "echo $line" makes the leading blanks disappear) +- pdev=$( echo $line | cut -d ":" -f "1" ) ++ pdev=$( echo $line | cut -d "|" -f "1" ) + + # Skip lines that are not describing physical devices + # i.e. 
lines where pdev does not start with a leading / character: +@@ -91,11 +97,11 @@ local lvs_exit_code + fi + + # With the above example vgrp=system +- vgrp=$( echo $line | cut -d ":" -f "2" ) +- # With the above example size=41940992 +- size=$( echo $line | cut -d ":" -f "3" ) ++ vgrp=$( echo $line | cut -d "|" -f "2" ) ++ # With the above example size=107340627968 ++ size=$( echo $line | cut -d "|" -f "3" ) + # With the above example uuid=7wwpcO-KmNN-qsTE-7sp7-JBJS-vBdC-Zyt1W7 +- uuid=$( echo $line | cut -d ":" -f "12" ) ++ uuid=$( echo $line | cut -d "|" -f "4" ) + + # Translate pdev through diskbyid_mappings file: + pdev=$( get_device_mapping $pdev ) diff --git a/SOURCES/rear-bz2104005.patch b/SOURCES/rear-bz2104005.patch new file mode 100644 index 0000000..db2c9dc --- /dev/null +++ b/SOURCES/rear-bz2104005.patch @@ -0,0 +1,21 @@ +commit 40ec3bf072a51229e81bfbfa7cedb8a7c7902dbd +Author: Johannes Meixner +Date: Fri Jun 24 15:11:27 2022 +0200 + + Merge pull request #2827 from rear/jsmeix-fail-safe-yes-pipe-lvcreate + + and commit b3fd58fc871e00bd713a0cb081de54d746ffffb3 from pull request #2839 + +diff --git a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh +index 1be17ba8..d34ab335 100644 +--- a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh ++++ b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh +@@ -263,7 +263,7 @@ $ifline + + LogPrint "Creating LVM volume '$vg/$lvname'; Warning: some properties may not be preserved..." + $warnraidline +- lvm lvcreate $lvopts $vg << +Date: Wed May 25 13:51:14 2022 +0200 + + Merge pull request #2808 from rear/jsmeix-exclude-watchdog + + Exclude dev/watchdog* from the ReaR recovery system: + In default.conf add dev/watchdog* to COPY_AS_IS_EXCLUDE + because watchdog functionality is not wanted in the recovery system + because we do not want any automated reboot functionality + while disaster recovery happens via "rear recover", + see https://github.com/rear/rear/pull/2808 + Furthermore having a copy of dev/watchdog* + during "rear mkrescue" in ReaR's build area + may even trigger a system crash that is caused by a + buggy TrendMicro ds_am module touching dev/watchdog + in ReaR's build area (/var/tmp/rear.XXX/rootfs), + see https://github.com/rear/rear/issues/2798 + +diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf +index 881a0af0..cb14da8b 100644 +--- a/usr/share/rear/conf/default.conf ++++ b/usr/share/rear/conf/default.conf +@@ -1414,7 +1414,12 @@ COPY_AS_IS=( $SHARE_DIR $VAR_DIR ) + # We let them being recreated by device mapper in the recovery system during the recovery process. + # Copying them into the recovery system would let "rear recover" avoid the migration process. + # See https://github.com/rear/rear/pull/1393 for details. +-COPY_AS_IS_EXCLUDE=( $VAR_DIR/output/\* dev/.udev dev/shm dev/shm/\* dev/oracleasm dev/mapper ) ++# /dev/watchdog /dev/watchdog\* functionality is not wanted in the ReaR rescue/recovery system ++# because we do not want any automated reboot while disaster recovery happens via "rear recover". ++# Furthermore having dev/watchdog* during "rear mkrescue" may even trigger a system "crash" that is ++# caused by TrendMicro ds_am module touching dev/watchdog in ReaR's build area (/var/tmp/rear.XXX/rootfs). 
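In the same spirit, further paths can be kept out of the rescue system from /etc/rear/local.conf; the COPY_AS_IS_EXCLUDE default shown right below escapes its globs (e.g. dev/shm/\*) so the pattern is stored literally rather than expanded at assignment time, and a site-specific addition would presumably follow the same convention (placeholder path):

    COPY_AS_IS_EXCLUDE+=( dev/my-noisy-device\* )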
++# See https://github.com/rear/rear/issues/2798 ++COPY_AS_IS_EXCLUDE=( $VAR_DIR/output/\* dev/.udev dev/shm dev/shm/\* dev/oracleasm dev/mapper dev/watchdog\* ) + # Array of user names that are trusted owners of files where RequiredSharedObjects calls ldd (cf. COPY_AS_IS) + # and where a ldd test is run inside the recovery system that tests all binaries for 'not found' libraries. + # The default is 'root' plus those standard system users that have a 'bin' or 'sbin' or 'root' home directory diff --git a/SOURCES/rear-bz2111059.patch b/SOURCES/rear-bz2111059.patch new file mode 100644 index 0000000..fff1437 --- /dev/null +++ b/SOURCES/rear-bz2111059.patch @@ -0,0 +1,105 @@ +commit 552dd6bfb20fdb3dc712b5243656d147392c27c3 +Author: Johannes Meixner +Date: Thu Jun 2 15:25:52 2022 +0200 + + Merge pull request #2811 from rear/jsmeix-RECOVERY_COMMANDS + + Add PRE_RECOVERY_COMMANDS and POST_RECOVERY_COMMANDS + as alternative to PRE_RECOVERY_SCRIPT and POST_RECOVERY_SCRIPT + see the description in default.conf how to use them and how they work. + See https://github.com/rear/rear/pull/2811 and see also + https://github.com/rear/rear/pull/2735 therein in particular + https://github.com/rear/rear/pull/2735#issuecomment-1134686196 + Additionally use LogPrint to show the user the executed commands, + see https://github.com/rear/rear/pull/2789 + +diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf +index cb14da8b..b14525da 100644 +--- a/usr/share/rear/conf/default.conf ++++ b/usr/share/rear/conf/default.conf +@@ -3117,14 +3117,37 @@ ELILO_BIN= + ################ ---- custom scripts + # + # NOTE: The scripts can be defined as an array to better handly spaces in parameters. +-# The scripts are called like this: eval "${PRE_RECOVERY_SCRIPT[@]}" ++# The scripts are called like this: ++# eval "${PRE_RECOVERY_SCRIPT[@]}" ++# ++# Alternatively, commands can be executed by using the corresponding ++# PRE_RECOVERY_COMMANDS and POST_RECOVERY_COMMANDS array variables ++# which evaluate like this: ++# for command in "${PRE_RECOVERY_COMMANDS[@]}" ; do ++# eval "$command" ++# done ++# ++# Using PRE_RECOVERY_COMMANDS and POST_RECOVERY_COMMANDS ++# is simpler when multiple commands should be executed. ++# For example, ++# PRE_RECOVERY_SCRIPT=( 'echo Hello' ';' 'sleep 3' ) ++# can be rewritten as ++# PRE_RECOVERY_COMMANDS=( 'echo Hello' 'sleep 3' ) ++# or ++# PRE_RECOVERY_COMMANDS=( 'echo Hello' ) ++# PRE_RECOVERY_COMMANDS+=( 'sleep 3' ) ++ ++# Those get called at the very beginning of "rear recover". ++# The PRE_RECOVERY_COMMANDS are called directly before the PRE_RECOVERY_SCRIPT. ++# Nothing was recreated and you have only the plain ReaR rescue/recovery system: ++PRE_RECOVERY_COMMANDS=() ++PRE_RECOVERY_SCRIPT= + +-# Call this after Relax-and-Recover did everything in the recover workflow. +-# Use $TARGET_FS_ROOT (by default '/mnt/local') to refer to the recovered system. ++# Those get called at the very end of "rear recover". ++# The POST_RECOVERY_COMMANDS are called directly after the POST_RECOVERY_SCRIPT. ++# Use $TARGET_FS_ROOT (by default '/mnt/local') to access the recreated target system. + POST_RECOVERY_SCRIPT= +- +-# Call this before Relax-and-Recover starts to do anything in the recover workflow. You have the rescue system but nothing else +-PRE_RECOVERY_SCRIPT= ++POST_RECOVERY_COMMANDS=() + + # PRE/POST Backup scripts will provide the ability to run certain tasks before and after a ReaR backup. 
+ # for example: +diff --git a/usr/share/rear/setup/default/010_pre_recovery_script.sh b/usr/share/rear/setup/default/010_pre_recovery_script.sh +index 005107cc..8b4e4a36 100644 +--- a/usr/share/rear/setup/default/010_pre_recovery_script.sh ++++ b/usr/share/rear/setup/default/010_pre_recovery_script.sh +@@ -1,4 +1,14 @@ ++ ++# The PRE_RECOVERY_COMMANDS are called directly before the PRE_RECOVERY_SCRIPT ++# so PRE_RECOVERY_COMMANDS can also be used to prepare things for the PRE_RECOVERY_SCRIPT: ++ ++local command ++for command in "${PRE_RECOVERY_COMMANDS[@]}" ; do ++ LogPrint "Running PRE_RECOVERY_COMMANDS '$command'" ++ eval "$command" ++done ++ + if test "$PRE_RECOVERY_SCRIPT" ; then +- Log "Running PRE_RECOVERY_SCRIPT '${PRE_RECOVERY_SCRIPT[@]}'" +- eval "${PRE_RECOVERY_SCRIPT[@]}" ++ LogPrint "Running PRE_RECOVERY_SCRIPT '${PRE_RECOVERY_SCRIPT[@]}'" ++ eval "${PRE_RECOVERY_SCRIPT[@]}" + fi +diff --git a/usr/share/rear/wrapup/default/500_post_recovery_script.sh b/usr/share/rear/wrapup/default/500_post_recovery_script.sh +index 77751800..866c9368 100644 +--- a/usr/share/rear/wrapup/default/500_post_recovery_script.sh ++++ b/usr/share/rear/wrapup/default/500_post_recovery_script.sh +@@ -1,4 +1,14 @@ ++ ++# The POST_RECOVERY_COMMANDS are called directly after the POST_RECOVERY_SCRIPT ++# so POST_RECOVERY_COMMANDS can also be used to clean up things after the POST_RECOVERY_SCRIPT: ++ + if test "$POST_RECOVERY_SCRIPT" ; then +- Log "Running POST_RECOVERY_SCRIPT '${POST_RECOVERY_SCRIPT[@]}'" +- eval "${POST_RECOVERY_SCRIPT[@]}" ++ LogPrint "Running POST_RECOVERY_SCRIPT '${POST_RECOVERY_SCRIPT[@]}'" ++ eval "${POST_RECOVERY_SCRIPT[@]}" + fi ++ ++local command ++for command in "${POST_RECOVERY_COMMANDS[@]}" ; do ++ LogPrint "Running POST_RECOVERY_COMMANDS '$command'" ++ eval "$command" ++done diff --git a/SOURCES/rear-bz2119501.patch b/SOURCES/rear-bz2119501.patch new file mode 100644 index 0000000..71b4d47 --- /dev/null +++ b/SOURCES/rear-bz2119501.patch @@ -0,0 +1,39 @@ +diff --git a/usr/share/rear/build/default/490_fix_broken_links.sh b/usr/share/rear/build/default/490_fix_broken_links.sh +index 5bace664..cf960be8 100644 +--- a/usr/share/rear/build/default/490_fix_broken_links.sh ++++ b/usr/share/rear/build/default/490_fix_broken_links.sh +@@ -7,6 +7,23 @@ + # see https://github.com/rear/rear/issues/1638 + # and https://github.com/rear/rear/pull/1734 + ++# Some broken symlinks are expected. The 'build' and 'source' symlinks in kernel modules point to kernel sources ++# and are broken untol one installs the kernel-debug-devel or kernel-devel packages (on Fedora) and even then ++# the targets are jot included in the rescue system by default. ++# Do not warn about those, it is just noise. ++local irrelevant_symlinks=( '*/lib/modules/*/build' '*/lib/modules/*/source' ) ++function symlink_is_irrelevant () { ++ for i in "${irrelevant_symlinks[@]}"; do ++ # do not quote $i, it is a glob pattern, matching will be performed by [[ ... == ... ]] ++ # quoting inside [[ ]] prevents pattern matching ++ if [[ "$1" == $i ]]; then ++ return 0 ++ fi ++ done ++ return 1 ++} ++ ++ + # FIXME: The following code fails if symlinks or their targets contain characters from IFS (e.g. blanks), + # cf. 
the same kind of comments in build/default/990_verify_rootfs.sh + # and layout/prepare/GNU/Linux/130_include_mount_subvolumes_code.sh +@@ -38,6 +55,10 @@ pushd $ROOTFS_DIR + local broken_symlink='' + local link_target='' + for broken_symlink in $broken_symlinks ; do ++ if symlink_is_irrelevant "$broken_symlink" ; then ++ DebugPrint "Ignoring irrelevant broken symlink $broken_symlink" ++ continue ++ fi + # For each broken symlink absolute path inside ROOTFS_DIR + # we call "readlink -e" in the original system to get its link target there. + # If in the original system there was a chain of symbolic links like diff --git a/SOURCES/rear-bz2120736.patch b/SOURCES/rear-bz2120736.patch new file mode 100644 index 0000000..8bcce79 --- /dev/null +++ b/SOURCES/rear-bz2120736.patch @@ -0,0 +1,18 @@ +diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf +index b14525da..23a83b71 100644 +--- a/usr/share/rear/conf/default.conf ++++ b/usr/share/rear/conf/default.conf +@@ -1841,10 +1841,10 @@ OBDR_BLOCKSIZE=2048 + # BACKUP=NBU stuff (Symantec/Veritas NetBackup) + ## + # +-COPY_AS_IS_NBU=( /usr/openv/bin/vnetd /usr/openv/bin/vopied /usr/openv/lib /usr/openv/netbackup /usr/openv/var/auth/[mn]*.txt /opt/VRTSpbx /etc/vx/VxICS /etc/vx/vrtslog.conf ) +-COPY_AS_IS_EXCLUDE_NBU=( /usr/openv/netbackup/logs "/usr/openv/netbackup/bin/bpjava*" /usr/openv/netbackup/bin/xbp /usr/openv/netbackup/bin/private /usr/openv/lib/java /usr/openv/lib/shared/vddk /usr/openv/netbackup/baremetal ) ++COPY_AS_IS_NBU=( /usr/openv/bin/vnetd /usr/openv/bin/vopied /usr/openv/lib /usr/openv/netbackup /usr/openv/var/auth/[mn]*.txt /usr/openv/var/vxss /usr/openv/var/webtruststore /usr/openv/resources/nbpxyhelper /opt/VRTSpbx /etc/vx/VxICS /etc/vx/vrtslog.conf /var/log/VRTSpbx ) ++COPY_AS_IS_EXCLUDE_NBU=( "/usr/openv/netbackup/logs/*" "/usr/openv/netbackup/bin/bpjava*" /usr/openv/netbackup/bin/xbp /usr/openv/netbackup/bin/private /usr/openv/lib/java "/usr/openv/lib/*-plugins" /usr/openv/lib/shared/vddk /usr/openv/netbackup/baremetal "/var/log/VRTSpbx/*" ) + # See https://github.com/rear/rear/issues/2105 why /usr/openv/netbackup/sec/at/lib/ is needed: +-NBU_LD_LIBRARY_PATH="/usr/openv/lib:/usr/openv/netbackup/sec/at/lib/" ++NBU_LD_LIBRARY_PATH="/usr/openv/lib:/usr/openv/netbackup/sec/at/lib/:/usr/openv/lib/boost" + PROGS_NBU=( ) + + ## diff --git a/SOURCES/rear-bz2130945.patch b/SOURCES/rear-bz2130945.patch new file mode 100644 index 0000000..5291d13 --- /dev/null +++ b/SOURCES/rear-bz2130945.patch @@ -0,0 +1,20 @@ +diff --git a/usr/share/rear/finalize/Fedora/i386/550_rebuild_initramfs.sh b/usr/share/rear/finalize/Fedora/550_rebuild_initramfs.sh +similarity index 100% +rename from usr/share/rear/finalize/Fedora/i386/550_rebuild_initramfs.sh +rename to usr/share/rear/finalize/Fedora/550_rebuild_initramfs.sh +diff --git a/usr/share/rear/finalize/Fedora/ppc64/550_rebuild_initramfs.sh b/usr/share/rear/finalize/Fedora/ppc64/550_rebuild_initramfs.sh +deleted file mode 120000 +index 22eede59..00000000 +--- a/usr/share/rear/finalize/Fedora/ppc64/550_rebuild_initramfs.sh ++++ /dev/null +@@ -1 +0,0 @@ +-../i386/550_rebuild_initramfs.sh +\ No newline at end of file +diff --git a/usr/share/rear/finalize/Fedora/ppc64le/550_rebuild_initramfs.sh b/usr/share/rear/finalize/Fedora/ppc64le/550_rebuild_initramfs.sh +deleted file mode 120000 +index 22eede59..00000000 +--- a/usr/share/rear/finalize/Fedora/ppc64le/550_rebuild_initramfs.sh ++++ /dev/null +@@ -1 +0,0 @@ +-../i386/550_rebuild_initramfs.sh +\ No newline at end of file diff 
--git a/SOURCES/rear-bz2131946.patch b/SOURCES/rear-bz2131946.patch new file mode 100644 index 0000000..1ee90ba --- /dev/null +++ b/SOURCES/rear-bz2131946.patch @@ -0,0 +1,129 @@ +diff --git a/usr/share/rear/layout/prepare/GNU/Linux/131_include_filesystem_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/131_include_filesystem_code.sh +index 172ac032..9cff63a0 100644 +--- a/usr/share/rear/layout/prepare/GNU/Linux/131_include_filesystem_code.sh ++++ b/usr/share/rear/layout/prepare/GNU/Linux/131_include_filesystem_code.sh +@@ -143,9 +143,9 @@ function create_fs () { + # unless the user has explicitly specified XFS filesystem options: + local xfs_opts + local xfs_device_basename="$( basename $device )" +- local xfs_info_filename="$LAYOUT_XFS_OPT_DIR/$xfs_device_basename.xfs" ++ local xfs_info_filename="$LAYOUT_XFS_OPT_DIR_RESTORE/$xfs_device_basename.xfs" + # Only uppercase letters and digits are used to ensure mkfs_xfs_options_variable_name is a valid bash variable name +- # even in case of complicated device nodes e.g. things like /dev/mapper/SIBM_2810XIV_78033E7012F-part3 ++ # even in case of complicated device nodes e.g. things like /dev/mapper/SIBM_2810XIV_78033E7012F-part3 + # cf. current_orig_device_basename_alnum_uppercase in layout/prepare/default/300_map_disks.sh + local xfs_device_basename_alnum_uppercase="$( echo $xfs_device_basename | tr -d -c '[:alnum:]' | tr '[:lower:]' '[:upper:]' )" + # cf. predefined_input_variable_name in the function UserInput in lib/_input-output-functions.sh +diff --git a/usr/share/rear/layout/prepare/default/010_prepare_files.sh b/usr/share/rear/layout/prepare/default/010_prepare_files.sh +index 85964712..7a980e63 100644 +--- a/usr/share/rear/layout/prepare/default/010_prepare_files.sh ++++ b/usr/share/rear/layout/prepare/default/010_prepare_files.sh +@@ -5,6 +5,7 @@ LAYOUT_DEPS="$VAR_DIR/layout/diskdeps.conf" + LAYOUT_TODO="$VAR_DIR/layout/disktodo.conf" + LAYOUT_CODE="$VAR_DIR/layout/diskrestore.sh" + LAYOUT_XFS_OPT_DIR="$VAR_DIR/layout/xfs" ++LAYOUT_XFS_OPT_DIR_RESTORE="$LAYOUT_XFS_OPT_DIR/restore" + + FS_UUID_MAP="$VAR_DIR/layout/fs_uuid_mapping" + LUN_WWID_MAP="$VAR_DIR/layout/lun_wwid_mapping" +diff --git a/usr/share/rear/layout/prepare/default/319_rename_xfs_configs.sh b/usr/share/rear/layout/prepare/default/319_rename_xfs_configs.sh +new file mode 100644 +index 00000000..406afa61 +--- /dev/null ++++ b/usr/share/rear/layout/prepare/default/319_rename_xfs_configs.sh +@@ -0,0 +1,83 @@ ++# Cleanup directory which hold XFS configuration file for `rear recover'. ++# This will avoid possible mess in LAYOUT_XFS_OPT_DIR_RESTORE if `rear recover' ++# would be launched multiple times, where user will choose different disk ++# mapping each time. ++# Removing and creating LAYOUT_XFS_OPT_DIR_RESTORE will ensure that ReaR will ++# have only current files available during current session. ++rm -rf "$LAYOUT_XFS_OPT_DIR_RESTORE" ++mkdir -p "$LAYOUT_XFS_OPT_DIR_RESTORE" ++ ++local excluded_configs=() ++ ++# Read $MAPPING_FILE (disk_mappings) to discover final disk mapping. ++# Once mapping is known, configuration files can be renamed. ++# (e.g. sds2.xfs to sdb2.xfs, ...) ++while read source target junk ; do ++ # Disks in MAPPING_FILE are listed with full device path. Since XFS config ++ # files are created in format e.g. sda2.xfs strip prefixed path to have ++ # only short device name available. 
++ base_source=$(basename "$source") ++ base_target=$(basename "$target") ++ ++ # Check if XFS configuration file for whole device (unpartitioned) ++ # is available (sda, sdb, ...). If so, rename and copy it to ++ # LAYOUT_XFS_OPT_DIR_RESTORE. ++ if [ -e "$LAYOUT_XFS_OPT_DIR/$base_source.xfs" ]; then ++ Log "Migrating XFS configuration file $base_source.xfs to $base_target.xfs" ++ cp "$v" "$LAYOUT_XFS_OPT_DIR/$base_source.xfs" \ ++ "$LAYOUT_XFS_OPT_DIR_RESTORE/$base_target.xfs" ++ ++ # Replace old device name in meta-data= option in XFS ++ # configuration file as well. ++ sed -i s#"meta-data=${source}\(\s\)"#"meta-data=${target}\1"# \ ++ "$LAYOUT_XFS_OPT_DIR_RESTORE/$base_target.xfs" ++ ++ # Mark XFS config file as processed to avoid copying it again later. ++ # More details on why are configs excluded can be found near the ++ # end of this script (near `tar' command). ++ excluded_configs+=("--exclude=$base_source.xfs") ++ fi ++ ++ # Find corresponding partitions to source disk in LAYOUT_FILE ++ # and migrate/rename them too if necessary. ++ while read _ layout_device _ _ _ _ layout_partition; do ++ if [[ "$source" = "$layout_device" ]]; then ++ base_src_layout_partition=$(basename "$layout_partition") ++ base_dst_layout_partition=${base_src_layout_partition//$base_source/$base_target} ++ dst_layout_partition=${layout_partition//$base_source/$base_target} ++ ++ if [ -e "$LAYOUT_XFS_OPT_DIR/$base_src_layout_partition.xfs" ]; then ++ Log "Migrating XFS configuration $base_src_layout_partition.xfs to $base_dst_layout_partition.xfs" ++ cp "$v" "$LAYOUT_XFS_OPT_DIR/$base_src_layout_partition.xfs" \ ++ "$LAYOUT_XFS_OPT_DIR_RESTORE/$base_dst_layout_partition.xfs" ++ ++ # Replace old device name in meta-data= option in XFS ++ # configuration file as well. ++ sed -i s#"meta-data=${layout_partition}\(\s\)"#"meta-data=${dst_layout_partition}\1"# \ ++ "$LAYOUT_XFS_OPT_DIR_RESTORE/$base_dst_layout_partition.xfs" ++ ++ # Mark XFS config file as processed to avoid copying it again later. ++ # More details on why are configs excluded can be found near the ++ # end of this script (near `tar' command). ++ excluded_configs+=("--exclude=$base_src_layout_partition.xfs") ++ fi ++ fi ++ done < <( grep -E "^part " "$LAYOUT_FILE" ) ++done < <( grep -v '^#' "$MAPPING_FILE" ) ++ ++pushd "$LAYOUT_XFS_OPT_DIR" >/dev/null ++# Copy remaining files ++# We need to copy remaining files into LAYOUT_XFS_OPT_DIR_RESTORE which will ++# serve as base dictionary where ReaR will look for XFS config files. ++# It is necessary to copy only files that were not previously processed, ++# because in LAYOUT_XFS_OPT_DIR they are still listed with ++# original name and copy to LAYOUT_XFS_OPT_DIR_RESTORE could overwrite ++# XFS configs already migrated. ++# e.g. with following disk mapping situation: ++# /dev/sda2 => /dev/sdb2 ++# /dev/sdb2 => /dev/sda2 ++# Files in LAYOUT_XFS_OPT_DIR_RESTORE would be overwritten by XFS configs with ++# wrong names. ++# tar is used to take advantage of its exclude feature. ++tar cf - --exclude=restore "${excluded_configs[@]}" . 
| tar xfp - -C "$LAYOUT_XFS_OPT_DIR_RESTORE" ++popd >/dev/null +diff --git a/usr/share/rear/layout/save/GNU/Linux/100_create_layout_file.sh b/usr/share/rear/layout/save/GNU/Linux/100_create_layout_file.sh +index 7895e4ee..fc0fa8fc 100644 +--- a/usr/share/rear/layout/save/GNU/Linux/100_create_layout_file.sh ++++ b/usr/share/rear/layout/save/GNU/Linux/100_create_layout_file.sh +@@ -10,6 +10,7 @@ mkdir -p $v $VAR_DIR/layout/config + # We need directory for XFS options only if XFS is in use: + if test "$( mount -t xfs )" ; then + LAYOUT_XFS_OPT_DIR="$VAR_DIR/layout/xfs" ++ rm -rf $LAYOUT_XFS_OPT_DIR + mkdir -p $v $LAYOUT_XFS_OPT_DIR + fi + diff --git a/SOURCES/rear-device-shrinking-bz2223895.patch b/SOURCES/rear-device-shrinking-bz2223895.patch new file mode 100644 index 0000000..4da263c --- /dev/null +++ b/SOURCES/rear-device-shrinking-bz2223895.patch @@ -0,0 +1,32 @@ +commit 4f03a10d4866efc9b6920a3878e6397d170742f9 +Author: Johannes Meixner +Date: Thu Jul 20 15:11:52 2023 +0200 + + Merge pull request #3027 from rmetrich/shrinking_file + + In build/GNU/Linux/100_copy_as_is.sh + ensure to really get all COPY_AS_IS files copied by using + 'tar ... -i' when extracting to avoid a false regular exit of 'tar' + in particular when padding zeroes get added when a file being read shrinks + because for 'tar' (without '-i') two consecutive 512-blocks of zeroes mean EOF, + cf. https://github.com/rear/rear/pull/3027 + +diff --git a/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh b/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh +index ec55f331..0e402b01 100644 +--- a/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh ++++ b/usr/share/rear/build/GNU/Linux/100_copy_as_is.sh +@@ -92,9 +92,13 @@ done >$copy_as_is_exclude_file + # COPY_AS_IS+=( /path/to/directory/* ) + # which are used in our scripts and by users in their etc/rear/local.conf + # cf. https://github.com/rear/rear/pull/2405#issuecomment-633512932 ++# Using '-i' when extracting is necessary to avoid a false regular exit of 'tar' ++# in particular when padding zeroes get added when a file being read shrinks ++# because for 'tar' (without '-i') two consecutive 512-blocks of zeroes mean EOF, ++# cf. https://github.com/rear/rear/pull/3027 + # FIXME: The following code fails if file names contain characters from IFS (e.g. blanks), + # cf. https://github.com/rear/rear/issues/1372 +-if ! tar -v -X $copy_as_is_exclude_file -P -C / -c ${COPY_AS_IS[*]} 2>$copy_as_is_filelist_file | tar $v -C $ROOTFS_DIR/ -x 1>/dev/null ; then ++if ! 
tar -v -X $copy_as_is_exclude_file -P -C / -c ${COPY_AS_IS[*]} 2>$copy_as_is_filelist_file | tar $v -C $ROOTFS_DIR/ -x -i 1>/dev/null ; then + Error "Failed to copy files and directories in COPY_AS_IS minus COPY_AS_IS_EXCLUDE" + fi + Log "Finished copying files and directories in COPY_AS_IS minus COPY_AS_IS_EXCLUDE" diff --git a/SOURCES/rear-luks-key-bz2228779.patch b/SOURCES/rear-luks-key-bz2228779.patch new file mode 100644 index 0000000..56559d9 --- /dev/null +++ b/SOURCES/rear-luks-key-bz2228779.patch @@ -0,0 +1,25 @@ +commit 2aa7b47354bdf5863071c8b479d29c99aad05ecb +Author: Johannes Meixner +Date: Fri Jul 24 13:02:45 2020 +0200 + + Update 240_reassign_luks_keyfiles.sh + + Use ReaR specific TMP_DIR (not TMPDIR or hardcoded /tmp) + +diff --git a/usr/share/rear/finalize/GNU/Linux/240_reassign_luks_keyfiles.sh b/usr/share/rear/finalize/GNU/Linux/240_reassign_luks_keyfiles.sh +index d989c3fb..358f3950 100644 +--- a/usr/share/rear/finalize/GNU/Linux/240_reassign_luks_keyfiles.sh ++++ b/usr/share/rear/finalize/GNU/Linux/240_reassign_luks_keyfiles.sh +@@ -24,9 +24,9 @@ awk ' + while read target_name source_device original_keyfile; do + Log "Re-assigning keyfile $original_keyfile to LUKS device $target_name ($source_device)" + +- # The scheme for generating a temporary keyfile path must be the same here and in the 'layout/prepare' stage. +- temp_keyfile="${TMPDIR:-/tmp}/LUKS-keyfile-$target_name" +- [ -f "$temp_keyfile" ] || BugError "temporary keyfile $temp_keyfile not found" ++ # The scheme for generating a temporary keyfile path must be the same here and in the 'layout/prepare' stage: ++ temp_keyfile="$TMP_DIR/LUKS-keyfile-$target_name" ++ test -f "$temp_keyfile" || BugError "temporary LUKS keyfile $temp_keyfile not found" + + target_keyfile="$TARGET_FS_ROOT/$original_keyfile" + diff --git a/SOURCES/rear-pr2675.patch b/SOURCES/rear-pr2675.patch new file mode 100644 index 0000000..7d11071 --- /dev/null +++ b/SOURCES/rear-pr2675.patch @@ -0,0 +1,60 @@ +diff --git a/usr/share/rear/lib/framework-functions.sh b/usr/share/rear/lib/framework-functions.sh +index 4878216b..e919bdbf 100644 +--- a/usr/share/rear/lib/framework-functions.sh ++++ b/usr/share/rear/lib/framework-functions.sh +@@ -121,7 +121,7 @@ function cleanup_build_area_and_end_program () { + sleep 2 + umount_mountpoint_lazy $BUILD_DIR/outputfs + fi +- remove_temporary_mountpoint '$BUILD_DIR/outputfs' || BugError "Directory $BUILD_DIR/outputfs not empty, can not remove" ++ remove_temporary_mountpoint "$BUILD_DIR/outputfs" || BugError "Directory $BUILD_DIR/outputfs not empty, can not remove" + rmdir $v $BUILD_DIR >&2 + fi + Log "End of program reached" +diff --git a/usr/share/rear/lib/global-functions.sh b/usr/share/rear/lib/global-functions.sh +index c1a11615..0f8f362d 100644 +--- a/usr/share/rear/lib/global-functions.sh ++++ b/usr/share/rear/lib/global-functions.sh +@@ -317,7 +317,20 @@ function url_path() { + + ### Returns true if one can upload files to the URL + function scheme_accepts_files() { +- local scheme=$1 ++ # Be safe against 'set -eu' which would exit 'rear' with "bash: $1: unbound variable" ++ # when scheme_accepts_files is called without an argument ++ # by bash parameter expansion with using an empty default value if $1 is unset or null. ++ # Bash parameter expansion with assigning a default value ${1:=} does not work ++ # (then it would still exit with "bash: $1: cannot assign in this way") ++ # but using a default value is practicable here because $1 is used only once ++ # cf. 
https://github.com/rear/rear/pull/2675#discussion_r705018956 ++ local scheme=${1:-} ++ # Return false if scheme is empty or blank (e.g. when OUTPUT_URL is unset or empty or blank) ++ # cf. https://github.com/rear/rear/issues/2676 ++ # and https://github.com/rear/rear/issues/2667#issuecomment-914447326 ++ # also return false if scheme is more than one word (so no quoted "$scheme" here) ++ # cf. https://github.com/rear/rear/pull/2675#discussion_r704401462 ++ test $scheme || return 1 + case $scheme in + (null|tape|obdr) + # tapes do not support uploading arbitrary files, one has to handle them +@@ -341,7 +354,10 @@ function scheme_accepts_files() { + ### Returning true does not imply that the URL is currently mounted at a filesystem and usable, + ### only that it can be mounted (use mount_url() first) + function scheme_supports_filesystem() { +- local scheme=$1 ++ # Be safe against 'set -eu' exit if scheme_supports_filesystem is called without argument ++ local scheme=${1:-} ++ # Return false if scheme is empty or blank or more than one word, cf. scheme_accepts_files() above ++ test $scheme || return 1 + case $scheme in + (null|tape|obdr|rsync|fish|ftp|ftps|hftp|http|https|sftp) + return 1 +@@ -560,7 +576,7 @@ function umount_url() { + + RemoveExitTask "perform_umount_url '$url' '$mountpoint' lazy" + +- remove_temporary_mountpoint '$mountpoint' && RemoveExitTask "remove_temporary_mountpoint '$mountpoint'" ++ remove_temporary_mountpoint "$mountpoint" && RemoveExitTask "remove_temporary_mountpoint '$mountpoint'" + return 0 + } + diff --git a/SOURCES/rear-restore-hybrid-bootloader-RHEL-16864.patch b/SOURCES/rear-restore-hybrid-bootloader-RHEL-16864.patch new file mode 100644 index 0000000..8a31d1d --- /dev/null +++ b/SOURCES/rear-restore-hybrid-bootloader-RHEL-16864.patch @@ -0,0 +1,569 @@ +diff --git a/usr/share/rear/finalize/Linux-i386/630_install_grub.sh b/usr/share/rear/finalize/Linux-i386/630_install_grub.sh +index f3d9a8204..a0e87e1db 100644 +--- a/usr/share/rear/finalize/Linux-i386/630_install_grub.sh ++++ b/usr/share/rear/finalize/Linux-i386/630_install_grub.sh +@@ -1,22 +1,18 @@ + # This script is an improvement over the default grub-install '(hd0)' + # +-# However the following issues still exist: ++# However the following issue still exists: + # + # * We don't know what the first disk will be, so we cannot be sure the MBR +-# is written to the correct disk(s). That's why we make all disks bootable. +-# +-# * There is no guarantee that GRUB was the boot loader used originally. +-# One possible attempt would be to save and restore the MBR for each disk, +-# but this does not guarantee a correct boot order, +-# or even a working boot loader config +-# (eg. GRUB stage2 might not be at the exact same location). ++# is written to the correct disk(s). That's why we make all suitable disks bootable. + + # Skip if another boot loader is already installed + # (then $NOBOOTLOADER is not a true value cf. finalize/default/010_prepare_checks.sh): + is_true $NOBOOTLOADER || return 0 + +-# For UEFI systems with grub legacy with should use efibootmgr instead: +-is_true $USING_UEFI_BOOTLOADER && return ++# For UEFI systems with grub legacy with should use efibootmgr instead, ++# but if BOOTLOADER is explicitly set to GRUB, we are on a hybrid (BIOS/UEFI) ++# boot system and we need to install GRUB to MBR as well. ++# Therefore, we don't test $USING_UEFI_BOOTLOADER. 
+ + # If the BOOTLOADER variable (read by finalize/default/010_prepare_checks.sh) + # is not "GRUB" (which means GRUB Legacy) skip this script (which is only for GRUB Legacy) +@@ -25,31 +21,27 @@ is_true $USING_UEFI_BOOTLOADER && return + test "GRUB" = "$BOOTLOADER" || return 0 + + # If the BOOTLOADER variable is "GRUB" (which means GRUB Legacy) +-# do not unconditionally trust that because https://github.com/rear/rear/pull/589 +-# reads (excerpt): +-# Problems found: +-# The ..._install_grub.sh checked for GRUB2 which is not part +-# of the first 2048 bytes of a disk - only GRUB was present - +-# thus the check for grub-probe/grub2-probe +-# and https://github.com/rear/rear/commit/079de45b3ad8edcf0e3df54ded53fe955abded3b +-# reads (excerpt): +-# replace grub-install by grub-probe +-# as grub-install also exist in legacy grub +-# so that it seems there are cases where actually GRUB 2 is used +-# but wrongly detected as "GRUB" so that another test is needed +-# to detected if actually GRUB 2 is used and that test is to +-# check if grub-probe or grub2-probe is installed because +-# grub-probe or grub2-probe is only installed in case of GRUB 2 +-# and when GRUB 2 is installed we assume GRUB 2 is used as boot loader +-# so that then we skip this script (which is only for GRUB Legacy) +-# because finalize/Linux-i386/660_install_grub2.sh is for installing GRUB 2: +-if type -p grub-probe >&2 || type -p grub2-probe >&2 ; then +- LogPrint "Skip installing GRUB Legacy boot loader because GRUB 2 is installed (grub-probe or grub2-probe exist)." ++# we could in principle trust that and continue because ++# layout/save/default/445_guess_bootloader.sh (where the value has been set) ++# is now able to distinguish between GRUB Legacy and GRUB 2. ++# But, as this code used to support the value "GRUB" for GRUB 2, ++# the user can have BOOTLOADER=GRUB set explicitly in the configuration file ++# and then it overrides the autodetection in layout/save/default/445_guess_bootloader.sh . ++# The user expects this setting to work with GRUB 2, thus for backward compatibility ++# we need to take into accout the possibility that GRUB actually means GRUB 2. ++if is_grub2_installed ; then ++ LogPrint "Skip installing GRUB Legacy boot loader because GRUB 2 is installed." ++ # We have the ErrorIfDeprecated function, but it aborts ReaR by default, ++ # which is not a good thing to do during recovery. ++ # Therefore it better to log a warning and continue. ++ LogPrintError "WARNING: setting BOOTLOADER=GRUB for GRUB 2 is deprecated, set BOOTLOADER=GRUB2 if setting BOOTLOADER explicitly" + return + fi + + # The actual work: + LogPrint "Installing GRUB Legacy boot loader:" ++# See above for the reasoning why not to use ErrorIfDeprecated ++LogPrintError "WARNING: support for GRUB Legacy is deprecated" + + # Installing GRUB Legacy boot loader requires an executable "grub": + type -p grub >&2 || Error "Cannot install GRUB Legacy boot loader because there is no 'grub' program." +@@ -79,8 +71,10 @@ if [[ -r "$LAYOUT_FILE" && -r "$LAYOUT_DEPS" ]] ; then + + for disk in $disks ; do + # Installing grub on an LVM PV will wipe the metadata so we skip those +- # function is_disk_a_pv returns with 1 if disk is a PV +- is_disk_a_pv "$disk" || continue ++ # function is_disk_a_pv returns true if disk is a PV ++ is_disk_a_pv "$disk" && continue ++ # Is the disk suitable for GRUB installation at all? 
++ is_disk_grub_candidate "$disk" || continue + # Use first boot partition by default + part=$( echo $bootparts | cut -d' ' -f1 ) + +diff --git a/usr/share/rear/finalize/Linux-i386/660_install_grub2.sh b/usr/share/rear/finalize/Linux-i386/660_install_grub2.sh +index 58163d622..f42b5bfbe 100644 +--- a/usr/share/rear/finalize/Linux-i386/660_install_grub2.sh ++++ b/usr/share/rear/finalize/Linux-i386/660_install_grub2.sh +@@ -38,6 +38,37 @@ + # so that after "rear recover" finished he can manually install the bootloader + # as appropriate for his particular system. + ++local grub_name ++local grub2_install_failed grub2_install_device ++local source_disk target_disk junk ++local grub2_installed_disks ++local part bootparts ++local disk disks bootdisk ++ ++function bios_grub_install () ++{ ++ local grub2_install_device="$1" ++ ++ if is_true $USING_UEFI_BOOTLOADER ; then ++ # If running under UEFI, we need to specify the target explicitly, otherwise grub-install thinks ++ # that we are installing the EFI bootloader. ++ if ! chroot $TARGET_FS_ROOT /bin/bash --login -c "$grub_name-install --target=i386-pc $grub2_install_device" ; then ++ LogPrintError "Failed to install GRUB2 for BIOS boot (target i386-pc) on $bootdisk" ++ # purely informational test that may help to explain the reason for the error ++ if ! test -d "$TARGET_FS_ROOT/boot/$grub_name/i386-pc" ; then ++ LogPrintError "GRUB2 module dir for BIOS boot (boot/$grub_name/i386-pc in $TARGET_FS_ROOT) does not exist, is GRUB2 for BIOS (target i386-pc) installed?" ++ fi ++ return 1 ++ fi ++ else ++ if ! chroot $TARGET_FS_ROOT /bin/bash --login -c "$grub_name-install $grub2_install_device" ; then ++ LogPrintError "Failed to install GRUB2 on $grub2_install_device" ++ return 1 ++ fi ++ fi ++ return 0 ++} ++ + # Skip if another bootloader was already installed: + # In this case NOBOOTLOADER is not true, + # cf. finalize/default/050_prepare_checks.sh +@@ -45,12 +76,16 @@ is_true $NOBOOTLOADER || return 0 + + # For UEFI systems with grub2 we should use efibootmgr instead, + # cf. finalize/Linux-i386/670_run_efibootmgr.sh +-is_true $USING_UEFI_BOOTLOADER && return ++# but if BOOTLOADER is explicitly set to GRUB2, we are on a hybrid (BIOS/UEFI) ++# boot system and we need to install GRUB to MBR as well ++if is_true $USING_UEFI_BOOTLOADER && [ "GRUB2" != "$BOOTLOADER" ] ; then ++ return 0 ++fi + + # Only for GRUB2 - GRUB Legacy will be handled by its own script. + # GRUB2 is detected by testing for grub-probe or grub2-probe which does not exist in GRUB Legacy. + # If neither grub-probe nor grub2-probe is there assume GRUB2 is not there: +-type -p grub-probe || type -p grub2-probe || return 0 ++is_grub2_installed || return 0 + + LogPrint "Installing GRUB2 boot loader..." + +@@ -94,7 +129,7 @@ if test "$GRUB2_INSTALL_DEVICES" ; then + else + LogPrint "Installing GRUB2 on $grub2_install_device (specified in GRUB2_INSTALL_DEVICES)" + fi +- if ! chroot $TARGET_FS_ROOT /bin/bash --login -c "$grub_name-install $grub2_install_device" ; then ++ if ! 
bios_grub_install "$grub2_install_device" ; then + LogPrintError "Failed to install GRUB2 on $grub2_install_device" + grub2_install_failed="yes" + fi +@@ -138,8 +173,8 @@ fi + grub2_installed_disks=() + for disk in $disks ; do + # Installing GRUB2 on an LVM PV will wipe the metadata so we skip those: +- # function is_disk_a_pv returns with 1 if disk is a PV +- is_disk_a_pv "$disk" || continue ++ # function is_disk_a_pv returns true if disk is a PV ++ is_disk_a_pv "$disk" && continue + + # Use first boot partition by default: + part=$( echo $bootparts | cut -d' ' -f1 ) +@@ -158,6 +193,8 @@ for disk in $disks ; do + + # Install GRUB2 on the boot disk if one was found: + if test "$bootdisk" ; then ++ # Is the disk suitable for GRUB installation at all? ++ is_disk_grub_candidate "$bootdisk" || continue + # Continue with the next possible boot disk when GRUB2 was already installed on the current one. + # When there are more disks like /dev/sda and /dev/sdb it can happen that + # for /dev/sda bootdisk=/dev/sda and GRUB2 gets installed on /dev/sda and +@@ -165,7 +202,7 @@ for disk in $disks ; do + # so we avoid that GRUB2 gets needlessly installed two times on the same device: + IsInArray "$bootdisk" "${grub2_installed_disks[@]}" && continue + LogPrint "Found possible boot disk $bootdisk - installing GRUB2 there" +- if chroot $TARGET_FS_ROOT /bin/bash --login -c "$grub_name-install $bootdisk" ; then ++ if bios_grub_install "$bootdisk" ; then + grub2_installed_disks+=( "$bootdisk" ) + # In contrast to the above behaviour when GRUB2_INSTALL_DEVICES is specified + # consider it here as a successful bootloader installation when GRUB2 +@@ -174,11 +211,14 @@ for disk in $disks ; do + # Continue with the next possible boot disk: + continue + fi +- LogPrintError "Failed to install GRUB2 on possible boot disk $bootdisk" + fi + done + + is_true $NOBOOTLOADER || return 0 +-LogPrintError "Failed to install GRUB2 - you may have to manually install it" ++if is_true $USING_UEFI_BOOTLOADER ; then ++ LogPrintError "Failed to install GRUB2 for BIOS boot - you may have to manually install it to preserve the hybrid BIOS/UEFI boot support, otherwise only UEFI boot will work" ++else ++ LogPrintError "Failed to install GRUB2 - you may have to manually install it" ++fi + return 1 + +diff --git a/usr/share/rear/finalize/default/050_prepare_checks.sh b/usr/share/rear/finalize/default/050_prepare_checks.sh +index 1679c9a41..57b44bca4 100644 +--- a/usr/share/rear/finalize/default/050_prepare_checks.sh ++++ b/usr/share/rear/finalize/default/050_prepare_checks.sh +@@ -10,10 +10,18 @@ + NOBOOTLOADER=1 + + # Try to read the BOOTLOADER value if /var/lib/rear/recovery/bootloader is not empty. 
+-# Currently (June 2016) the used BOOTLOADER values (grep for '$BOOTLOADER') are: ++# Currently (February 2024) the used BOOTLOADER values (grep for '$BOOTLOADER') are: + # GRUB for GRUB Legacy + # GRUB2 for GRUB 2 + # ELILO for elilo ++# LILO for lilo ++# GRUB2-EFI for GRUB 2, EFI version ++# EFI for any EFI bootloader, dummy value ++# ARM for ARM devices, dummy value ++# ARM-ALLWINNER for Allwinner devices ++# ZIPL for zIPL, on IBM Z (s390x) ++# PPC for any bootloader in the PReP boot partition (can be LILO, YABOOT, GRUB2) ++ + local bootloader_file="$VAR_DIR/recovery/bootloader" + # The output is stored in an artificial bash array so that $BOOTLOADER is the first word: + test -s $bootloader_file && BOOTLOADER=( $( grep -v '^[[:space:]]*#' $bootloader_file ) ) +diff --git a/usr/share/rear/layout/save/default/445_guess_bootloader.sh b/usr/share/rear/layout/save/default/445_guess_bootloader.sh +index fd5267dcf..b9e636a02 100644 +--- a/usr/share/rear/layout/save/default/445_guess_bootloader.sh ++++ b/usr/share/rear/layout/save/default/445_guess_bootloader.sh +@@ -1,7 +1,15 @@ + + # Determine or guess the used bootloader if not specified by the user + # and save this information into /var/lib/rear/recovery/bootloader +-bootloader_file="$VAR_DIR/recovery/bootloader" ++local bootloader_file="$VAR_DIR/recovery/bootloader" ++ ++local sysconfig_bootloader ++local block_device ++local blockd ++local disk_device ++local bootloader_area_strings_file ++local block_size ++local known_bootloader + + # When BOOTLOADER is specified use that: + if test "$BOOTLOADER" ; then +@@ -57,39 +65,31 @@ for block_device in /sys/block/* ; do + # Continue guessing the used bootloader by inspecting the first bytes on the next disk: + continue + fi +- # 'Hah!IdontNeedEFI' is the ASCII representation of the official GUID number +- # for a GPT BIOS boot partition which is 21686148-6449-6E6F-744E-656564454649 +- # see https://en.wikipedia.org/wiki/BIOS_boot_partition (issue #1752). +- # Use single quotes for 'Hah!IdontNeedEFI' to be on the safe side +- # because with double quotes the ! would cause history expansion if that is enabled +- # (non-interactive shells do not perform history expansion by default but better safe than sorry): +- if grep -q 'Hah!IdontNeedEFI' $bootloader_area_strings_file ; then +- # Because 'Hah!IdontNeedEFI' contains the known bootloader 'EFI' +- # the default code below would falsely guess that 'EFI' is used +- # but actually another non-EFI bootloader is used here +- # cf. https://github.com/rear/rear/issues/1752#issue-303856221 +- # so that in the 'Hah!IdontNeedEFI' case only non-EFI bootloaders are tested. +- # IBM Z (s390) uses zipl boot loader for RHEL and Ubuntu +- # cf. https://github.com/rear/rear/issues/2137 +- for known_bootloader in GRUB2 GRUB ELILO LILO ZIPL ; do +- if grep -q -i "$known_bootloader" $bootloader_area_strings_file ; then +- LogPrint "Using guessed bootloader '$known_bootloader' (found in first bytes on $disk_device with GPT BIOS boot partition)" +- echo "$known_bootloader" >$bootloader_file +- return +- fi +- done +- # When in the 'Hah!IdontNeedEFI' case no known non-EFI bootloader is found +- # continue guessing the used bootloader by inspecting the first bytes on the next disk +- # because otherwise the default code below would falsely guess that 'EFI' is used +- # cf. https://github.com/rear/rear/pull/1754#issuecomment-383531597 +- continue +- fi + # Check the default cases of known bootloaders. + # IBM Z (s390) uses zipl boot loader for RHEL and Ubuntu + # cf. 
https://github.com/rear/rear/issues/2137 +- for known_bootloader in GRUB2-EFI EFI GRUB2 GRUB ELILO LILO ZIPL ; do ++ for known_bootloader in GRUB2 GRUB LILO ZIPL ; do + if grep -q -i "$known_bootloader" $bootloader_area_strings_file ; then ++ # If we find "GRUB" (which means GRUB Legacy) ++ # do not unconditionally trust that because https://github.com/rear/rear/pull/589 ++ # reads (excerpt): ++ # Problems found: ++ # The ..._install_grub.sh checked for GRUB2 which is not part ++ # of the first 2048 bytes of a disk - only GRUB was present - ++ # thus the check for grub-probe/grub2-probe ++ # and https://github.com/rear/rear/commit/079de45b3ad8edcf0e3df54ded53fe955abded3b ++ # reads (excerpt): ++ # replace grub-install by grub-probe ++ # as grub-install also exist in legacy grub ++ # so that if actually GRUB 2 is used, the string in the bootloader area ++ # is "GRUB" so that another test is needed to detect if actually GRUB 2 is used. ++ # When GRUB 2 is installed we assume GRUB 2 is used as boot loader. ++ if [ "$known_bootloader" = "GRUB" ] && is_grub2_installed ; then ++ known_bootloader=GRUB2 ++ LogPrint "GRUB found in first bytes on $disk_device and GRUB 2 is installed, using GRUB2 as a guessed bootloader for 'rear recover'" ++ else + LogPrint "Using guessed bootloader '$known_bootloader' (found in first bytes on $disk_device)" ++ fi + echo "$known_bootloader" >$bootloader_file + return + fi +@@ -103,6 +103,26 @@ for block_device in /sys/block/* ; do + Log "End of strings in the first bytes on $disk_device" + done + ++# No bootloader detected, but we are using UEFI - there is probably an EFI bootloader ++if is_true $USING_UEFI_BOOTLOADER ; then ++ if is_grub2_installed ; then ++ echo "GRUB2-EFI" >$bootloader_file ++ elif test -f /sbin/elilo ; then ++ echo "ELILO" >$bootloader_file ++ else ++ # There is an EFI bootloader, we don't know which one exactly. ++ # The value "EFI" is a bit redundant with USING_UEFI_BOOTLOADER=1, ++ # which already indicates that there is an EFI bootloader. We use it as a placeholder ++ # to not leave $bootloader_file empty. ++ # Note that it is legal to have USING_UEFI_BOOTLOADER=1 and e.g. known_bootloader=GRUB2 ++ # (i.e. a non=EFI bootloader). This will happen in BIOS/UEFI hybrid boot scenarios. ++ # known_bootloader=GRUB2 indicates that there is a BIOS bootloader and USING_UEFI_BOOTLOADER=1 ++ # indicates that there is also an EFI bootloader. Only the EFI one is being used at this ++ # time, but both will need to be restored. ++ echo "EFI" >$bootloader_file ++ fi ++ return 0 ++fi + + # Error out when no bootloader was specified or could be autodetected: + Error "Cannot autodetect what is used as bootloader, see default.conf about 'BOOTLOADER'" +diff --git a/usr/share/rear/lib/bootloader-functions.sh b/usr/share/rear/lib/bootloader-functions.sh +index 5402f1da0..7aa40a589 100644 +--- a/usr/share/rear/lib/bootloader-functions.sh ++++ b/usr/share/rear/lib/bootloader-functions.sh +@@ -491,6 +491,53 @@ function get_root_disk_UUID { + echo $(mount | grep ' on / ' | awk '{print $1}' | xargs blkid -s UUID -o value) + } + ++# Detect whether actually GRUB 2 is installed and that test is to ++# check if grub-probe or grub2-probe is installed because ++# grub-probe or grub2-probe is only installed in case of GRUB 2. ++# Needed because one can't tell the GRUB version by looking at the MBR ++# (both GRUB 2 and GRUB Legacy have the string "GRUB" in their MBR). 
++function is_grub2_installed () { ++ if type -p grub-probe >&2 || type -p grub2-probe >&2 ; then ++ Log "GRUB 2 is installed (grub-probe or grub2-probe exist)." ++ return 0 ++ else ++ return 1 ++ fi ++} ++ ++# Determine whether a disk is worth detecting or installing GRUB on ++function is_disk_grub_candidate () { ++ local disk="$1" ++ local disk_partitions part ++ local label flags ++ ++ # ToDo : validate $disk (does it even exist? Isn't it write-protected?) ++ ++ # Installing grub on an LVM PV will wipe the metadata so we skip those ++ is_disk_a_pv "$disk" && return 1 ++ ++ label="$( get_disklabel_type "$disk" )" || return 1 ++ # We don't care about the SUSE-specific 'gpt_sync_mbr' partition scheme ++ # anymore: https://github.com/rear/rear/pull/3145#discussion_r1481388431 ++ if [ "$label" == gpt ] ; then ++ # GPT needs a special BIOS boot partition to store GRUB (BIOS version). ++ # Let's try to find it. It can be recognized as having the bios_grub flag. ++ disk_partitions=( $( get_child_components "$disk" "part" ) ) ++ for part in "${disk_partitions[@]}" ; do ++ flags=( $( get_partition_flags "$part" ) ) ++ IsInArray bios_grub "${flags[@]}" && return 0 # found! ++ done ++ # If a given GPT-partitioned disk does not contain a BIOS boot partition, ++ # GRUB for BIOS booting can not be installed into its MBR (grub-install errors out). ++ return 1 ++ else ++ # Other disklabel types don't need anything special to install GRUB. ++ # The test for the PReP boot partition (finalize/Linux-ppc64le/660_install_grub2.sh) ++ # is a bit similar, but operates on the partition itself, not on the uderlying disk. ++ return 0 ++ fi ++} ++ + # Create configuration grub + function create_grub2_cfg { + root_uuid=$(get_root_disk_UUID) +diff --git a/usr/share/rear/lib/checklayout-workflow.sh b/usr/share/rear/lib/checklayout-workflow.sh +index 94b70fc06..744ca0be1 100644 +--- a/usr/share/rear/lib/checklayout-workflow.sh ++++ b/usr/share/rear/lib/checklayout-workflow.sh +@@ -15,6 +15,10 @@ function WORKFLOW_checklayout () { + + SourceStage "layout/precompare" + ++ # layout code needs to know whether we are using UEFI (USING_UEFI_BOOTLOADER) ++ # as it also detects the bootloader in use ( layout/save/default/445_guess_bootloader.sh ) ++ Source $SHARE_DIR/prep/default/320_include_uefi_env.sh ++ + # In case of e.g. BACKUP_URL=file:///mybackup/ automatically exclude the matching component 'fs:/mybackup' + # otherwise 'rear checklayout' would always detect a changed layout with BACKUP_URL=file:///... 
+ # because during 'rear mkrescue/mkbackup' such a component was automatically excluded this way +diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh +index 4f5b8f6f8..f5fc7538e 100644 +--- a/usr/share/rear/lib/layout-functions.sh ++++ b/usr/share/rear/lib/layout-functions.sh +@@ -520,6 +520,33 @@ get_component_type() { + grep -E "^[^ ]+ $1 " $LAYOUT_TODO | cut -d " " -f 3 + } + ++# Get the disklabel (partition table) type of the disk $1 from the layout file ++# (NOT from the actual disk, so layout file must exist before calling this, ++# and it is useful during recovery even before the disk layout has been recreated) ++function get_disklabel_type () { ++ # from create_disk() in layout/prepare/GNU/Linux/100_include_partition_code.sh ++ local component disk size label junk ++ ++ disk='' ++ ++ read component disk size label junk < <(grep "^disk $1 " "$LAYOUT_FILE") ++ test $disk || return 1 ++ ++ echo $label ++} ++ ++# Get partition flags from layout (space-separated) of partition given as $1 ++function get_partition_flags () { ++ local part disk size pstart name flags partition junk ++ ++ while read part disk size pstart name flags partition junk; do ++ if [ "$partition" == "$1" ] ; then ++ echo "$flags" | tr ',' ' ' ++ return 0 ++ fi ++ done < <(grep "^part " $LAYOUT_FILE) ++} ++ + # Function returns 0 when v1 is greater or equal than v2 + version_newer() { + local v1list=( ${1//[-.]/ } ) +@@ -794,17 +821,17 @@ blkid_label_of_device() { + echo "$label" + } + +-# Returns 1 if the device is an LVM physical volume +-# Returns 0 otherwise or if the device doesn't exists ++# Returns true if the device is an LVM physical volume ++# Returns false otherwise or if the device doesn't exists + is_disk_a_pv() { + disk=$1 + + # Using awk, select the 'lvmdev' line for which $disk is the device (column 3), + # cf. https://github.com/rear/rear/pull/1897 + # If exit == 1, then there is such line (so $disk is a PV), +- # otherwise exit with default value '0', which falls through to 'return 0' below. +- awk "\$1 == \"lvmdev\" && \$3 == \"${disk}\" { exit 1 }" "$LAYOUT_FILE" >/dev/null || return 1 +- return 0 ++ # otherwise exit with default value '0', which falls through to 'return 1' below. 
++ awk "\$1 == \"lvmdev\" && \$3 == \"${disk}\" { exit 1 }" "$LAYOUT_FILE" >/dev/null || return 0 ++ return 1 + } + + function is_multipath_path { +diff --git a/usr/share/rear/lib/savelayout-workflow.sh b/usr/share/rear/lib/savelayout-workflow.sh +index 69cda58e9..27bb0a1ad 100644 +--- a/usr/share/rear/lib/savelayout-workflow.sh ++++ b/usr/share/rear/lib/savelayout-workflow.sh +@@ -10,6 +10,10 @@ if [[ "$VERBOSE" ]]; then + fi + WORKFLOWS+=( savelayout ) + WORKFLOW_savelayout () { ++ # layout code needs to know whether we are using UEFI (USING_UEFI_BOOTLOADER) ++ # as it also detects the bootloader in use ( layout/save/default/445_guess_bootloader.sh ) ++ Source $SHARE_DIR/prep/default/320_include_uefi_env.sh ++ + #DISKLAYOUT_FILE=$VAR_DIR/layout/disklayout.conf # defined in default.conf now (issue #678) + SourceStage "layout/save" + } +diff --git a/usr/share/rear/prep/GNU/Linux/300_include_grub_tools.sh b/usr/share/rear/prep/GNU/Linux/300_include_grub_tools.sh +index fcf0a5ff6..7d494281a 100644 +--- a/usr/share/rear/prep/GNU/Linux/300_include_grub_tools.sh ++++ b/usr/share/rear/prep/GNU/Linux/300_include_grub_tools.sh +@@ -1,8 +1,6 @@ + # + # GRUB2 has much more commands than the legacy grub command, including modules + +-test -d $VAR_DIR/recovery || mkdir -p $VAR_DIR/recovery +- + # cf. https://github.com/rear/rear/issues/2137 + # s390 zlinux does not use grub + # ********************************************************************************* +@@ -11,19 +9,8 @@ test -d $VAR_DIR/recovery || mkdir -p $VAR_DIR/recovery + # ********************************************************************************* + [ "$ARCH" == "Linux-s390" ] && return 0 + +-# Because usr/sbin/rear sets 'shopt -s nullglob' the 'echo -n' command +-# outputs nothing if nothing matches the bash globbing pattern '/boot/grub*' +-local grubdir="$( echo -n /boot/grub* )" +-# Use '/boot/grub' as fallback if nothing matches '/boot/grub*' +-test -d "$grubdir" || grubdir='/boot/grub' +- +-# Check if we're using grub or grub2 before doing something. +-if has_binary grub-probe ; then +- grub-probe -t device $grubdir >$VAR_DIR/recovery/bootdisk 2>/dev/null || return 0 +-elif has_binary grub2-probe ; then +- grub2-probe -t device $grubdir >$VAR_DIR/recovery/bootdisk 2>/dev/null || return 0 +-fi +- ++# It is safe to assume that we are using GRUB and try to add these files to the rescue image ++# even if the assumption is wrong. + # Missing programs in the PROGS array are ignored: + PROGS+=( grub-bios-setup grub2-bios-setup + grub-install grub2-install +diff --git a/usr/share/rear/prep/Linux-s390/305_include_s390_tools.sh b/usr/share/rear/prep/Linux-s390/305_include_s390_tools.sh +index 084ea3104..4451f53d0 100644 +--- a/usr/share/rear/prep/Linux-s390/305_include_s390_tools.sh ++++ b/usr/share/rear/prep/Linux-s390/305_include_s390_tools.sh +@@ -1,10 +1,13 @@ + # + # s390 zIPL boot loader and grubby for configuring boot loader` + +-test -d $VAR_DIR/recovery || mkdir -p $VAR_DIR/recovery +- +-local bootdir="$( echo -n /boot/ )" +-test -d "$bootdir" || $bootdir='/boot/' ++# See the code in prep/GNU/Linux/300_include_grub_tools.sh ++# that sets grubdir via ++# local grubdir="$( echo -n /boot/grub* )" ++# where 'shopt -s nullglob' results nothing when nothing matches ++# but that is not needed here to set a fixed bootdir="/boot" ++# cf. https://github.com/rear/rear/issues/1040#issuecomment-1034890880 ++local bootdir="/boot/" + + # cf. 
https://github.com/rear/rear/issues/2137 + # findmnt is used the same as grub-probe to find the device where /boot is mounted +@@ -16,7 +19,7 @@ test -d "$bootdir" || $bootdir='/boot/' + # findmnt returns --> /dev/dasda3[/@/.snapshots/1/snapshot] + # use 300_include_grub_tools.sh instead of this file (grub2-probe) + if has_binary findmnt ; then +- findmnt -no SOURCE --target $bootdir >$VAR_DIR/recovery/bootdisk || return 0 ++ findmnt -no SOURCE --target $bootdir > /dev/null || return 0 + fi + + # Missing programs in the PROGS array are ignored: +diff --git a/usr/share/rear/prep/default/320_include_uefi_env.sh b/usr/share/rear/prep/default/320_include_uefi_env.sh +index ea86af4ca..93e59eae5 100644 +--- a/usr/share/rear/prep/default/320_include_uefi_env.sh ++++ b/usr/share/rear/prep/default/320_include_uefi_env.sh +@@ -87,7 +87,3 @@ fi + DebugPrint "Found EFI system partition ${esp_proc_mounts_line[0]} on ${esp_proc_mounts_line[1]} type ${esp_proc_mounts_line[2]}" + USING_UEFI_BOOTLOADER=1 + LogPrint "Using UEFI Boot Loader for Linux (USING_UEFI_BOOTLOADER=1)" +- +-# Remember the ESP device node in VAR_DIR/recovery/bootdisk: +-echo "${esp_proc_mounts_line[0]}" >$VAR_DIR/recovery/bootdisk +- diff --git a/SOURCES/rear-save-lvm-poolmetadatasize-RHEL-6984.patch b/SOURCES/rear-save-lvm-poolmetadatasize-RHEL-6984.patch new file mode 100644 index 0000000..8754e6d --- /dev/null +++ b/SOURCES/rear-save-lvm-poolmetadatasize-RHEL-6984.patch @@ -0,0 +1,102 @@ +From e7b84271536782fbc8673ef4573e155e1dfa850e Mon Sep 17 00:00:00 2001 +From: pcahyna +Date: Wed, 1 Nov 2023 12:53:33 +0100 +Subject: [PATCH] Merge pull request #3061 from + pcahyna/save-lvm-poolmetadatasize + +Save LVM pool metadata volume size in disk layout +--- + .../layout/save/GNU/Linux/220_lvm_layout.sh | 39 ++++++++++++------- + 1 file changed, 24 insertions(+), 15 deletions(-) + +diff --git a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh +index f21845df9..42f0e4126 100644 +--- a/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh ++++ b/usr/share/rear/layout/save/GNU/Linux/220_lvm_layout.sh +@@ -18,7 +18,7 @@ local already_processed_lvs=() + local lv_layout_supported lvs_fields + local origin lv vg + local layout modules +-local thinpool chunksize stripes stripesize segmentsize ++local thinpool chunksize stripes stripesize segmentsize poolmetadatasize + local kval infokval + local lvs_exit_code + +@@ -130,7 +130,7 @@ local lvs_exit_code + echo "# Skipping PV $pdev that is not part of a valid VG (VG '$vgrp' empty or more than one word):" + contains_visible_char "$vgrp" || vgrp='' + echo "# lvmdev /dev/$vgrp $pdev $uuid $size" +- # Continue with the next line in the output of "lvm pvdisplay -c" ++ # Continue with the next line in the output of "lvm pvdisplay -C" + continue + fi + # With the above example the output is: +@@ -138,10 +138,10 @@ local lvs_exit_code + echo "lvmdev /dev/$vgrp $pdev $uuid $size" + + done +- # Check the exit code of "lvm pvdisplay -c" +- # in the "lvm pvdisplay -c | while read line ; do ... done" pipe: ++ # Check the exit code of "lvm pvdisplay -C" ++ # in the "lvm pvdisplay -C ... | while read line ; do ... done" pipe: + pvdisplay_exit_code=${PIPESTATUS[0]} +- test $pvdisplay_exit_code -eq 0 || Error "LVM command 'lvm pvdisplay -c' failed with exit code $pvdisplay_exit_code" ++ test $pvdisplay_exit_code -eq 0 || Error "LVM command 'lvm pvdisplay -C ... 
-o pv_name,vg_name,pv_size,pv_uuid' failed with exit code $pvdisplay_exit_code" + + # Get the volume group configuration: + # Format: lvmgrp [] [] +@@ -200,17 +200,17 @@ local lvs_exit_code + + # Specify the fields for the lvs command depending on whether or not the 'lv_layout' field is supported: + if is_true $lv_layout_supported ; then +- lvs_fields="origin,lv_name,vg_name,lv_size,lv_layout,pool_lv,chunk_size,stripes,stripe_size,seg_size" ++ lvs_fields="origin,lv_name,vg_name,lv_size,lv_layout,pool_lv,chunk_size,stripes,stripe_size,seg_size,lv_metadata_size" + else + # Use the 'modules' field as fallback replacement when the 'lv_layout' field is not supported: +- lvs_fields="origin,lv_name,vg_name,lv_size,modules,pool_lv,chunk_size,stripes,stripe_size,seg_size" ++ lvs_fields="origin,lv_name,vg_name,lv_size,modules,pool_lv,chunk_size,stripes,stripe_size,seg_size,lv_metadata_size" + fi + + # Example output of "lvs --separator=':' --noheadings --units b --nosuffix -o $lvs_fields" +- # with lvs_fields="origin,lv_name,vg_name,lv_size,lv_layout,pool_lv,chunk_size,stripes,stripe_size,seg_size" ++ # with lvs_fields="origin,lv_name,vg_name,lv_size,lv_layout,pool_lv,chunk_size,stripes,stripe_size,seg_size,lv_metadata_size" + # i.e. when the 'lv_layout' field is supported: +- # :root:system:19927138304:linear::0:1:0:19927138304 +- # :swap:system:1535115264:linear::0:1:0:1535115264 ++ # :root:system:19927138304:linear::0:1:0:19927138304: ++ # :swap:system:1535115264:linear::0:1:0:1535115264: + # There are two leading blanks in the output (at least on SLES12-SP4 with LVM 2.02.180). + lvm lvs --separator=':' --noheadings --units b --nosuffix -o $lvs_fields | while read line ; do + +@@ -261,14 +261,23 @@ local lvs_exit_code + # With the above example segmentsize=19927138304 and segmentsize=1535115264 + segmentsize="$( echo "$line" | awk -F ':' '{ print $10 }' )" + +- # TODO: Explain what that code is meant to do. +- # In particular a more explanatory variable name than 'kval' might help. +- # In 110_include_lvm_code.sh there is a comment what 'kval' means there +- # # kval: "key:value" pairs, separated by spaces +- # so probably 'kval' means the same here, but what is 'infokval'? ++ # With the above example poolmetadatasize="" ++ poolmetadatasize="$( echo "$line" | awk -F ':' '{ print $11 }' )" ++ ++ # kval is a string of space-separated key:value pairs. Key names are chosen to represent ++ # long options to lvcreate, and value will be the parameter for each long option. ++ # e.g. "chunksize:${chunksize}b" will eventually become a --chunksize=${chunksize}b ++ # argument to lvcreate. ++ # This way 110_include_lvm_code.sh which constructs the arguments to lvcreate ++ # can be kept generic and does not need to be updated every time an argument is added, ++ # as long as the argument can follow this generic scheme. ++ # infokval are key:value pairs that are not used when restoring the layout ++ # and are kept in disklayout.conf only as comments for information ++ # (because the setting is not easy or desirable to preserve). 
+ kval="" + infokval="" + [ -z "$thinpool" ] || kval="${kval:+$kval }thinpool:$thinpool" ++ [ -z "$poolmetadatasize" ] || kval="${kval:+$kval }poolmetadatasize:${poolmetadatasize}b" + [ $chunksize -eq 0 ] || kval="${kval:+$kval }chunksize:${chunksize}b" + [ $stripesize -eq 0 ] || kval="${kval:+$kval }stripesize:${stripesize}b" + [ $segmentsize -eq $size ] || infokval="${infokval:+$infokval }segmentsize:${segmentsize}b" +-- +2.43.0 + diff --git a/SOURCES/rear-sfdc02772301.patch b/SOURCES/rear-sfdc02772301.patch new file mode 100644 index 0000000..74456dd --- /dev/null +++ b/SOURCES/rear-sfdc02772301.patch @@ -0,0 +1,38 @@ +diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf +index 9ada92c3..455aa3ce 100644 +--- a/usr/share/rear/conf/default.conf ++++ b/usr/share/rear/conf/default.conf +@@ -1813,7 +1813,7 @@ OBDR_BLOCKSIZE=2048 + # BACKUP=NBU stuff (Symantec/Veritas NetBackup) + ## + # +-COPY_AS_IS_NBU=( /usr/openv/bin/vnetd /usr/openv/bin/vopied /usr/openv/lib /usr/openv/netbackup /usr/openv/var/auth/[mn]*.txt ) ++COPY_AS_IS_NBU=( /usr/openv/bin/vnetd /usr/openv/bin/vopied /usr/openv/lib /usr/openv/netbackup /usr/openv/var/auth/[mn]*.txt /opt/VRTSpbx /etc/vx/VxICS /etc/vx/vrtslog.conf ) + COPY_AS_IS_EXCLUDE_NBU=( /usr/openv/netbackup/logs "/usr/openv/netbackup/bin/bpjava*" /usr/openv/netbackup/bin/xbp /usr/openv/netbackup/bin/private /usr/openv/lib/java /usr/openv/lib/shared/vddk /usr/openv/netbackup/baremetal ) + # See https://github.com/rear/rear/issues/2105 why /usr/openv/netbackup/sec/at/lib/ is needed: + NBU_LD_LIBRARY_PATH="/usr/openv/lib:/usr/openv/netbackup/sec/at/lib/" +diff --git a/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh b/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh +index cd48b8d9..ae5a3ccc 100644 +--- a/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh ++++ b/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh +@@ -7,6 +7,12 @@ + + [[ $NBU_version -lt 7 ]] && return # NBU is using xinetd when version <7.x + ++if [ -e "/etc/init.d/vxpbx_exchanged" ]; then ++ cp $v /etc/init.d/vxpbx_exchanged $ROOTFS_DIR/etc/scripts/system-setup.d/vxpbx_exchanged.real ++ chmod $v +x $ROOTFS_DIR/etc/scripts/system-setup.d/vxpbx_exchanged.real ++ echo "( /etc/scripts/system-setup.d/vxpbx_exchanged.real start )" > $ROOTFS_DIR/etc/scripts/system-setup.d/89-vxpbx_exchanged.sh ++fi ++ + if [ -e "/etc/init.d/netbackup" ]; then + cp $v /etc/init.d/netbackup $ROOTFS_DIR/etc/scripts/system-setup.d/netbackup.real + chmod $v +x $ROOTFS_DIR/etc/scripts/system-setup.d/netbackup.real +diff --git a/usr/share/rear/skel/NBU/usr/openv/tmp/.gitignore b/usr/share/rear/skel/NBU/usr/openv/tmp/.gitignore +new file mode 100644 +index 00000000..d6b7ef32 +--- /dev/null ++++ b/usr/share/rear/skel/NBU/usr/openv/tmp/.gitignore +@@ -0,0 +1,2 @@ ++* ++!.gitignore diff --git a/SOURCES/rear-skip-invalid-drives-RHEL-22863.patch b/SOURCES/rear-skip-invalid-drives-RHEL-22863.patch new file mode 100644 index 0000000..557e03a --- /dev/null +++ b/SOURCES/rear-skip-invalid-drives-RHEL-22863.patch @@ -0,0 +1,60 @@ +diff --git a/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh b/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh +index da6ce64c4..ab14ec83f 100644 +--- a/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh ++++ b/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh +@@ -395,6 +395,10 @@ Log "Saving disk partitions." + elif [[ ! 
($blockd = *rpmb || $blockd = *[0-9]boot[0-9]) ]]; then # Silently skip Replay Protected Memory Blocks and others + devname=$(get_device_name $disk) + devsize=$(get_disk_size ${disk#/sys/block/}) ++ if ! validation_error=$(is_disk_valid $devname) ; then ++ LogPrintError "Ignoring $blockd: $validation_error" ++ continue ++ fi + disktype=$(parted -s $devname print | grep -E "Partition Table|Disk label" | cut -d ":" -f "2" | tr -d " ") + if [ "$disktype" != "dasd" ]; then + echo "# Disk $devname" +diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh +index f5fc7538e..90b16cb20 100644 +--- a/usr/share/rear/lib/layout-functions.sh ++++ b/usr/share/rear/lib/layout-functions.sh +@@ -834,6 +834,40 @@ is_disk_a_pv() { + return 1 + } + ++# Check whether disk is suitable for being added to layout ++# Can be used to skip obviously unsuitable/broken devices ++# (missing device node, zero size, device can't be opened). ++# Should not be used to skip potential mapping targets before layout restoration ++# - an invalid disk may become valid later, for example if it is a DASD that needs ++# low-level formatting (see 090_include_dasd_code.sh and 360_generate_dasd_format_code.sh), ++# unformatted DASDs show zero size. ++# Returns 0 if the device is ok ++# Returns nonzero code if it should be skipped, and a text describing the error ++# (without the device name) on stdout ++# usage example: ++# local err ++# if ! err=$(is_disk_valid /dev/sda); then ++ ++function is_disk_valid { ++ local disk="$1" ++ local size ++ ++ if ! test -b "$disk" ; then ++ echo "$disk is not a block device" ++ return 1 ++ fi ++ # capture stdout in a variable and redirect stderr to stdout - the error message ++ # will be our output ++ if { size=$(blockdev --getsize64 "$disk") ; } 2>&1 ; then ++ if ! test "$size" -gt 0 2>/dev/null ; then ++ echo "$disk has invalid size $size" ++ return 1 ++ fi ++ else ++ return 1 ++ fi ++} ++ + function is_multipath_path { + # Return 'false' if there is no device as argument: + test "$1" || return 1 diff --git a/SOURCES/rear-skip-useless-xfs-mount-options-RHEL-10478.patch b/SOURCES/rear-skip-useless-xfs-mount-options-RHEL-10478.patch new file mode 100644 index 0000000..2863131 --- /dev/null +++ b/SOURCES/rear-skip-useless-xfs-mount-options-RHEL-10478.patch @@ -0,0 +1,85 @@ +diff --git a/usr/share/rear/layout/prepare/GNU/Linux/133_include_mount_filesystem_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/133_include_mount_filesystem_code.sh +index d57077791..87ab5d691 100644 +--- a/usr/share/rear/layout/prepare/GNU/Linux/133_include_mount_filesystem_code.sh ++++ b/usr/share/rear/layout/prepare/GNU/Linux/133_include_mount_filesystem_code.sh +@@ -29,6 +29,7 @@ mount_fs() { + case $name in + (options) + # Do not mount nodev, as chrooting later on would fail: ++ # FIXME: naive approach, will replace any "nodev" inside longer options/values + value=${value//nodev/dev} + # btrfs mount options like subvolid=259 or subvol=/@/.snapshots/1/snapshot + # from the old system cannot work here for recovery because btrfs subvolumes +@@ -147,6 +148,27 @@ mount_fs() { + echo "mount $mountopts,remount,user_xattr $device $TARGET_FS_ROOT$mountpoint" + ) >> "$LAYOUT_CODE" + ;; ++ (xfs) ++ # remove logbsize=... mount option. 
It is a purely performance/memory usage optimization option, ++ # which can lead to mount failures, because it must be an integer multiple of the log stripe unit ++ # and the log stripe unit can be different in the recreated filesystem from the original filesystem ++ # (for example when using MKFS_XFS_OPTIONS, or in some exotic situations involving an old filesystem, ++ # see GitHub issue #2777 ). ++ # If logbsize is not an integer multiple of the log stripe unit, mount fails with the warning ++ # "XFS (...): logbuf size must be greater than or equal to log stripe size" ++ # in the kernel log ++ # (and a confusing error message ++ # "mount: ...: wrong fs type, bad option, bad superblock on ..., missing codepage or helper program, or other error." ++ # from the mount command), causing the layout restoration in the recovery process to fail. ++ # Wrong sunit/swidth can cause mount to fail as well, with this in the kernel log: ++ # "kernel: XFS (...): alignment check failed: sunit/swidth vs. agsize", ++ # so remove the sunit=.../swidth=... mount options as well. ++ mountopts="$( remove_mount_options_values "$mountopts" logbsize sunit swidth )" ++ ( ++ echo "mkdir -p $TARGET_FS_ROOT$mountpoint" ++ echo "mount $mountopts $device $TARGET_FS_ROOT$mountpoint" ++ ) >> "$LAYOUT_CODE" ++ ;; + (*) + ( + echo "mkdir -p $TARGET_FS_ROOT$mountpoint" +diff --git a/usr/share/rear/lib/filesystems-functions.sh b/usr/share/rear/lib/filesystems-functions.sh +index afdd3f24c..658d757f4 100644 +--- a/usr/share/rear/lib/filesystems-functions.sh ++++ b/usr/share/rear/lib/filesystems-functions.sh +@@ -239,3 +239,40 @@ function xfs_parse + # Output xfs options for further use + echo "$xfs_opts" + } ++ ++ ++# $1 is a mount command argument (string containing comma-separated ++# mount options). The remaining arguments to the function ($2 ... ) ++# specify the mount options to remove from $1, together with a trailing "=" ++# and any value that follows each option. ++# For example, the call ++# "remove_mount_options_values nodev,uid=1,rw,gid=1 uid gid" ++# returns "nodev,rw". ++# There is no support for removing a mount option without a value and "=", ++# so "remove_mount_options_values nodev,uid=1,rw,gid=1 rw" will not work. ++# The function will return the modified string on stdout. ++ ++function remove_mount_options_values () { ++ local str="$1" ++ ++ shift ++ # First add a comma at the end so that it is easier to remove a mount option at the end: ++ str="${str/%/,}" ++ for i in "$@" ; do ++ # FIXME this also removes trailing strings at the end of longer words ++ # For example if one wants to remove any id=... option, ++ # the function will also replace "uid=1" by "u" by removing ++ # the trailing "id=1", which is not intended. ++ # Not easy to fix because $str can contain prefixes which are not ++ # mount options but arguments to the mount command itself ++ # (in particluar, "-o "). 
++ # FIXME this simple approach would fail in case of mount options ++ # containing commas, for example the "context" option values, ++ # see mount(8) ++ ++ # the extglob shell option is enabled in rear ++ str="${str//$i=*([^,]),/}" ++ done ++ # Remove all commas at the end: ++ echo "${str/%,/}" ++} diff --git a/SOURCES/rear-uefi-usb-secureboot-bz2196445.patch b/SOURCES/rear-uefi-usb-secureboot-bz2196445.patch new file mode 100644 index 0000000..2fc7035 --- /dev/null +++ b/SOURCES/rear-uefi-usb-secureboot-bz2196445.patch @@ -0,0 +1,82 @@ +commit 4af486794d45adbda7567361d8dcc658599dcd2c +Author: Johannes Meixner +Date: Tue Aug 8 14:44:16 2023 +0200 + + Merge pull request #3031 from rear/jsmeix-USB-Secure-Boot + + Secure Boot support for OUTPUT=USB: + In output/USB/Linux-i386/100_create_efiboot.sh + added SECURE_BOOT_BOOTLOADER related code that is based + on the code in output/ISO/Linux-i386/250_populate_efibootimg.sh + with some adaptions to make it work within the existing USB code. + The basic idea for Secure Boot booting of the ReaR recovery system + is to "just copy" the (signed) EFI binaries of the Linux distribution + (shim*.efi and grub*.efi as first and second stage UEFI bootloaders) + instead of let ReaR make its own EFI binary via build_bootx86_efi() + see https://github.com/rear/rear/pull/3031 + +diff --git a/usr/share/rear/output/USB/Linux-i386/100_create_efiboot.sh b/usr/share/rear/output/USB/Linux-i386/100_create_efiboot.sh +index f4659306..fd631c44 100644 +--- a/usr/share/rear/output/USB/Linux-i386/100_create_efiboot.sh ++++ b/usr/share/rear/output/USB/Linux-i386/100_create_efiboot.sh +@@ -29,6 +29,44 @@ mount $EFI_PART $EFI_MPT || Error "Failed to mount EFI partition '$EFI_PART' at + mkdir -p $EFI_DST || Error "Failed to create directory '$EFI_DST'" + + # Copy boot loader ++# The SECURE_BOOT_BOOTLOADER related code below is based on the code in output/ISO/Linux-i386/250_populate_efibootimg.sh ++# because I noticed that Secure Boot works with ISO at least for me, cf. ++# https://github.com/rear/rear/pull/3025#issuecomment-1635876186 ++# but not with USB, cf. ++# https://github.com/rear/rear/pull/3025#issuecomment-1643774477 ++# so I tried to re-use the ISO Secure Boot code for USB ++# which made Secure Boot "just work" for me with USB ++# but I had to do some (minor) adaptions to make it work ++# within the existing USB code, cf. ++# https://github.com/rear/rear/pull/3031#issuecomment-1653443454 ++# Copy UEFI bootloader: ++if test -f "$SECURE_BOOT_BOOTLOADER" ; then ++ # For a technical description of Shim see https://mjg59.dreamwidth.org/19448.html ++ # Shim is a signed EFI binary that is a first stage bootloader ++ # that loads and executes another (signed) EFI binary ++ # which normally is a second stage bootloader ++ # which normally is a GRUB EFI binary ++ # which normally is available as a file named grub*.efi ++ # so when SECURE_BOOT_BOOTLOADER is used as UEFI_BOOTLOADER ++ # (cf. rescue/default/850_save_sysfs_uefi_vars.sh) ++ # then Shim (usually shim.efi) must be copied as EFI/BOOT/BOOTX64.efi ++ # and Shim's second stage bootloader must be also copied where Shim already is. 
++ DebugPrint "Using '$SECURE_BOOT_BOOTLOADER' as first stage Secure Boot bootloader BOOTX64.efi" ++ cp -L $v "$SECURE_BOOT_BOOTLOADER" "$EFI_DST/BOOTX64.efi" || Error "Failed to copy SECURE_BOOT_BOOTLOADER '$SECURE_BOOT_BOOTLOADER' to $EFI_DST/BOOTX64.efi" ++ # When Shim is used, its second stage bootloader can be actually anything ++ # named grub*.efi (second stage bootloader is Shim compile time option), see ++ # http://www.rodsbooks.com/efi-bootloaders/secureboot.html#initial_shim ++ local uefi_bootloader_dirname="$( dirname $SECURE_BOOT_BOOTLOADER )" ++ local second_stage_UEFI_bootloader_files="$( echo $uefi_bootloader_dirname/grub*.efi )" ++ # Avoid 'nullglob' pitfall when nothing matches .../grub*.efi which would result ++ # an invalid "cp -v /var/tmp/.../EFI/BOOT/" command that fails ++ # cf. https://github.com/rear/rear/issues/1921 ++ test "$second_stage_UEFI_bootloader_files" || Error "Could not find second stage Secure Boot bootloader $uefi_bootloader_dirname/grub*.efi" ++ DebugPrint "Using second stage Secure Boot bootloader files: $second_stage_UEFI_bootloader_files" ++ cp -L $v $second_stage_UEFI_bootloader_files $EFI_DST/ || Error "Failed to copy second stage Secure Boot bootloader files" ++else ++ cp -L $v "$UEFI_BOOTLOADER" "$EFI_DST/BOOTX64.efi" || Error "Failed to copy UEFI_BOOTLOADER '$UEFI_BOOTLOADER' to $EFI_DST/BOOTX64.efi" ++fi + cp $v $UEFI_BOOTLOADER "$EFI_DST/BOOTX64.efi" || Error "Failed to copy UEFI_BOOTLOADER '$UEFI_BOOTLOADER' to $EFI_DST/BOOTX64.efi" + + # Copy kernel +@@ -93,7 +131,14 @@ EOF + create_grub2_cfg ${EFI_DIR}/kernel ${EFI_DIR}/$REAR_INITRD_FILENAME > ${EFI_DST}/grub.cfg + + # Create bootloader, this overwrite BOOTX64.efi copied in previous step ... +- build_bootx86_efi ${EFI_DST}/BOOTX64.efi ${EFI_DST}/grub.cfg "/boot" "$UEFI_BOOTLOADER" ++ # Create BOOTX86.efi but only if we are NOT secure booting. ++ # We are not able to create signed boot loader ++ # so we need to reuse existing one. ++ # See issue #1374 ++ # build_bootx86_efi () can be safely used for other scenarios. ++ if ! test -f "$SECURE_BOOT_BOOTLOADER" ; then ++ build_bootx86_efi ${EFI_DST}/BOOTX64.efi ${EFI_DST}/grub.cfg "/boot" "$UEFI_BOOTLOADER" ++ fi + ;; + *) + BugError "Neither grub 0.97 nor 2.0" diff --git a/SOURCES/rear-usb-uefi-part-size-bz2228402.patch b/SOURCES/rear-usb-uefi-part-size-bz2228402.patch new file mode 100644 index 0000000..68cf13e --- /dev/null +++ b/SOURCES/rear-usb-uefi-part-size-bz2228402.patch @@ -0,0 +1,41 @@ +commit 1cd41052f7a7cd42ea14ea53b7280c73624aba3f +Author: Johannes Meixner +Date: Mon Mar 21 12:14:21 2022 +0100 + + Merge pull request #2774 from rear/jsmeix-1024-USB_UEFI_PART_SIZE + + In default.conf increase USB_UEFI_PART_SIZE to 1024 MiB, + cf. https://github.com/rear/rear/pull/1205 + in particular to also make things work by default when additional + third-party kernel modules and firmware (e.g. from Nvidia) are used, + cf. https://github.com/rear/rear/issues/2770#issuecomment-1068935688 + +diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf +index 8faa56aa..17a764cb 100644 +--- a/usr/share/rear/conf/default.conf ++++ b/usr/share/rear/conf/default.conf +@@ -872,13 +872,20 @@ USB_PARTITION_ALIGN_BLOCK_SIZE="8" + # in MiB when formatting a medium by the format workflow. + # If USB_UEFI_PART_SIZE is empty or invalid (i.e. not an unsigned integer larger than 0) + # the user must interactively enter a valid value while running the format workflow. 
+-# The default value of 400 MiB should be sufficiently big and it is in compliance +-# with the 8 MiB partition alignment default value ( 400 = 8 * 50 ) +-# and even with a 16 MiB partition alignment value ( 400 = 16 * 25 ) ++# The default value of 1024 MiB should be sufficiently big + # cf. https://github.com/rear/rear/pull/1205 ++# in particular when third-party kernel modules and firmware (e.g. from Nvidia) are used ++# cf. https://github.com/rear/rear/issues/2770#issuecomment-1068935688 ++# and 1024 MiB is in compliance with the 8 MiB partition alignment value ( 1024 = 8 * 128 ) ++# and also with higher 2^n MiB partition alignment values. ++# Furthermore the default value of 1024 MiB results that the FAT filesystem of the ESP ++# will be in compliance with that the ESP should officially use a FAT32 filesystem ++# because mkfs.vfat automatically makes a FAT32 filesystem starting at 512 MiB ++# (a FAT16 ESP works in most cases but causes issues with certain UEFI firmware) ++# cf. https://github.com/rear/rear/issues/2575 + # The value of USB_UEFI_PART_SIZE will be rounded to the nearest + # USB_PARTITION_ALIGN_BLOCK_SIZE chunk: +-USB_UEFI_PART_SIZE="400" ++USB_UEFI_PART_SIZE="1024" + # + # Default boot option (i.e. what gets booted automatically after some timeout) + # when EXTLINUX boots the USB stick or USB disk or other disk device on BIOS systems. diff --git a/SOURCES/rear-vg-command-not-found-bz2121476.patch b/SOURCES/rear-vg-command-not-found-bz2121476.patch new file mode 100644 index 0000000..6d6ab1d --- /dev/null +++ b/SOURCES/rear-vg-command-not-found-bz2121476.patch @@ -0,0 +1,21 @@ +commit ead05a460d3b219372f47be888ba6011c7fd3318 +Author: Pavel Cahyna +Date: Tue Aug 22 12:32:04 2023 +0200 + + Fix downstream only bug + + \$IsInArray -> IsInArray - it is a shell function, not a variable. + +diff --git a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh +index d34ab335..a65a9c8e 100644 +--- a/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh ++++ b/usr/share/rear/layout/prepare/GNU/Linux/110_include_lvm_code.sh +@@ -246,7 +246,7 @@ create_lvmvol() { + local warnraidline + + if [ $is_thin -eq 0 ] ; then +- ifline="if IsInArray $vg \"\${create_logical_volumes[@]}\" && ! \$IsInArray $vg \"\${create_thin_volumes_only[@]}\" ; then" ++ ifline="if IsInArray $vg \"\${create_logical_volumes[@]}\" && ! IsInArray $vg \"\${create_thin_volumes_only[@]}\" ; then" + else + ifline="if IsInArray $vg \"\${create_logical_volumes[@]}\" ; then" + fi diff --git a/SOURCES/s390-no-clobber-disks.patch b/SOURCES/s390-no-clobber-disks.patch new file mode 100644 index 0000000..8d2f81a --- /dev/null +++ b/SOURCES/s390-no-clobber-disks.patch @@ -0,0 +1,751 @@ +diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf +index 23a83b71..0d13b487 100644 +--- a/usr/share/rear/conf/default.conf ++++ b/usr/share/rear/conf/default.conf +@@ -416,6 +416,18 @@ test "$RECOVERY_UPDATE_URL" || RECOVERY_UPDATE_URL="" + # export MIGRATION_MODE='true' + # directly before he calls "rear recover": + test "$MIGRATION_MODE" || MIGRATION_MODE='' ++#### ++ ++#### ++# Formatting DASDs (S/390 specific) ++# DASD (Direct Access Storage Device) denotes a disk drive on the S/390 architecture. ++# DASDs need to be formatted before use (even before creating a partition table on them). 
++# By default ReaR will format the DASDs that are going to be used to recreate the system ++# (are referenced in disklayout.conf) before recreating the disk layout. ++# This can be suppressed by setting FORMAT_DASDS="false". It can be useful when one intends ++# to use already formatted DASDs as recovery target. ++FORMAT_DASDS="" ++#### + + ## + # Resizing partitions in MIGRATION_MODE during "rear recover" +diff --git a/usr/share/rear/layout/prep-for-mount/Linux-s390/205_s390_enable_disk.sh b/usr/share/rear/layout/prep-for-mount/Linux-s390/205_s390_enable_disk.sh +new file mode 120000 +index 00000000..5f7a2ac0 +--- /dev/null ++++ b/usr/share/rear/layout/prep-for-mount/Linux-s390/205_s390_enable_disk.sh +@@ -0,0 +1 @@ ++../../prepare/Linux-s390/205_s390_enable_disk.sh +\ No newline at end of file +diff --git a/usr/share/rear/layout/prepare/GNU/Linux/100_include_partition_code.sh b/usr/share/rear/layout/prepare/GNU/Linux/100_include_partition_code.sh +index 13c69ce8..2a2bc33f 100644 +--- a/usr/share/rear/layout/prepare/GNU/Linux/100_include_partition_code.sh ++++ b/usr/share/rear/layout/prepare/GNU/Linux/100_include_partition_code.sh +@@ -24,6 +24,7 @@ fi + ### Prepare a disk for partitioning/general usage. + create_disk() { + local component disk size label junk ++ local blocksize layout dasdtype dasdcyls junk2 + read component disk size label junk < <(grep "^disk $1 " "$LAYOUT_FILE") + + ### Disks should be block devices. +@@ -67,7 +68,8 @@ sync + + EOF + +- create_partitions "$disk" "$label" ++ # $junk can contain useful DASD-specific fields ++ create_partitions "$disk" "$label" "$junk" + + cat >> "$LAYOUT_CODE" <> "$LAYOUT_CODE" <"$DASD_FORMAT_CODE" ++ ++# Show the current output of lsdasd, it can be useful for identifying disks ++# (in particular it shows the Linux device name <-> virtual device number mapping, ++# formatted / unformatted status and the number/size of blocks when formatted ) ++echo "# Current output of 'lsdasd':" >>"$DASD_FORMAT_CODE" ++lsdasd | sed -e 's/^/# /' >>"$DASD_FORMAT_CODE" ++ ++cat <>"$DASD_FORMAT_CODE" ++ ++LogPrint "Start DASD format restoration." ++ ++set -e ++set -x ++ ++EOF ++ ++while read component disk size label junk; do ++ if [ "$label" == dasd ]; then ++ # Ignore excluded components. ++ # Normally they are removed in 520_exclude_components.sh, ++ # but we run before it, so we must skip them here as well. ++ if IsInArray "$disk" "${EXCLUDE_RECREATE[@]}" ; then ++ Log "Excluding $disk from DASD reformatting." ++ continue ++ fi ++ # dasd has more fields - junk is not junk anymore ++ read blocksize layout dasdtype dasdcyls junk2 <<<$junk ++ dasd_format_code "$disk" "$size" "$blocksize" "$layout" "$dasdtype" "$dasdcyls" >> "$DASD_FORMAT_CODE" || \ ++ LogPrintError "Error producing DASD format code for $disk" ++ fi ++done < <(grep "^disk " "$LAYOUT_FILE") ++ ++cat <>"$DASD_FORMAT_CODE" ++ ++set +x ++set +e ++ ++LogPrint "DASD(s) formatted." ++ ++EOF +diff --git a/usr/share/rear/layout/prepare/Linux-s390/370_confirm_dasd_format_code.sh b/usr/share/rear/layout/prepare/Linux-s390/370_confirm_dasd_format_code.sh +new file mode 100644 +index 00000000..5ba4edd5 +--- /dev/null ++++ b/usr/share/rear/layout/prepare/Linux-s390/370_confirm_dasd_format_code.sh +@@ -0,0 +1,69 @@ ++# adapted from 100_confirm_layout_code.sh ++# ++# Let the user confirm the ++# DASD format code (dasdformat.sh) script. 
++# ++ ++is_false "$FORMAT_DASDS" && return 0 ++ ++# Show the user confirmation dialog in any case but when not in migration mode ++# automatically proceed with less timeout USER_INPUT_INTERRUPT_TIMEOUT (by default 10 seconds) ++# to avoid longer delays (USER_INPUT_TIMEOUT is by default 300 seconds) in case of unattended recovery: ++# (taken from 120_confirm_wipedisk_disks.sh) ++local timeout="$USER_INPUT_TIMEOUT" ++is_true "$MIGRATION_MODE" || timeout="$USER_INPUT_INTERRUPT_TIMEOUT" ++ ++rear_workflow="rear $WORKFLOW" ++original_disk_space_usage_file="$VAR_DIR/layout/config/df.txt" ++rear_shell_history="$( echo -e "cd $VAR_DIR/layout/\nvi $DASD_FORMAT_CODE\nless $DASD_FORMAT_CODE" )" ++unset choices ++choices[0]="Confirm DASD format script and continue '$rear_workflow'" ++choices[1]="Edit DASD format script ($DASD_FORMAT_CODE)" ++choices[2]="View DASD format script ($DASD_FORMAT_CODE)" ++choices[3]="View original disk space usage ($original_disk_space_usage_file)" ++choices[4]="Confirm what is currently on the DASDs, skip formatting them and continue '$rear_workflow'" ++choices[5]="Use Relax-and-Recover shell and return back to here" ++choices[6]="Abort '$rear_workflow'" ++prompt="Confirm or edit the DASD format script" ++choice="" ++wilful_input="" ++# When USER_INPUT_DASD_FORMAT_CODE_CONFIRMATION has any 'true' value be liberal in what you accept and ++# assume choices[0] 'Confirm DASD format' was actually meant: ++is_true "$USER_INPUT_DASD_FORMAT_CODE_CONFIRMATION" && USER_INPUT_DASD_FORMAT_CODE_CONFIRMATION="${choices[0]}" ++while true ; do ++ choice="$( UserInput -I DASD_FORMAT_CODE_CONFIRMATION -t "$timeout" -p "$prompt" -D "${choices[0]}" "${choices[@]}" )" && wilful_input="yes" || wilful_input="no" ++ case "$choice" in ++ (${choices[0]}) ++ # Confirm DASD format file and continue: ++ is_true "$wilful_input" && LogPrint "User confirmed DASD format script" || LogPrint "Continuing '$rear_workflow' by default" ++ break ++ ;; ++ (${choices[1]}) ++ # Run 'vi' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user: ++ vi $DASD_FORMAT_CODE 0<&6 1>&7 2>&8 ++ ;; ++ (${choices[2]}) ++ # Run 'less' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user: ++ less $DASD_FORMAT_CODE 0<&6 1>&7 2>&8 ++ ;; ++ (${choices[3]}) ++ # Run 'less' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user: ++ less $original_disk_space_usage_file 0<&6 1>&7 2>&8 ++ ;; ++ (${choices[4]}) ++ # Confirm what is on the disks and continue without formatting ++ FORMAT_DASDS="false" ++ ;; ++ (${choices[5]}) ++ # rear_shell runs 'bash' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user: ++ rear_shell "" "$rear_shell_history" ++ ;; ++ (${choices[6]}) ++ abort_dasd_format ++ Error "User chose to abort '$rear_workflow' in ${BASH_SOURCE[0]}" ++ ;; ++ esac ++done ++ ++chmod +x $DASD_FORMAT_CODE ++ +diff --git a/usr/share/rear/layout/prepare/Linux-s390/400_run_dasd_format_code.sh b/usr/share/rear/layout/prepare/Linux-s390/400_run_dasd_format_code.sh +new file mode 100644 +index 00000000..16451af6 +--- /dev/null ++++ b/usr/share/rear/layout/prepare/Linux-s390/400_run_dasd_format_code.sh +@@ -0,0 +1,185 @@ ++# adapted from 200_run_layout_code.sh ++# ++# Run the DASD format code (dasdformat.sh) ++# again and again until it succeeds or the user aborts. 
++# ++ ++# Skip DASD formatting when the user has explicitly specified to not format them ++# or when the user selected "Confirm what is currently on the DASDs, skip formatting them" ++# in 370_confirm_dasd_format_code.sh ++ ++is_false "$FORMAT_DASDS" && return 0 ++ ++function lsdasd_output () { ++ lsdasd 1>> >( tee -a "$RUNTIME_LOGFILE" 1>&7 ) ++} ++ ++rear_workflow="rear $WORKFLOW" ++original_disk_space_usage_file="$VAR_DIR/layout/config/df.txt" ++rear_shell_history="$( echo -e "cd $VAR_DIR/layout/\nvi $DASD_FORMAT_CODE\nless $RUNTIME_LOGFILE" )" ++wilful_input="" ++ ++unset choices ++choices[0]="Rerun DASD format script ($DASD_FORMAT_CODE)" ++choices[1]="View '$rear_workflow' log file ($RUNTIME_LOGFILE)" ++choices[2]="Edit DASD format script ($DASD_FORMAT_CODE)" ++choices[3]="Show what is currently on the disks ('lsdasd' device list)" ++choices[4]="View original disk space usage ($original_disk_space_usage_file)" ++choices[5]="Use Relax-and-Recover shell and return back to here" ++choices[6]="Confirm what is currently on the disks and continue '$rear_workflow'" ++choices[7]="Abort '$rear_workflow'" ++prompt="DASD format choices" ++ ++choice="" ++# When USER_INPUT_DASD_FORMAT_CODE_RUN has any 'true' value be liberal in what you accept and ++# assume choices[0] 'Rerun DASD format script' was actually meant ++# regardless that this likely lets 'rear recover' run an endless loop ++# of failed DASD format attempts but ReaR must obey what the user specified ++# (perhaps it is intended to let 'rear recover' loop here until an admin intervenes): ++is_true "$USER_INPUT_DASD_FORMAT_CODE_RUN" && USER_INPUT_DASD_FORMAT_CODE_RUN="${choices[0]}" ++ ++unset confirm_choices ++confirm_choices[0]="Confirm recreated DASD format and continue '$rear_workflow'" ++confirm_choices[1]="Go back one step to redo DASD format" ++confirm_choices[2]="Use Relax-and-Recover shell and return back to here" ++confirm_choices[3]="Abort '$rear_workflow'" ++confirm_prompt="Confirm the recreated DASD format or go back one step" ++confirm_choice="" ++# When USER_INPUT_DASD_FORMAT_MIGRATED_CONFIRMATION has any 'true' value be liberal in what you accept and ++# assume confirm_choices[0] 'Confirm recreated DASD format and continue' was actually meant: ++is_true "$USER_INPUT_DASD_FORMAT_MIGRATED_CONFIRMATION" && USER_INPUT_DASD_FORMAT_MIGRATED_CONFIRMATION="${confirm_choices[0]}" ++ ++# Run the DASD format code (dasdformat.sh) ++# again and again until it succeeds or the user aborts ++# or the user confirms to continue with what is currently on the disks ++# (the user may have setup manually what he needs via the Relax-and-Recover shell): ++while true ; do ++ prompt="The DASD format had failed" ++ # After switching to recreating with DASD format script ++ # change choices[0] from "Run ..." to "Rerun ...": ++ choices[0]="Rerun DASD format script ($DASD_FORMAT_CODE)" ++ # Run DASD_FORMAT_CODE in a sub-shell because it sets 'set -e' ++ # so that it exits the running shell in case of an error ++ # but that exit must not exit this running bash here: ++ ( source $DASD_FORMAT_CODE ) ++ # One must explicitly test whether or not $? is zero in a separated bash command ++ # because with bash 3.x and bash 4.x code like ++ # # ( set -e ; cat qqq ; echo "hello" ) && echo ok || echo failed ++ # cat: qqq: No such file or directory ++ # hello ++ # ok ++ # does not work as one may expect (cf. what "man bash" describes for 'set -e'). 
++ # There is a subtle behavioural difference between bash 3.x and bash 4.x ++ # when a script that has 'set -e' set gets sourced: ++ # With bash 3.x the 'set -e' inside the sourced script is effective: ++ # # echo 'set -e ; cat qqq ; echo hello' >script.sh ++ # # ( source script.sh ) && echo ok || echo failed ++ # cat: qqq: No such file or directory ++ # failed ++ # With bash 4.x the 'set -e' inside the sourced script gets noneffective: ++ # # echo 'set -e ; cat qqq ; echo hello' >script.sh ++ # # ( source script.sh ) && echo ok || echo failed ++ # cat: qqq: No such file or directory ++ # hello ++ # ok ++ # With bash 3.x and bash 4.x testing $? in a separated bash command ++ # keeps the 'set -e' inside the sourced script effective: ++ # # echo 'set -e ; cat qqq ; echo hello' >script.sh ++ # # ( source script.sh ) ; (( $? == 0 )) && echo ok || echo failed ++ # cat: qqq: No such file or directory ++ # failed ++ # See also https://github.com/rear/rear/pull/1573#issuecomment-344303590 ++ if (( $? == 0 )) ; then ++ prompt="DASD format had been successful" ++ # When DASD_FORMAT_CODE succeeded and when not in migration mode ++ # break the outer while loop and continue the "rear recover" workflow ++ # which means continue with restoring the backup: ++ is_true "$MIGRATION_MODE" || break ++ # When DASD_FORMAT_CODE succeeded in migration mode ++ # let the user explicitly confirm the recreated (and usually migrated) format ++ # before continuing the "rear recover" workflow with restoring the backup. ++ # Show the recreated DASD format to the user on his terminal (and also in the log file): ++ LogPrint "Recreated DASD format:" ++ lsdasd_output ++ # Run an inner while loop with a user dialog so that the user can inspect the recreated DASD format ++ # and perhaps even manually fix the recreated DASD format if it is not what the user wants ++ # (e.g. by using the Relax-and-Recover shell and returning back to this user dialog): ++ while true ; do ++ confirm_choice="$( UserInput -I DASD_FORMAT_MIGRATED_CONFIRMATION -p "$confirm_prompt" -D "${confirm_choices[0]}" "${confirm_choices[@]}" )" && wilful_input="yes" || wilful_input="no" ++ case "$confirm_choice" in ++ (${confirm_choices[0]}) ++ # Confirm recreated DASD format and continue: ++ is_true "$wilful_input" && LogPrint "User confirmed recreated DASD format" || LogPrint "Continuing with recreated DASD format by default" ++ # Break the outer while loop and continue with restoring the backup: ++ break 2 ++ ;; ++ (${confirm_choices[1]}) ++ # Go back one step to redo DASD format: ++ # Only break the inner while loop (i.e. this user dialog loop) ++ # and continue with the next user dialog below: ++ break ++ ;; ++ (${confirm_choices[2]}) ++ # rear_shell runs 'bash' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user: ++ rear_shell "" "$rear_shell_history" ++ ;; ++ (${confirm_choices[3]}) ++ abort_dasd_format ++ Error "User did not confirm the recreated DASD format but aborted '$rear_workflow' in ${BASH_SOURCE[0]}" ++ ;; ++ esac ++ done ++ fi ++ # Run an inner while loop with a user dialog so that the user can fix things ++ # when DASD_FORMAT_CODE failed. ++ # Such a fix does not necessarily mean the user must change ++ # the dasdformat.sh script when DASD_FORMAT_CODE failed. ++ # The user might also fix things by only using the Relax-and-Recover shell and ++ # then confirm what is on the disks and continue with restoring the backup ++ # or abort this "rear recover" run to re-try from scratch. 
++ while true ; do ++ choice="$( UserInput -I DASD_FORMAT_CODE_RUN -p "$prompt" -D "${choices[0]}" "${choices[@]}" )" && wilful_input="yes" || wilful_input="no" ++ case "$choice" in ++ (${choices[0]}) ++ # Rerun or run (after switching to recreating with DASD format script) DASD format script: ++ is_true "$wilful_input" && LogPrint "User runs DASD format script" || LogPrint "Running DASD format script by default" ++ # Only break the inner while loop (i.e. the user dialog loop): ++ break ++ ;; ++ (${choices[1]}) ++ # Run 'less' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user: ++ less $RUNTIME_LOGFILE 0<&6 1>&7 2>&8 ++ ;; ++ (${choices[2]}) ++ # Run 'vi' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user: ++ vi $DASD_FORMAT_CODE 0<&6 1>&7 2>&8 ++ ;; ++ (${choices[3]}) ++ LogPrint "This is the current list of DASDs:" ++ lsdasd_output ++ ;; ++ (${choices[4]}) ++ # Run 'less' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user: ++ less $original_disk_space_usage_file 0<&6 1>&7 2>&8 ++ ;; ++ (${choices[5]}) ++ # rear_shell runs 'bash' with the original STDIN STDOUT and STDERR when 'rear' was launched by the user: ++ rear_shell "" "$rear_shell_history" ++ ;; ++ (${choices[6]}) ++ # Confirm what is on the disks and continue: ++ # Break the outer while loop and continue with restoring the backup: ++ break 2 ++ ;; ++ (${choices[7]}) ++ abort_dasd_format ++ Error "User chose to abort '$rear_workflow' in ${BASH_SOURCE[0]}" ++ ;; ++ esac ++ done ++# End of the outer while loop: ++done ++ ++# Local functions must be 'unset' because bash does not support 'local function ...' ++# cf. https://unix.stackexchange.com/questions/104755/how-can-i-create-a-local-function-in-my-bashrc ++unset -f lsdasd_output +diff --git a/usr/share/rear/layout/prepare/default/010_prepare_files.sh b/usr/share/rear/layout/prepare/default/010_prepare_files.sh +index 7a980e63..4191be33 100644 +--- a/usr/share/rear/layout/prepare/default/010_prepare_files.sh ++++ b/usr/share/rear/layout/prepare/default/010_prepare_files.sh +@@ -7,6 +7,8 @@ LAYOUT_CODE="$VAR_DIR/layout/diskrestore.sh" + LAYOUT_XFS_OPT_DIR="$VAR_DIR/layout/xfs" + LAYOUT_XFS_OPT_DIR_RESTORE="$LAYOUT_XFS_OPT_DIR/restore" + ++DASD_FORMAT_CODE="$VAR_DIR/layout/dasdformat.sh" ++ + FS_UUID_MAP="$VAR_DIR/layout/fs_uuid_mapping" + LUN_WWID_MAP="$VAR_DIR/layout/lun_wwid_mapping" + +diff --git a/usr/share/rear/layout/prepare/default/250_compare_disks.sh b/usr/share/rear/layout/prepare/default/250_compare_disks.sh +index c459b928..751433ba 100644 +--- a/usr/share/rear/layout/prepare/default/250_compare_disks.sh ++++ b/usr/share/rear/layout/prepare/default/250_compare_disks.sh +@@ -54,7 +54,9 @@ local more_than_one_same_orig_size='' + # Cf. the "Compare disks one by one" code below: + while read disk dev size junk ; do + if IsInArray "$size" "${original_system_used_disk_sizes[@]}" ; then +- more_than_one_same_orig_size='true' ++ if ! has_mapping_hint "$dev" ; then ++ more_than_one_same_orig_size='true' ++ fi + else + original_system_used_disk_sizes+=( "$size" ) + fi +@@ -109,14 +111,17 @@ fi + # No further disk comparisons are needed when MIGRATION_MODE is already set true above: + if ! 
is_true "$MIGRATION_MODE" ; then + # Compare original disks and their possible target disk one by one: +- while read disk dev size junk ; do +- dev=$( get_sysfs_name $dev ) ++ while read disk devnode size junk ; do ++ dev=$( get_sysfs_name $devnode ) + Log "Comparing $dev" + if test -e "/sys/block/$dev" ; then + Log "Device /sys/block/$dev exists" + newsize=$( get_disk_size $dev ) + if test "$newsize" -eq "$size" ; then + LogPrint "Device $dev has expected (same) size $size bytes (will be used for '$WORKFLOW')" ++ elif test "$( get_mapping_hint $devnode )" == "$devnode" ; then ++ Debug "Found identical mapping hint ${devnode} -> ${devnode}" ++ LogPrint "Device $dev found according to mapping hints (will be used for '$WORKFLOW')" + else + LogPrint "Device $dev has size $newsize bytes but $size bytes is expected (needs manual configuration)" + MIGRATION_MODE='true' +diff --git a/usr/share/rear/layout/prepare/default/300_map_disks.sh b/usr/share/rear/layout/prepare/default/300_map_disks.sh +index 2e90768c..468aa35c 100644 +--- a/usr/share/rear/layout/prepare/default/300_map_disks.sh ++++ b/usr/share/rear/layout/prepare/default/300_map_disks.sh +@@ -112,7 +112,14 @@ while read keyword orig_device orig_size junk ; do + # Continue with next original device when it is already used as source in the mapping file: + is_mapping_source "$orig_device" && continue + # First, try to find if there is a current disk with same name and same size as the original: +- sysfs_device_name="$( get_sysfs_name "$orig_device" )" ++ # (possibly influenced by mapping hints if known) ++ if has_mapping_hint "$orig_device" ; then ++ candidate_target_device_name="$( get_mapping_hint "$orig_device" )" ++ Debug "Using mapping hint ${candidate_target_device_name} as candidate for $orig_device mapping" ++ else ++ candidate_target_device_name="$orig_device" ++ fi ++ sysfs_device_name="$( get_sysfs_name "$candidate_target_device_name" )" + current_device="/sys/block/$sysfs_device_name" + if test -e $current_device ; then + current_size=$( get_disk_size $sysfs_device_name ) +@@ -122,11 +129,16 @@ while read keyword orig_device orig_size junk ; do + # Continue with next one if the current one is already used as target in the mapping file: + is_mapping_target "$preferred_target_device_name" && continue + # Use the current one if it is of same size as the old one: +- if test "$orig_size" -eq "$current_size" ; then ++ if has_mapping_hint "$orig_device" || test "$orig_size" -eq "$current_size" ; then + # Ensure the determined target device is really a block device: + if test -b "$preferred_target_device_name" ; then ++ if has_mapping_hint "$orig_device" ; then ++ mapping_reason="determined by mapping hint" ++ else ++ mapping_reason="same name and same size $current_size" ++ fi + add_mapping "$orig_device" "$preferred_target_device_name" +- LogPrint "Using $preferred_target_device_name (same name and same size) for recreating $orig_device" ++ LogPrint "Using $preferred_target_device_name ($mapping_reason) for recreating $orig_device" + # Continue with next original device in the LAYOUT_FILE: + continue + fi +diff --git a/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh b/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh +index 3ab7357d..da6ce64c 100644 +--- a/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh ++++ b/usr/share/rear/layout/save/GNU/Linux/200_partition_layout.sh +@@ -362,18 +362,27 @@ Log "Saving disk partitions." 
+ + if [[ $blockd == dasd* && "$ARCH" == "Linux-s390" ]] ; then + devname=$(get_device_name $disk) ++ dasdnum=$( lsdasd | awk "\$3 == \"$blockd\" { print \$1}" ) ++ dasdstatus=$( lsdasd | awk "\$3 == \"$blockd\" { print \$2}" ) ++ # ECKD or FBA ++ dasdtype=$( lsdasd | awk "\$3 == \"$blockd\" { print \$5}" ) ++ if [ "$dasdtype" != ECKD ] && [ "$dasdtype" != FBA ]; then ++ LogPrint "Type $dasdtype of DASD $blockd unexpected: neither ECKD nor FBA" ++ fi + +- echo "# active dasd bus and channel" +- echo "# bus-id type" +- echo "dasd_channel $( lsdasd|grep $blockd|awk '{ print $1 " " $2 " " $3 " " $4}' )" +- +- echo "# dasdfmt - disk layout is either cdl for the compatible disk layout (default) or ldl" +- echo "# example usage: dasdfmt -b 4096 -d cdl -y /dev/dasda" +- layout=$(dasdview -x /dev/$blockd|grep "^format"|awk '{print $7}') +- blocksize=$( dasdview -i /dev/$blockd|grep blocksize|awk '{print $6}' ) +- echo "# dasdfmt $devname" +- echo "# dasdfmt -b -d -y " +- echo "dasdfmt -b $blocksize -d $layout -y $devname" ++ echo "# every DASD bus and channel" ++ echo "# Format: dasd_channel " ++ echo "dasd_channel $dasdnum $blockd" ++ ++ # We need to print the dasd_channel line even for ignored devices, ++ # otherwise we could have naming gaps and naming would change when ++ # recreating layout. ++ # E.g. if dasda is ignored, and dasdb is not, we would create only dasdb ++ # during recreation, but it would be named dasda. ++ if [ "$dasdstatus" != active ]; then ++ Log "Ignoring $blockd: it is not active (Status is $dasdstatus)" ++ continue ++ fi + fi + + #FIXME: exclude *rpmb (Replay Protected Memory Block) for nvme*, mmcblk* and uas +@@ -387,11 +396,38 @@ Log "Saving disk partitions." + devname=$(get_device_name $disk) + devsize=$(get_disk_size ${disk#/sys/block/}) + disktype=$(parted -s $devname print | grep -E "Partition Table|Disk label" | cut -d ":" -f "2" | tr -d " ") +- +- echo "# Disk $devname" +- echo "# Format: disk " +- echo "disk $devname $devsize $disktype" +- ++ if [ "$disktype" != "dasd" ]; then ++ echo "# Disk $devname" ++ echo "# Format: disk " ++ echo "disk $devname $devsize $disktype" ++ elif [[ $blockd == dasd* && "$ARCH" == "Linux-s390" ]] ; then ++ layout=$(dasdview -x $devname |grep "^format"|awk '{print $7}') ++ case "$layout" in ++ (NOT) ++ # NOT -> dasdview has printed "NOT formatted" ++ LogPrintError "Ignoring $blockd: it is not formatted" ++ continue ++ ;; ++ (LDL|CDL) ++ ;; ++ (*) ++ BugError "Invalid 'disk $devname' entry (unknown DASD layout $layout)" ++ ;; ++ esac ++ test $disktype || Error "No partition label type for DASD entry 'disk $devname'" ++ blocksize=$( get_block_size "$blockd" ) ++ if ! 
test $blocksize ; then ++ # fallback - ugly method ++ blocksize=$( dasdview -i $devname |grep blocksize|awk '{print $6}' ) ++ test $blocksize || Error "Unknown block size of DASD $devname" ++ fi ++ dasdcyls=$( get_dasd_cylinders "$blockd" ) ++ echo "# Disk $devname" ++ echo "# Format: disk " ++ echo "disk $devname $devsize $disktype $blocksize $layout $dasdtype $dasdcyls" ++ else ++ Error "Invalid 'disk $devname' entry (DASD partition label on non-s390 arch $ARCH)" ++ fi + echo "# Partitions on $devname" + echo "# Format: part /dev/" + extract_partitions "$devname" +diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh +index 91c5ff73..4f5b8f6f 100644 +--- a/usr/share/rear/lib/layout-functions.sh ++++ b/usr/share/rear/lib/layout-functions.sh +@@ -93,6 +93,12 @@ abort_recreate() { + restore_original_file "$LAYOUT_FILE" + } + ++abort_dasd_format() { ++ Log "Error detected during DASD formatting." ++ Log "Restoring saved original $DASD_FORMAT_FILE" ++ restore_original_file "$DASD_FORMAT_FILE" ++} ++ + # Test and log if a component $1 (type $2) needs to be recreated. + create_component() { + local device="$1" +@@ -722,6 +728,46 @@ get_block_size() { + fi + } + ++# Get the number of cylinders of a DASD. ++# The number of cylinders has the advantage of being fixed - size depends on formatting ++# and number of cylinders is valid even for unformatted DASDs, size is not. ++get_dasd_cylinders() { ++ local disk_name="${1##*/}" # /some/path/dasda -> dasda ++ local dasd_cyls ++ ++ dasd_cyls=$(dasdview -i /dev/$disk_name | grep cylinders | cut -d ':' -f2 | awk '{print $4}') ++ ### Make sure we always return a number ++ echo $(( dasd_cyls )) ++} ++ ++# Sometimes we know what the new device for the original device should be in a more reliable way ++# than by looking at disk sizes. THis information is called "mapping hints". Let's pass them ++# to the mapping code using the DISK_MAPPING_HINTS array. Each element of the array has the form ++# "/dev/source /dev/target" (space-separated). ++ ++# Output the mapping hint for the original device. ++function get_mapping_hint () { ++ local device="$1" ++ local hint mapping_hint_source mapping_hint_target ++ ++ for hint in "${DISK_MAPPING_HINTS[@]}"; do ++ mapping_hint_source=${hint%% *} ++ mapping_hint_target=${hint##* } ++ if [ "${device}" == "${mapping_hint_source}" ] ; then ++ echo "$mapping_hint_target" ++ return 0 ++ fi ++ done ++ return 1 ++} ++ ++# Determine if there is a mapping hint for the original device. ++function has_mapping_hint () { ++ local device="$1" ++ ++ get_mapping_hint "$device" > /dev/null ++} ++ + # Get the UUID of a device. + # Device is something like /dev/sda1. 
+ blkid_uuid_of_device() { diff --git a/SPECS/rear.spec b/SPECS/rear.spec new file mode 100644 index 0000000..ce8f0e9 --- /dev/null +++ b/SPECS/rear.spec @@ -0,0 +1,496 @@ +%define debug_package %{nil} + +Summary: Relax-and-Recover is a Linux disaster recovery and system migration tool +Name: rear +Version: 2.6 +Release: 12%{?dist} +License: GPLv3 +Group: Applications/File +URL: http://relax-and-recover.org/ + +Source0: https://github.com/rear/rear/archive/%{version}.tar.gz#/rear-%{version}.tar.gz +Patch4: rear-bz1492177-warning.patch +Patch29: rear-bz1832394.patch +Patch30: rear-sfdc02772301.patch +Patch31: rear-bz1945869.patch +Patch32: rear-bz1958247.patch +Patch33: rear-bz1930662.patch +Patch34: rear-asciidoc.patch +Patch35: rear-bz1983013.patch +Patch36: rear-bz1993296.patch +Patch37: rear-bz1747468.patch +Patch38: rear-bz2049091.patch +Patch39: rear-pr2675.patch +Patch40: rear-bz2048454.patch +Patch41: rear-bz2035939.patch +Patch42: rear-bz2083272.patch +Patch43: rear-bz2111049.patch +Patch44: rear-bz2104005.patch +Patch48: rear-bz2111059.patch +Patch49: pxe-rsync-output.patch +Patch50: rear-bz2119501.patch +Patch51: rear-bz2120736.patch +Patch52: rear-bz2091163.patch +Patch53: rear-bz2130945.patch +Patch54: rear-bz2131946.patch +Patch56: s390-no-clobber-disks.patch +Patch58: rear-device-shrinking-bz2223895.patch +Patch59: rear-usb-uefi-part-size-bz2228402.patch +Patch60: rear-luks-key-bz2228779.patch +Patch61: rear-uefi-usb-secureboot-bz2196445.patch +Patch62: rear-vg-command-not-found-bz2121476.patch +Patch64: rear-save-lvm-poolmetadatasize-RHEL-6984.patch +Patch65: rear-skip-useless-xfs-mount-options-RHEL-10478.patch + +# make initrd accessible only by root +# https://github.com/rear/rear/commit/89b61793d80bc2cb2abe47a7d0549466fb087d16 +Patch111: rear-CVE-2024-23301.patch + +# Support saving and restoring hybrid BIOS/UEFI bootloader setup and clean up bootloader detection +# https://github.com/rear/rear/pull/3145 +Patch113: rear-restore-hybrid-bootloader-RHEL-16864.patch + +# Skip invalid disk drives (zero sized, no media) when saving layout +# https://github.com/rear/rear/commit/808b15a677191aac62faadd1bc71885484091316 +Patch115: rear-skip-invalid-drives-RHEL-22863.patch + +### Dependencies on all distributions +BuildRequires: asciidoc +Requires: binutils +Requires: ethtool +Requires: gzip +Requires: iputils +Requires: parted +Requires: tar +Requires: openssl +Requires: gawk +Requires: attr +Requires: bc +Requires: file +Requires: dhcp-client + +### If you require NFS, you may need the below packages +#Requires: nfsclient portmap rpcbind + +### We drop LSB requirements because it pulls in too many dependencies +### The OS is hardcoded in /etc/rear/os.conf instead +#Requires: redhat-lsb + +### Required for Bacula/MySQL support +#Requires: bacula-mysql + +### Required for OBDR +#Requires: lsscsi sg3_utils + +### Optional requirement +#Requires: cfg2html + +%ifarch x86_64 i686 +Requires: syslinux +%endif +%ifarch x86_64 i686 aarch64 +# We need mkfs.vfat for recreating EFI System Partition +Recommends: dosfstools +%endif +%ifarch ppc ppc64 +Requires: yaboot +%endif + +%ifarch ppc ppc64 ppc64le +# Called by grub2-install (except on PowerNV) +Requires: /usr/sbin/ofpathname +# Needed to make PowerVM LPARs bootable +Requires: /usr/sbin/bootlist +%endif +%ifarch s390x +# Contain many utilities for working with DASDs +Requires: s390utils-base +Requires: s390utils-core +%endif + +Requires: crontabs +Requires: iproute +# No ISO image support on s390x (may change when we add support for 
LPARs) +%ifnarch s390x +Requires: xorriso +%endif + +# mingetty is not available anymore with RHEL 7 (use agetty instead via systemd) +# Note that CentOS also has rhel defined so there is no need to use centos +%if 0%{?rhel} && 0%{?rhel} > 6 +Requires: util-linux +%else +Requires: mingetty +Requires: util-linux +%endif + +### The rear-snapshot package is no more +#Obsoletes: rear-snapshot + +%description +Relax-and-Recover is the leading Open Source disaster recovery and system +migration solution. It comprises of a modular +frame-work and ready-to-go workflows for many common situations to produce +a bootable image and restore from backup using this image. As a benefit, +it allows to restore to different hardware and can therefore be used as +a migration tool as well. + +Currently Relax-and-Recover supports various boot media (incl. ISO, PXE, +OBDR tape, USB or eSATA storage), a variety of network protocols (incl. +sftp, ftp, http, nfs, cifs) as well as a multitude of backup strategies +(incl. IBM TSM, HP DataProtector, Symantec NetBackup, EMC NetWorker, +Bacula, Bareos, BORG, Duplicity, rsync). + +Relax-and-Recover was designed to be easy to set up, requires no maintenance +and is there to assist when disaster strikes. Its setup-and-forget nature +removes any excuse for not having a disaster recovery solution implemented. + +Professional services and support are available. + +%pre +if [ $1 -gt 1 ] ; then +# during upgrade remove obsolete directories +%{__rm} -rf %{_datadir}/rear/output/NETFS +fi + +%prep +%setup +%patch4 -p1 +%patch29 -p1 +%patch30 -p1 +%patch31 -p1 +%patch32 -p1 +%patch33 -p1 +%patch34 -p1 +%patch35 -p1 +%patch36 -p1 +%patch37 -p1 +%patch38 -p1 +%patch39 -p1 +%patch40 -p1 +%patch41 -p1 +%patch42 -p1 +%patch43 -p1 +%patch44 -p1 +%patch48 -p1 +%patch49 -p1 +%patch50 -p1 +%patch51 -p1 +%patch52 -p1 +%patch53 -p1 +%patch54 -p1 +%patch56 -p1 +%patch58 -p1 +%patch59 -p1 +%patch60 -p1 +%patch61 -p1 +%patch62 -p1 +%patch64 -p1 +%patch65 -p1 +%patch111 -p1 +%patch113 -p1 +%patch115 -p1 + +echo "30 1 * * * root test -f /var/lib/rear/layout/disklayout.conf && /usr/sbin/rear checklayout || /usr/sbin/rear mkrescue" >rear.cron + +### Add a specific os.conf so we do not depend on LSB dependencies +%{?fedora:echo -e "OS_VENDOR=Fedora\nOS_VERSION=%{?fedora}" >etc/rear/os.conf} +%{?rhel:echo -e "OS_VENDOR=RedHatEnterpriseServer\nOS_VERSION=%{?rhel}" >etc/rear/os.conf} + +%build +# asciidoc writes a timestamp to files it produces, based on the last +# modified date of the source file, but is sensible to the timezone. +# This makes the results differ according to the timezone of the build machine +# and spurious changes will be seen. +# Set the timezone to UTC as a workaround. 
+# https://wiki.debian.org/ReproducibleBuilds/TimestampsInDocumentationGeneratedByAsciidoc +TZ=UTC %{__make} -C doc + +%install +%{__rm} -rf %{buildroot} +%{__make} install DESTDIR="%{buildroot}" +%{__install} -Dp -m0644 rear.cron %{buildroot}%{_sysconfdir}/cron.d/rear + +%files +%defattr(-, root, root, 0755) +%doc MAINTAINERS COPYING README.adoc doc/*.txt doc/user-guide/relax-and-recover-user-guide.html +%doc %{_mandir}/man8/rear.8* +%config(noreplace) %{_sysconfdir}/cron.d/rear +%config(noreplace) %{_sysconfdir}/rear/ +%config(noreplace) %{_sysconfdir}/rear/cert/ +%{_datadir}/rear/ +%{_localstatedir}/lib/rear/ +%{_sbindir}/rear + +%changelog +* Fri Mar 29 2024 MSVSphere Packaging Team - 2.6-12 +- Rebuilt for MSVSphere 8.10 beta + +* Wed Feb 21 2024 Pavel Cahyna - 2.6-12 +- Skip invalid disk drives when saving layout PR 3047 +- Support saving and restoring hybrid BIOS/UEFI bootloader, PRs 3145 3136 +- make initrd accessible only by root (CVE-2024-23301), PR 3123 +- Backport PR 3061 to save LVM pool metadata volume size in disk layout + and restore it +- Backport PR 3058 to skip useless xfs mount options when mounting + during recovery, prevents mount errors like "logbuf size must be greater + than or equal to log stripe size" + +* Tue Aug 22 2023 Pavel Cahyna - 2.6-10 +- Apply PR 3027 to ensure correct creation of the rescue environment + when a file is shrinking while being read +- Backport PR 2774 to increase USB_UEFI_PART_SIZE to 1024 MiB +- Apply upstream patch for temp dir usage with LUKS to ensure + that during recovery an encrypted disk can be unlocked using a keyfile +- Backport upstream PR 3031: Secure Boot support for OUTPUT=USB +- Correct a mistake done when backporting PR 2691 + +* Wed Feb 22 2023 Pavel Cahyna - 2.6-9 +- Backport PR2943 to fix s390x dasd formatting +- Require s390utils-{core,base} on s390x + +* Sun Jan 15 2023 Pavel Cahyna - 2.6-8 +- Apply PR2903 to protect against colons in pvdisplay output +- Apply PR2873 to fix initrd regeneration on s390x +- Apply PR2431 to migrate XFS configuration files + +* Wed Aug 24 2022 Pavel Cahyna - 2.6-7 +- Avoid stderr message about irrelevant broken links +- Changes for NetBackup (NBU) 9.x support + +* Tue Aug 9 2022 Pavel Cahyna - 2.6-6 +- Restore usr/share/rear/output/PXE/default/820_copy_to_net.sh + removed in 2.4-19 with rsync refactor. + It is still needed to use a rsync OUTPUT_URL when OUTPUT=PXE and BACKUP=RSYNC + +* Mon Aug 8 2022 Pavel Cahyna - 2.6-5 +- Apply PR2795 to detect changes in system files between backup + and rescue image +- Apply PR2808 to exclude dev/watchdog* from recovery system +- Backport upstream PRs 2827 and 2839 to pass -y to lvcreate instead of one "y" + on stdin +- Apply PR2811 to add the PRE/POST_RECOVERY_COMMANDS directives +- Recommend dosfstools on x86 and aarch64, needed for EFI System Partition + +* Sun Feb 27 2022 Pavel Cahyna - 2.6-4 +- Apply PR2675 to fix leftover temp dir bug (introduced in backported PR2625) +- Apply PR2603 to ignore unused PV devices +- Apply upstream PR2750 to avoid exclusion of wanted multipath devices +- Remove unneeded xorriso dep on s390x (no ISO image support there) +- Apply upstream PR2736 to add the EXCLUDE_{IP_ADDRESSES,NETWORK_INTERFACES} + options + +* Mon Aug 30 2021 Pavel Cahyna - 2.6-3 +- Add patch for better handling of thin pools and other LV types not supported + by vgcfgrestore + Resolves: rhbz1747468 + +* Mon Aug 16 2021 Pavel Cahyna - 2.6-2 +- Fix multipath performance regression in 2.6, introduced by upstream PR #2299. 
+ Resolves: rhbz1993296 + +* Sat Aug 7 2021 Pavel Cahyna - 2.6-1 +- Rebase to upstream release 2.6 and drop unneded patches. + Add S/390 support. + Resolves: rhbz1983003, rhbz1988493, rhbz1868421 +- Add missing dependencies on dhcp-client (see #1926451), file +- Patch documents to be compatible with asciidoc, + we don't have asciidoctor +- On POWER add bootlist & ofpathname to the list of required programs + conditionally (bootlist only if running under PowerVM, ofpathname + always except on PowerNV) - upstream PR2665, add them to package + dependencies + Resolves: rhbz1983013 + +* Tue May 11 2021 Pavel Cahyna - 2.4-19 +- Backport PR2608: + Fix setting boot path in case of UEFI partition (ESP) on MD RAID + Resolves: rhbz1945869 +- Backport PR2625 + Prevents accidental backup removal in case of errors + Resolves: rhbz1958247 +- Fix rsync error and option handling + Fixes metadata storage when rsync user is not root + Resolves: rhbz1930662 + +* Mon Jan 11 2021 Vitezslav Crhonek - 2.4-18 +- Fix typo in default.conf + Resolves: #1882060 +- Modify the cron command to avoid an e-mail with error message after + ReaR is installed but not properly configured when the cron command + is triggered for the first time + Resolves: #1729499 +- Backport upstream code related to LUKS2 support + Resolves: #1832394 +- Changes for NetBackup (NBU) support, upstream PR2544 + Resolves: #1898080 + +* Mon Aug 10 2020 Pavel Cahyna - 2.4-17 +- Update the Rubrik patch to include complete PR2445 + Resolves: rhbz1867696 + +* Thu Jun 04 2020 Václav Doležal - 2.4-16 +- Apply upstream PR2373: Skip Longhorn Engine replica devices + Resolves: rhbz1843809 + +* Mon Jun 01 2020 Václav Doležal - 2.4-15 +- Apply upstream PR2346: Have '-iso-level 3' option also for ppc64le + Resolves: rhbz1729502 + +* Mon Jun 01 2020 Václav Doležal - 2.4-14 +- Backport remaining Rubrik related patches. + Related: rhbz1743303 + +* Thu May 21 2020 Václav Doležal - 2.4-13 +- Backport upstream PR #2249 to add support for Rubrik backup method. + Resolves: rhbz1743303 + +* Mon Dec 16 2019 Pavel Cahyna - 2.4-12 +- Backport upstream PR #2293 to use grub-mkstandalone instead of + grub-mkimage for UEFI (ISO image and GRUB_RESCUE image generation). + Avoids hardcoded module lists or paths and so is more robust. + Fixes an issue where the generated ISO image had no GRUB2 modules and + was therefore unbootable. The backport does not add new config settings. + Resolves: rhbz1737042 + +* Mon Nov 18 2019 Pavel Cahyna - 2.4-11 +- Apply upstream PR2122: add additional NBU library path to fix support for + NetBackup 8. + Resolves: rhbz1747393 +- Apply upstream PR2021: Be safe against empty docker_root_dir (issue 1989) + Resolves: rhbz1729493, where ReaR can not create a backup in rescue mode, + because it thinks that the Docker daemon is running and hits the problem + with empty docker_root_dir. +- Apply upstream PR2223 and commit 36cf20e to avoid an empty string in the + list of users to clone, which can lead to bash overflow with lots of users + and groups per user and to wrong passwd/group files in the rescue system. + Resolves: rhbz1729495 +- Backport of Upstream fix for issue 2035: /run is not mounted in the rescue + chroot, which causes LVM to hang, especially if rebuilding initramfs. 
+ Resolves: rhbz1757488 +- Backport upstream PR 2218: avoid keeping build dir on errors + by default when used noninteractively + Resolves: rhbz1729501 +- Apply upstream PR2173 - Cannot restore using Bacula method + due to "bconsole" not showing its prompt + Resolves: rhbz1726992 +- Backport fix for upstream issue 2187 (disklayout.conf file contains + duplicate lines, breaking recovery in migration mode or when + thin pools are used). PR2194, 2196. + Resolves: rhbz1732308 + +* Tue Jun 4 2019 Pavel Cahyna - 2.4-10 +- Apply upstream patch PR1993 + Automatically exclude $BUILD_DIR from the backup + Resolves: rhbz1677733 + +* Mon Jun 3 2019 Pavel Cahyna - 2.4-9 +- Update fix for bz#1657725. Previous fix was not correct, bootlist was still + invoked only with one partition argument due to incorrect array expansion. + See upstream PR2096, 2097, 2098. + +* Tue May 28 2019 Pavel Cahyna - 2.4-8 +- Apply upstream PR2065 (record permanent MAC address for team members) + Resolves: rhbz1685178 + +* Tue May 28 2019 Pavel Cahyna - 2.4-7 +- Apply upstream PR2034 (multipath optimizations for lots of devices) + +* Mon Jan 14 2019 Pavel Cahyna - 2.4-6 +- Require xorriso instead of genisoimage, it is now the preferred method + and supports files over 4GB in size. +- Apply upstream PR2004 (support for custom network interface naming) +- Backport upstream PR2001 (UEFI support broken on Fedora 29 and RHEL 8) + +* Thu Dec 13 2018 Pavel Cahyna - 2.4-4 +- Backport fixes for upstream bugs 1974 and 1975 +- Backport fix for upstream bug 1913 (backup succeeds in case of tar error) +- Backport fix for upstream bug 1926 (support for LACP bonding and teaming) +- Apply upstream PR1954 (record permanent MAC address for bond members) + +* Thu Aug 09 2018 Pavel Cahyna - 2.4-3 +- Merge some spec changes from Fedora. +- Apply upstream patch PR1887 + LPAR/PPC64 bootlist is incorrectly set when having multiple 'prep' partitions +- Apply upstream patch PR1885 + Partition information recorded is unexpected when disk has 4K block size + +* Wed Jul 18 2018 Pavel Cahyna - 2.4-2 +- Build and install the HTML user guide. #1418459 + +* Wed Jun 27 2018 Pavel Cahyna - 2.4-1 +- Rebase to version 2.4, drop patches integrated upstream + Resolves #1534646 #1484051 #1498828 #1571266 #1496518 + +* Wed Feb 14 2018 Pavel Cahyna - 2.00-6 +- Ensure that NetBackup is started automatically upon recovery (PR#1544) + Also do not kill daemons spawned by sysinit.service at the service's end + (PR#1610, applies to NetBackup and also to dhclient) + Resolves #1506231 +- Print a warning if grub2-mkimage is about to fail and suggest what to do. + bz#1492177 +- Update the patch for #1388653 to the one actually merged upstream (PR1418) + +* Fri Jan 12 2018 Pavel Cahyna - 2.00-5 +- cd to the correct directory before md5sum to fix BACKUP_INTEGRITY_CHECK. + Upstream PR#1685, bz1532676 + +* Mon Oct 23 2017 Pavel Cahyna - 2.00-4 +- Retry get_disk_size to fix upstream #1370, bz1388653 + +* Wed Sep 13 2017 Pavel Cahyna - 2.00-3 +- Fix rear mkrescue on systems w/o UEFI. 
Upstream PR#1481 issue#1478 +- Resolves: #1479002 + +* Wed May 17 2017 Jakub Mazanek - 2.00-2 +- Excluding Archs s390 and s390x +- Related #1355667 + +* Mon Feb 20 2017 Jakub Mazanek - 2.00-1 +- Rebase to version 2.00 +- Resolves #1355667 + +* Tue Jul 19 2016 Petr Hracek - 1.17.2-6 +- Replace experimental grep -P with grep -E +Resolves: #1290205 + +* Wed Mar 23 2016 Petr Hracek - 1.17.2-5 +- Remove backuped patched files +Related: #1283930 + +* Wed Mar 23 2016 Petr Hracek - 1.17.2-4 +- Rear recovery over teaming interface will not work +Resolves: #1283930 + +* Tue Mar 08 2016 Petr Hracek - 1.17.2-3 +- Replace experimental grep -P with grep -E +Resolves: #1290205 + +* Tue Feb 23 2016 Petr Hracek - 1.17.2-2 +- rear does not require syslinux +- changing to arch package so that syslinux is installed +- Resolves: #1283927 + +* Mon Sep 14 2015 Petr Hracek - 1.17.2-1 +- New upstream release 1.17.2 +Related: #1059196 + +* Wed May 13 2015 Petr Hracek 1.17.0-2 +- Fix Source tag +Related: #1059196 + +* Mon May 04 2015 Petr Hracek 1.17.0-1 +- Initial package for RHEL 7 +Resolves: #1059196 + +* Fri Oct 17 2014 Gratien D'haese +- added the suse_version lines to identify the corresponding OS_VERSION + +* Fri Jun 20 2014 Gratien D'haese +- add %%pre section + +* Thu Apr 11 2013 Gratien D'haese +- changes Source + +* Thu Jun 03 2010 Dag Wieers +- Initial package. (using DAR)