forked from msvsphere/leapp-repository
parent: 7995cf5d96
commit: ba848801d8
@@ -0,0 +1,6 @@
MAKE_FILE_DIR := $(realpath $(dir $(firstword $(MAKEFILE_LIST))))

.PHONY: srpm

srpm:
	bash $(MAKE_FILE_DIR)/build.sh $(outdir)
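A minimal sketch of invoking this wrapper, assuming build.sh sits next to this Makefile (the output path is illustrative):

    # build an SRPM; outdir is passed through to build.sh as its first argument
    make srpm outdir=/tmp/srpm-out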
@@ -0,0 +1,52 @@
#!/bin/bash
set -x

REPONAME=leapp-actors
SPECNAME=leapp-repository

# output directory for the SRPM; the first argument overrides the default ($PWD)
OUTDIR="$PWD"
if [ -n "$1" ]; then
    OUTDIR="$(realpath "$1")"
fi

# make sure the required tools are available
command -v which > /dev/null || dnf -y install which

if [ -z "$(which git)" ]; then
    dnf -y install git-core
fi

# when not run from a git checkout, clone the repository first
if ! git status > /dev/null 2>&1; then
    rm -rf "$REPONAME"
    git clone https://github.com/leapp-to/$REPONAME
    POPD=$(pushd "$REPONAME")
fi

BRANCH=master
# number of commits since the last tag (plus an optional external counter)
LEAPP_PATCHES_SINCE_RELEASE="$(git log $(git describe --abbrev=0)..HEAD --format=oneline | wc -l)$LEAPP_PATCHES_SINCE_RELEASE_EXTERNAL"
echo LEAPP_PATCHES_SINCE_RELEASE=$LEAPP_PATCHES_SINCE_RELEASE

# derive the version and dist fields from the last tag
VERSION=$(git describe --abbrev=0 | cut -d- -f 2)
DIST=$(git describe --abbrev=0 | cut -d- -f 3)
LEAPP_BUILD_TAG=".$DIST.$(date --rfc-3339=date | tr -d '-').git.$LEAPP_PATCHES_SINCE_RELEASE"

if [ -n "$POPD" ]
then
    popd
fi


echo LEAPP_BUILD_TAG=$LEAPP_BUILD_TAG
export toplevel=$(git rev-parse --show-toplevel)
# create the tarball and replace the spec file with one carrying the build tag
git archive --remote "$toplevel" --prefix $REPONAME-master/ HEAD > $REPONAME-$VERSION.tar
tar --delete $REPONAME-master/$SPECNAME.spec --file $REPONAME-$VERSION.tar
mkdir -p $REPONAME-master
/bin/cp "$toplevel/$SPECNAME.spec" $REPONAME-master/$SPECNAME.spec
sed -i "s/^%global dist.*$/%global dist $LEAPP_BUILD_TAG/g" $REPONAME-master/$SPECNAME.spec
tar --append --file $REPONAME-$VERSION.tar $REPONAME-master/$SPECNAME.spec

gzip < $REPONAME-$VERSION.tar > $REPONAME-$VERSION.tar.gz

echo $PWD $OUTDIR
SRPMDIR="$OUTDIR"
rpmbuild --define "_srcrpmdir $SRPMDIR" --define "version $VERSION" --define "gittag master" -ts ./$REPONAME-$VERSION.tar.gz
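For orientation, a sketch of the values this script computes, assuming a hypothetical last tag of the form release-0.9-1 with three commits on top of it, built on 2023-05-01 (all values illustrative):

    # git describe --abbrev=0          -> release-0.9-1   (hypothetical tag)
    # cut -d- -f2 / -f3                -> VERSION=0.9, DIST=1
    # git log <tag>..HEAD | wc -l      -> LEAPP_PATCHES_SINCE_RELEASE=3
    # date --rfc-3339=date | tr -d '-' -> 20230501
    # resulting LEAPP_BUILD_TAG        -> .1.20230501.git.3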
@@ -0,0 +1,30 @@
name: Codespell

on:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master

jobs:
  codespell:
    name: Check for spelling errors
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
      - uses: codespell-project/actions-codespell@master
        with:
          ignore_words_list: ro,fo,couldn,repositor
          skip: "./repos/system_upgrade/common/actors/storagescanner/tests/files/mounts,\
            ./repos/system_upgrade/el7toel8/actors/networkmanagerreadconfig/tests/files/nm_cfg_file_error,\
            ./repos/system_upgrade/el8toel9/actors/xorgdrvfact/tests/files/journalctl-xorg-intel,\
            ./repos/system_upgrade/el8toel9/actors/xorgdrvfact/tests/files/journalctl-xorg-qxl,\
            ./repos/system_upgrade/el8toel9/actors/xorgdrvfact/tests/files/journalctl-xorg-without-qxl,\
            ./repos/system_upgrade/common/actors/scancpu/tests/files/lscpu_s390x,\
            ./etc/leapp/files/device_driver_deprecation_data.json,\
            ./etc/leapp/files/pes-events.json,\
            ./etc/leapp/files/repomap.json,\
            ./repos/system_upgrade/common/files/prod-certs"
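A sketch of the equivalent local check, assuming codespell is installed (e.g. via pip install codespell); the skip list is trimmed here for brevity:

    codespell --ignore-words-list ro,fo,couldn,repositor \
        --skip "./etc/leapp/files/pes-events.json,./etc/leapp/files/repomap.json" .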
@@ -0,0 +1,29 @@
---
# https://github.com/redhat-plumbers-in-action/differential-shellcheck#readme

name: Differential ShellCheck
on:
  pull_request:
    branches: [master]

permissions:
  contents: read

jobs:
  lint:
    runs-on: ubuntu-latest

    permissions:
      security-events: write
      pull-requests: write

    steps:
      - name: Repository checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Differential ShellCheck
        uses: redhat-plumbers-in-action/differential-shellcheck@v3
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
@@ -0,0 +1,37 @@
# IMPORTANT NOTE
# In this workflow there must NOT be a checkout action - for security reasons.
# More info:
# https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request_target
# https://securitylab.github.com/research/github-actions-preventing-pwn-requests/

name: PR welcome message
on:
  pull_request_target:
    types: opened

jobs:
  pr_comment:
    runs-on: ubuntu-latest
    steps:
      - name: Create comment
        uses: peter-evans/create-or-update-comment@a35cf36e5301d70b76f316e867e7788a55a31dae
        with:
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            ## **Thank you for contributing to the Leapp project!**
            Please note that every PR needs to comply with the [Leapp Guidelines](https://leapp.readthedocs.io/en/latest/contributing.html#) and must pass all tests in order to be mergeable.
            If you want to request a review or rebuild a package in copr, you can use the following commands as a comment:
            - **review please @oamg/developers** to notify leapp developers of the review request
            - **/packit copr-build** to submit a public copr build using packit

            Packit will automatically schedule regression tests for this PR's build and the latest upstream leapp build. If you need a different version of leapp, e.g. from PR#42, use `/packit test oamg/leapp#42`

            To launch regression testing, public members of the oamg organization can leave one of the following comments:
            - **/rerun** to schedule basic regression tests using this PR build and the latest upstream leapp build as artifacts
            - **/rerun 42** to schedule basic regression tests using this PR build and leapp\*PR42\* as artifacts
            - **/rerun-sst** to schedule sst tests using this PR build and the latest upstream leapp build as artifacts
            - **/rerun-sst 42** to schedule sst tests using this PR build and leapp\*PR42\* as artifacts

            Please [open a ticket](https://url.corp.redhat.com/oamg-ci-issue) in case you experience a technical problem with the CI. (RH internal only)

            **Note:** In case tests are not triggered automatically on a new PR/commit, or stay pending for a long time, please contact leapp-infra.
@@ -0,0 +1,163 @@
name: reuse-copr-build@TF

on:
  workflow_call:
    secrets:
      FEDORA_COPR_LOGIN:
        required: true
      FEDORA_COPR_TOKEN:
        required: true
    outputs:
      artifacts:
        description: "A string with the test artifacts to install in the tft test env"
        value: ${{ jobs.reusable_workflow_copr_build_job.outputs.artifacts }}

jobs:
  reusable_workflow_copr_build_job:
    # This job only runs for '/rerun' pull request comments by an owner, member, or collaborator of the repo/organization.
    name: Build copr builds for tft tests
    runs-on: ubuntu-20.04
    outputs:
      artifacts: ${{ steps.gen_artifacts.outputs.artifacts }}
    if: |
      github.event.issue.pull_request
      && startsWith(github.event.comment.body, '/rerun')
      && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
    steps:
      - name: Update repository
        id: repo_update
        run: sudo apt-get update

      - name: Install necessary deps
        id: deps_install
        run: sudo apt-get install -y libkrb5-dev

      - name: Get pull request number
        id: pr_nr
        run: |
          PR_URL="${{ github.event.comment.issue_url }}"
          echo "::set-output name=pr_nr::${PR_URL##*/}"

      - name: Checkout
        # TODO: The correct way to checkout would be to use a similar approach as in the get_commit_by_timestamp function of
        # the github gluetool module (i.e. do not use HEAD but the last commit before the comment).
        id: checkout
        uses: actions/checkout@v2
        with:
          ref: "refs/pull/${{ steps.pr_nr.outputs.pr_nr }}/head"

      - name: Get ref and sha
        id: ref_sha
        run: |
          echo "::set-output name=sha::$(git rev-parse --short HEAD)"
          echo "::set-output name=ref::refs/pull/${{ steps.pr_nr.outputs.pr_nr }}/head"

      - name: Trigger copr build
        id: copr_build
        env:
          COPR_CONFIG: "copr_fedora.conf"
          COPR_CHROOT: "epel-7-x86_64,epel-8-x86_64"
          COPR_REPO: "@oamg/leapp"
        run: |
          cat << EOF > $COPR_CONFIG
          [copr-cli]
          login = ${{ secrets.FEDORA_COPR_LOGIN }}
          username = oamgbot
          token = ${{ secrets.FEDORA_COPR_TOKEN }}
          copr_url = https://copr.fedorainfracloud.org
          # expiration date: 2030-07-04
          EOF

          pip install copr-cli
          PR=${{ steps.pr_nr.outputs.pr_nr }} COPR_CONFIG=$COPR_CONFIG COPR_REPO="$COPR_REPO" COPR_CHROOT=$COPR_CHROOT make copr_build | tee copr.log

          COPR_URL=$(grep -Po 'https://copr.fedorainfracloud.org/coprs/build/\d+' copr.log)
          echo "::set-output name=copr_url::${COPR_URL}"
          echo "::set-output name=copr_id::${COPR_URL##*/}"

      - name: Add comment with copr build url
        # TODO: Create a comment when the copr build fails.
        id: link_copr
        uses: actions/github-script@v4
        with:
          script: |
            github.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: 'Copr build succeeded: ${{ steps.copr_build.outputs.copr_url }}'
            })

      - name: Get dependent leapp pr number from rerun comment
        uses: actions-ecosystem/action-regex-match@v2
        id: leapp_pr_regex_match
        with:
          text: ${{ github.event.comment.body }}
          regex: '^/(rerun|rerun-sst)\s+([0-9]+)\s*$'

      - name: If leapp_pr was specified in the comment - trigger copr build
        # TODO: XXX FIXME This should schedule a copr build for leapp, but for now it just sets an env var
        id: leapp_pr
        if: ${{ steps.leapp_pr_regex_match.outputs.match != '' }}
        run: |
          echo "::set-output name=leapp_pr::${{ steps.leapp_pr_regex_match.outputs.group2 }}"

      - name: Checkout leapp
        id: checkout_leapp
        if: ${{ steps.leapp_pr_regex_match.outputs.match != '' }}
        uses: actions/checkout@v2
        with:
          repository: "oamg/leapp"
          ref: "refs/pull/${{ steps.leapp_pr.outputs.leapp_pr }}/head"

      - name: Get ref and sha for leapp
        id: ref_sha_leapp
        if: ${{ steps.leapp_pr_regex_match.outputs.match != '' }}
        run: |
          echo "::set-output name=sha::$(git rev-parse --short HEAD)"
          echo "::set-output name=ref::refs/pull/${{ steps.leapp_pr.outputs.leapp_pr }}/head"

      - name: Trigger copr build for leapp
        id: copr_build_leapp
        if: ${{ steps.leapp_pr_regex_match.outputs.match != '' }}
        env:
          COPR_CONFIG: "copr_fedora.conf"
          COPR_CHROOT: "epel-7-x86_64,epel-8-x86_64"
          COPR_REPO: "@oamg/leapp"
        run: |
          cat << EOF > $COPR_CONFIG
          [copr-cli]
          login = ${{ secrets.FEDORA_COPR_LOGIN }}
          username = oamgbot
          token = ${{ secrets.FEDORA_COPR_TOKEN }}
          copr_url = https://copr.fedorainfracloud.org
          # expiration date: 2030-07-04
          EOF

          pip install copr-cli
          PR=${{ steps.leapp_pr.outputs.leapp_pr }} COPR_CONFIG=$COPR_CONFIG COPR_REPO="$COPR_REPO" COPR_CHROOT=$COPR_CHROOT make copr_build | tee copr.log

          COPR_URL=$(grep -Po 'https://copr.fedorainfracloud.org/coprs/build/\d+' copr.log)
          echo "::set-output name=copr_url::${COPR_URL}"
          echo "::set-output name=copr_id::${COPR_URL##*/}"

      - name: Add comment with copr build url for leapp
        # TODO: Create a comment when the copr build fails.
        id: link_copr_leapp
        if: ${{ steps.leapp_pr_regex_match.outputs.match != '' }}
        uses: actions/github-script@v4
        with:
          script: |
            github.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: 'Copr build succeeded: ${{ steps.copr_build_leapp.outputs.copr_url }}'
            })

      - name: Generate artifacts output
        id: gen_artifacts
        env:
          ARTIFACTS: ${{ steps.leapp_pr_regex_match.outputs.match != '' && format('{0};{1}', steps.copr_build_leapp.outputs.copr_id, steps.copr_build.outputs.copr_id) || steps.copr_build.outputs.copr_id }}
        run: |
          echo "::set-output name=artifacts::${{ env.ARTIFACTS }}"
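The two build steps boil down to one make invocation; a sketch of reproducing it locally, assuming a valid copr-cli config with an API token and the repository's copr_build target (the PR number is illustrative):

    pip install copr-cli
    PR=1234 COPR_CONFIG=~/.config/copr COPR_REPO="@oamg/leapp" \
        COPR_CHROOT=epel-7-x86_64,epel-8-x86_64 make copr_build | tee copr.log
    # the build id that becomes the 'artifacts' output is the trailing number of this URL
    grep -Po 'https://copr.fedorainfracloud.org/coprs/build/\d+' copr.log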
@@ -0,0 +1,131 @@
name: tmt@TF

on:
  issue_comment:
    types:
      - created

jobs:
  call_workflow_copr_build:
    uses: ./.github/workflows/reuse-copr-build.yml
    secrets: inherit

  call_workflow_tests_79to88_integration:
    needs: call_workflow_copr_build
    uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
    secrets: inherit
    with:
      copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
      tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*8to9)(?!.*max_sst)"
      pull_request_status_name: "7.9to8.8"
      variables: 'SOURCE_RELEASE=7.9;TARGET_RELEASE=8.8;LEAPPDATA_BRANCH=upstream'
    if: |
      github.event.issue.pull_request
      && ! startsWith(github.event.comment.body, '/rerun-sst')
      && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)

  call_workflow_tests_79to86_integration:
    needs: call_workflow_copr_build
    uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
    secrets: inherit
    with:
      copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
      tmt_plan_regex: "^(?!.*max_sst)(.*tier1)"
      variables: 'SOURCE_RELEASE=7.9;TARGET_RELEASE=8.6;LEAPPDATA_BRANCH=upstream'
      pull_request_status_name: "7.9to8.6"
    if: |
      github.event.issue.pull_request
      && ! startsWith(github.event.comment.body, '/rerun-sst')
      && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)

  call_workflow_tests_79to88_sst:
    needs: call_workflow_copr_build
    uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
    secrets: inherit
    with:
      copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
      tmt_plan_regex: "^(?!.*tier[2-3].*)(.*max_sst.*)"
      pull_request_status_name: "7.9to8.8-sst"
      update_pull_request_status: 'false'
      variables: 'SOURCE_RELEASE=7.9;TARGET_RELEASE=8.8;LEAPPDATA_BRANCH=upstream'
    if: |
      github.event.issue.pull_request
      && startsWith(github.event.comment.body, '/rerun-sst')
      && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)

  call_workflow_tests_7to8_aws:
    needs: call_workflow_copr_build
    uses: oamg/leapp/.github/workflows/reuse-tests-7to8.yml@master
    secrets: inherit
    with:
      copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
      tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*8to9)(.*e2e)"
      compose: "RHEL-7.9-rhui"
      environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys; echo 42; yum-config-manager --enable rhel-7-server-rhui-optional-rpms"}}'
      pull_request_status_name: "7to8-aws-e2e"
      variables: "SOURCE_RELEASE=7.9;TARGET_RELEASE=8.6;RHUI=aws;LEAPPDATA_BRANCH=upstream"
    if: |
      github.event.issue.pull_request
      && ! startsWith(github.event.comment.body, '/rerun-sst')
      && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)

  call_workflow_tests_86to90_integration:
    needs: call_workflow_copr_build
    uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
    secrets: inherit
    with:
      copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
      tmt_plan_regex: "^(?!.*max_sst)(.*tier1)"
      variables: 'SOURCE_RELEASE=8.6;TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms;LEAPPDATA_BRANCH=upstream'
      pull_request_status_name: "8.6to9.0"
    if: |
      github.event.issue.pull_request
      && ! startsWith(github.event.comment.body, '/rerun-sst')
      && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)

  call_workflow_tests_88to92_integration:
    needs: call_workflow_copr_build
    uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
    secrets: inherit
    with:
      copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
      tmt_plan_regex: "^(?!.*max_sst)(.*tier1)"
      variables: 'SOURCE_RELEASE=8.8;TARGET_RELEASE=9.2;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-rpms,rhel-8-for-x86_64-baseos-rpms;LEAPPDATA_BRANCH=upstream'
      compose: "RHEL-8.8.0-Nightly"
      pull_request_status_name: "8.8to9.2"
      tmt_context: "distro=rhel-8.8"
    if: |
      github.event.issue.pull_request
      && ! startsWith(github.event.comment.body, '/rerun-sst')
      && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)

  call_workflow_tests_86to90_sst:
    needs: call_workflow_copr_build
    uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
    secrets: inherit
    with:
      copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
      tmt_plan_regex: "^(?!.*tier[2-3].*)(.*max_sst.*)"
      variables: 'SOURCE_RELEASE=8.6;TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms;LEAPPDATA_BRANCH=upstream'
      pull_request_status_name: "8to9-sst"
      update_pull_request_status: 'false'
    if: |
      github.event.issue.pull_request
      && startsWith(github.event.comment.body, '/rerun-sst')
      && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)

  call_workflow_tests_86to90_aws:
    needs: call_workflow_copr_build
    uses: oamg/leapp/.github/workflows/reuse-tests-8to9.yml@master
    secrets: inherit
    with:
      copr_artifacts: ${{ needs.call_workflow_copr_build.outputs.artifacts }}
      tmt_plan_regex: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*7to8)(.*e2e)"
      compose: "RHEL-8.6-rhui"
      environment_settings: '{"provisioning": {"post_install_script": "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"}}'
      pull_request_status_name: "8to9-aws-e2e"
      variables: 'SOURCE_RELEASE=8.6;TARGET_RELEASE=9.0;TARGET_KERNEL=el9;RHSM_REPOS=rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms;RHUI=aws;LEAPPDATA_BRANCH=upstream'
    if: |
      github.event.issue.pull_request
      && ! startsWith(github.event.comment.body, '/rerun-sst')
      && contains(fromJson('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
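The tmt_plan_regex values rely on negative lookaheads, so a PCRE-capable matcher is needed to evaluate them; a sketch of previewing which plan names a pattern selects (plan names are illustrative):

    # plans matching "tier1 but not max_sst"; grep -P enables lookaheads
    printf '%s\n' plans/tier1-basic plans/max_sst-tier1 plans/tier2 \
        | grep -P '^(?!.*max_sst)(.*tier1)'
    # -> plans/tier1-basic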
@@ -0,0 +1,48 @@
name: Unit Tests
on:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master

jobs:
  test:
    name: Run unit tests in containers
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        scenarios:
          - name: Run unit tests for el8toel9 and common repositories on python 3.9
            python: python3.9
            repos: 'el8toel9,common'
            container: ubi8
          - name: Run unit tests for el7toel8 and common repositories on python 3.6
            python: python3.6
            repos: 'el7toel8,common'
            container: ubi8
          - name: Run unit tests for el8toel9 and common repositories on python 3.6
            python: python3.6
            repos: 'el8toel9,common'
            container: ubi8
          - name: Run unit tests for el7toel8 and common repositories on python 2.7
            python: python2.7
            repos: 'el7toel8,common'
            container: ubi7

    steps:
      - name: Checkout code
        uses: actions/checkout@v2
        with:
          # NOTE(ivasilev) fetch-depth 0 is critical here as leapp deps discovery depends on a specific substring in
          # the commit message, and the default value of 1 would get us just the merge commit, which has an irrelevant message.
          fetch-depth: '0'
      # NOTE(ivasilev) master -> origin/master is used for leapp deps discovery in the Makefile via git log master..HEAD
      - name: Set master to origin/master
        if: github.ref != 'refs/heads/master'
        run: |
          git branch -f master origin/master
      - name: ${{matrix.scenarios.name}}
        run: script -e -c /bin/bash -c 'TERM=xterm podman build --security-opt=seccomp=unconfined -t leapp-tests -f utils/container-tests/Containerfile.${{matrix.scenarios.container}} utils/container-tests && PYTHON_VENV=${{matrix.scenarios.python}} REPOSITORIES=${{matrix.scenarios.repos}} podman run --security-opt=seccomp=unconfined --rm -ti -v ${PWD}:/payload --env=PYTHON_VENV --env=REPOSITORIES leapp-tests'
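A sketch of running one matrix cell locally from the repository root, assuming podman is installed (this mirrors the el8toel9/python3.9 scenario above):

    podman build -t leapp-tests -f utils/container-tests/Containerfile.ubi8 utils/container-tests
    PYTHON_VENV=python3.9 REPOSITORIES=el8toel9,common \
        podman run --rm -ti -v $PWD:/payload --env=PYTHON_VENV --env=REPOSITORIES leapp-tests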
@@ -0,0 +1,120 @@
repos/**/.leapp/leapp.db

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
packaging/BUILD/
packaging/BUILDROOT/
packaging/SRPMS/
packaging/RPMS/
packaging/sources/
packaging/tmp/
parts/
sdist/
tut/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# dotenv
.env

# virtualenv
.venv
venv/
ENV/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/

# All kinds of vim stuff
**/*~
*.sw[a-z]

# visual studio code configuration
.vscode

# pycharm
.idea
@@ -0,0 +1,34 @@
stages:
  - build-initrd
  - build-srpm
#  - build-rpms

build_initrd:
  only:
    - master@leapp/leapp-actors-internal
  stage: build-initrd
  cache:
    key: "${CI_PIPELINE_ID}"
    paths:
      - sources/dracut/upgrade-boot-files.tgz
  script:
    - 'export BASEDIR="$PWD"'
    - helpers/docker/docker-run.sh
  image: docker-registry.engineering.redhat.com/leapp-builds/leapp-initrd-rhel8-build:latest

build_srpm:
  only:
    - master@leapp/leapp-actors-internal
  stage: build-srpm
  dependencies:
    - build_initrd
  cache:
    key: "${CI_PIPELINE_ID}"
    paths:
      - sources/dracut/upgrade-boot-files.tgz
  script:
    - dnf install -y git-core make rpm-build copr-cli
    - export LEAPP_INITRD_SKIP=1
    - make srpm
  image: fedora:28
@@ -0,0 +1,9 @@
[settings]
line_length=119
known_first_party=leapp
multi_line_output=3
honor_noqa=true
# NOTE(ivasilev) The ideal solution would be order_by_type with an exception for classes (e.g. RPM), but this isn't
# supported in 4.3.2
case_sensitive=false
order_by_type=false
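A sketch of applying these settings locally; isort picks up .isort.cfg from the working directory automatically (the pinned version comes from the comment above, and the paths are illustrative):

    pip install "isort==4.3.2"
    isort -rc --check-only --diff repos commands   # -rc recurses into directories (isort 4.x)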
@@ -0,0 +1,290 @@
# See the documentation for more information:
# https://packit.dev/docs/configuration/

specfile_path: packaging/leapp-repository.spec
# name in upstream package repository/registry (e.g. in PyPI)
upstream_package_name: leapp-repository
downstream_package_name: leapp-repository
upstream_tag_template: 'v{version}'
merge_pr_in_ci: false

srpm_build_deps:
- make

# This is just for the build from the CLI - all other builds for jobs use their
# own actions
actions:
  create-archive:
  - bash -c "rm -f packaging/deps-pkgs.tar.gz"
  - bash -c "make source"
  - bash -c "mv packaging/sources/*.gz packaging/"
  - bash -c "find packaging/*.gz -type f"
  fix-spec-file:
  - bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
  post-upstream-clone:
  # builds from PRs should have lower NVR than those from the master branch
  - bash -c "sed -i \"s/1%{?dist}/0%{?dist}/g\" packaging/leapp-repository.spec"

jobs:
- job: copr_build
  trigger: pull_request
  metadata:
    owner: "@oamg"
    project: leapp
    targets:
    - epel-7-x86_64
    - epel-8-x86_64
  actions:
    create-archive:
    - bash -c "rm -f packaging/deps-pkgs.tar.gz"
    - bash -c "make source"
    - bash -c "mv packaging/sources/*.gz packaging/"
    - bash -c "find packaging/*.gz -type f"
    fix-spec-file:
    - bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
    post-upstream-clone:
    # builds from PRs should have lower NVR than those from the master branch
    - bash -c "sed -i \"s/1%{?dist}/0%{?dist}/g\" packaging/leapp-repository.spec"
- job: copr_build
  trigger: commit
  metadata:
    branch: master
    owner: "@oamg"
    project: leapp
    targets:
    - epel-7-x86_64
    - epel-8-x86_64
  actions:
    create-archive:
    - bash -c "rm -f packaging/deps-pkgs.tar.gz"
    - bash -c "make source"
    - bash -c "mv packaging/sources/*.gz packaging/"
    - bash -c "find packaging/*.gz -type f"
    fix-spec-file:
    - bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
    post-upstream-clone:
    # builds from the master branch should start with release 100, to have high priority
    - bash -c "sed -i \"s/1%{?dist}/100%{?dist}/g\" packaging/leapp-repository.spec"
- job: copr_build
  trigger: release
  metadata:
    owner: "@oamg"
    project: leapp
    targets:
    - epel-7-x86_64
    - epel-8-x86_64
  actions:
    create-archive:
    - bash -c "rm -f packaging/deps-pkgs.tar.gz"
    - bash -c "make source"
    - bash -c "mv packaging/sources/*.gz packaging/"
    - bash -c "find packaging/*.gz -type f"
    fix-spec-file:
    - bash -c "sed -i -r \"0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/\" packaging/leapp-repository.spec"
    post-upstream-clone:
    # builds from the master branch should start with release 100, to have high priority
    - bash -c "sed -i \"s/1%{?dist}/100%{?dist}/g\" packaging/leapp-repository.spec"

- job: tests
  fmf_url: "https://gitlab.cee.redhat.com/oamg/tmt-plans"
  fmf_ref: "master"
  use_internal_tf: True
  trigger: pull_request
  targets:
    epel-7-x86_64:
      distros: [RHEL-7.9-ZStream]
  identifier: tests-7.9to8.6
  tmt_plan: "^(?!.*max_sst)(.*tier1)"
  tf_extra_params:
    environments:
      - tmt:
          context:
            distro: "rhel-7.9"
        # tag resources as sst_upgrades to enable cost metrics collection
        settings:
          provisioning:
            post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"
            tags:
              BusinessUnit: sst_upgrades
  env:
    SOURCE_RELEASE: "7.9"
    TARGET_RELEASE: "8.6"
    LEAPPDATA_BRANCH: "upstream"

- job: tests
  fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests"
  fmf_ref: "master"
  use_internal_tf: True
  trigger: pull_request
  targets:
    epel-7-x86_64:
      distros: [RHEL-7.9-ZStream]
  identifier: tests-7.9to8.8
  tmt_plan: "^(?!.*max_sst)(.*tier1)"
  tf_extra_params:
    environments:
      - tmt:
          context:
            distro: "rhel-7.9"
        # tag resources as sst_upgrades to enable cost metrics collection
        settings:
          provisioning:
            post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"
            tags:
              BusinessUnit: sst_upgrades
  env:
    SOURCE_RELEASE: "7.9"
    TARGET_RELEASE: "8.8"
    LEAPPDATA_BRANCH: "upstream"

# - job: tests
#   fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests"
#   fmf_ref: "master"
#   use_internal_tf: True
#   trigger: pull_request
#   targets:
#     epel-7-x86_64:
#       distros: [RHEL-7.9-ZStream]
#   identifier: tests-7.9to8.8-sst
#   tmt_plan: "^(?!.*tier[2-3].*)(.*max_sst.*)"
#   tf_post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"
#   tf_extra_params:
#     environments:
#       - tmt:
#           context:
#             distro: "rhel-7.9"
#   env:
#     SOURCE_RELEASE: "7.9"
#     TARGET_RELEASE: "8.8"

- job: tests
  fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests"
  fmf_ref: "master"
  use_internal_tf: True
  trigger: pull_request
  targets:
    epel-7-x86_64:
      distros: [RHEL-7.9-rhui]
  identifier: tests-7to8-aws-e2e
  tmt_plan: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*8to9)(.*e2e)"
  tf_extra_params:
    environments:
      - tmt:
          context:
            distro: "rhel-7.9"
        # tag resources as sst_upgrades to enable cost metrics collection
        settings:
          provisioning:
            post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys; yum-config-manager --enable rhel-7-server-rhui-optional-rpms"
            tags:
              BusinessUnit: sst_upgrades
  env:
    SOURCE_RELEASE: "7.9"
    TARGET_RELEASE: "8.6"
    RHUI: "aws"
    LEAPPDATA_BRANCH: "upstream"

- job: tests
  fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests"
  fmf_ref: "master"
  use_internal_tf: True
  trigger: pull_request
  targets:
    epel-8-x86_64:
      distros: [RHEL-8.6.0-Nightly]
  identifier: tests-8.6to9.0
  tmt_plan: "^(?!.*max_sst)(.*tier1)"
  tf_extra_params:
    environments:
      - tmt:
          context:
            distro: "rhel-8.6"
        # tag resources as sst_upgrades to enable cost metrics collection
        settings:
          provisioning:
            post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"
            tags:
              BusinessUnit: sst_upgrades
  env:
    SOURCE_RELEASE: "8.6"
    TARGET_RELEASE: "9.0"
    RHSM_REPOS: "rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms"
    LEAPPDATA_BRANCH: "upstream"

- job: tests
  fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests"
  fmf_ref: "master"
  use_internal_tf: True
  trigger: pull_request
  targets:
    epel-8-x86_64:
      distros: [RHEL-8.8.0-Nightly]
  identifier: tests-8.8to9.2
  tmt_plan: "^(?!.*max_sst)(.*tier1)"
  tf_extra_params:
    environments:
      - tmt:
          context:
            distro: "rhel-8.8"
        # tag resources as sst_upgrades to enable cost metrics collection
        settings:
          provisioning:
            post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"
            tags:
              BusinessUnit: sst_upgrades
  env:
    SOURCE_RELEASE: "8.8"
    TARGET_RELEASE: "9.2"
    RHSM_REPOS: "rhel-8-for-x86_64-appstream-beta-rpms,rhel-8-for-x86_64-baseos-beta-rpms"
    LEAPPDATA_BRANCH: "upstream"
    LEAPP_DEVEL_TARGET_RELEASE: "9.2"

# - job: tests
#   fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests"
#   fmf_ref: "master"
#   use_internal_tf: True
#   trigger: pull_request
#   targets:
#     epel-8-x86_64:
#       distros: [RHEL-8.6.0-Nightly]
#   identifier: tests-8.6to9.0-sst
#   tmt_plan: "^(?!.*tier[2-3].*)(.*max_sst.*)"
#   tf_post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"
#   tf_extra_params:
#     environments:
#       - tmt:
#           context:
#             distro: "rhel-8.6"
#   env:
#     SOURCE_RELEASE: "8.6"
#     TARGET_RELEASE: "9.0"
#     RHSM_REPOS: "rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms"
#     LEAPPDATA_BRANCH: "upstream"

- job: tests
  fmf_url: "https://gitlab.cee.redhat.com/oamg/leapp-tests"
  fmf_ref: "master"
  use_internal_tf: True
  trigger: pull_request
  targets:
    epel-8-x86_64:
      distros: [RHEL-8.6-rhui]
  identifier: tests-8to9-aws-e2e
  tmt_plan: "^(?!.*upgrade_plugin)(?!.*tier[2-3].*)(?!.*rhsm)(?!.*c2r)(?!.*sap)(?!.*7to8)(.*e2e)"
  tf_extra_params:
    environments:
      - tmt:
          context:
            distro: "rhel-8.6"
        # tag resources as sst_upgrades to enable cost metrics collection
        settings:
          provisioning:
            post_install_script: "#!/bin/sh\nsudo sed -i s/.*ssh-rsa/ssh-rsa/ /root/.ssh/authorized_keys"
            tags:
              BusinessUnit: sst_upgrades
  env:
    SOURCE_RELEASE: "8.6"
    TARGET_RELEASE: "9.0"
    RHSM_REPOS: "rhel-8-for-x86_64-appstream-eus-rpms,rhel-8-for-x86_64-baseos-eus-rpms"
    RHUI: "aws"
    LEAPPDATA_BRANCH: "upstream"
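For orientation, a sketch of what the fix-spec-file action does to the spec's first Release line (the PACKIT_RPMSPEC_RELEASE value is illustrative; -i is dropped here so the result is only printed):

    # before:  Release:        1%{?dist}
    # after:   Release:        0.20230501120000.abcdef1.master%{?dist}
    PACKIT_RPMSPEC_RELEASE=0.20230501120000.abcdef1.master
    sed -r "0,/Release:/ s/Release:(\s*)\S*/Release:\1${PACKIT_RPMSPEC_RELEASE}%{?dist}/" packaging/leapp-repository.spec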
@@ -0,0 +1,80 @@
[MESSAGES CONTROL]
disable=
    # "F" Fatal errors that prevent further processing
    import-error,
    # "I" Informational noise
    # "E" Error for important programming issues (likely bugs)
    no-member,
    no-name-in-module,
    raising-bad-type,
    redundant-keyword-arg,  # it's one or the other, this one is not so bad at all
    # "W" Warnings for stylistic problems or minor programming issues
    no-absolute-import,
    arguments-differ,
    cell-var-from-loop,
    fixme,
    lost-exception,
    no-init,
    pointless-string-statement,
    protected-access,
    redefined-outer-name,
    relative-import,
    undefined-loop-variable,
    unsubscriptable-object,
    unused-argument,
    unused-import,
    unspecified-encoding,
    # "C" Coding convention violations
    bad-continuation,
    missing-docstring,
    wrong-import-order,
    use-maxsplit-arg,
    consider-using-dict-items,
    consider-using-enumerate,
    # "R" Refactor recommendations
    duplicate-code,
    no-self-use,
    too-few-public-methods,
    too-many-branches,
    too-many-locals,
    too-many-statements,
    consider-using-from-import,
    use-list-literal,
    use-dict-literal,
    # new for the python3 version of pylint
    useless-object-inheritance,
    consider-using-set-comprehension,  # pylint3 forces comprehensions in places we don't want them (py2 doesn't have the options for an inline skip)
    unnecessary-pass,
    invalid-envvar-default,  # pylint3 warning; envvars return str/None by default
    bad-option-value,  # python 2 doesn't have import-outside-toplevel, but in some cases we need to import outside the toplevel
    super-with-arguments,  # required in python 2
    raise-missing-from,  # no 'raise from' in python 2
    use-a-generator,  # cannot be changed because of Python2 support
    consider-using-with,  # in a bunch of places we cannot change that...
    duplicate-string-formatting-argument,  # TMP: will be fixed in the near future
    consider-using-f-string,  # sorry, not gonna happen, still have to support py2
    redundant-u-string-prefix  # still have py2 to support

[FORMAT]
# Maximum number of characters on a single line.
max-line-length=119

[DESIGN]
max-args=11  # 2x + 1 from default
max-attributes=21  # 4x + 1 from default

[REPORTS]
msg-template='[{msg_id} {symbol}] {msg} File: {path}, line {line}, in {obj}'

[BASIC]
# In order to make a check more strict, add a proper regex http://pylint-messages.wikidot.com/messages:c0103
argument-rgx=.*
attr-rgx=.*
class-rgx=.*
const-rgx=.*
function-rgx=.*
method-rgx=.*
module-rgx=.*
variable-rgx=.*
inlinevar-rgx=.*
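A sketch of running the linter with this configuration (the file list is illustrative; the project's Makefile exposes this as the lint target):

    python -m pylint --rcfile=.pylintrc commands/*.py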
@@ -0,0 +1,22 @@
git:
  depth: 3
language: shell
os: linux
dist: xenial
env:
  global:
    - CONTAINER=registry.centos.org/centos:7
services:
  - docker

install:
  - docker pull ${CONTAINER}
  - docker build -t leapp-tests -f utils/docker-tests/Dockerfile utils/docker-tests

jobs:
  include:
    - stage: deploy
      script:
        - docker run --env CI=$CI --rm -ti -v ${PWD}:/payload --entrypoint "/bin/bash" leapp-tests -c "make install-deps && make dashboard_data"
        - bash ./utils/update_dashboard.sh
      if: branch = master AND type = push
Binary file not shown.
@@ -0,0 +1 @@
See the [Contribution guidelines](https://leapp.readthedocs.io/en/latest/contributing.html)
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@@ -0,0 +1,497 @@
# there are bashisms used in this Makefile
SHELL=/bin/bash

__PKGNAME=$${_PKGNAME:-leapp-repository}
VENVNAME ?= tut
DIST_VERSION ?= 7
PKGNAME=leapp-repository
DEPS_PKGNAME=leapp-el7toel8-deps
VERSION=`grep -m1 "^Version:" packaging/$(PKGNAME).spec | grep -om1 "[0-9].[0-9.]*"`
DEPS_VERSION=`grep -m1 "^Version:" packaging/other_specs/$(DEPS_PKGNAME).spec | grep -om1 "[0-9].[0-9.]*"`
REPOS_PATH=repos
_SYSUPG_REPOS="$(REPOS_PATH)/system_upgrade"
LIBRARY_PATH=
REPORT_ARG=
REPOSITORIES ?= $(shell ls $(_SYSUPG_REPOS) | xargs echo | tr " " ",")
SYSUPG_TEST_PATHS=$(shell echo $(REPOSITORIES) | sed -r "s|(,\\|^)| $(_SYSUPG_REPOS)/|g")
TEST_PATHS:=commands repos/common $(SYSUPG_TEST_PATHS)


ifdef ACTOR
    TEST_PATHS=`python utils/actor_path.py $(ACTOR)`
endif

ifeq ($(TEST_LIBS),y)
    LIBRARY_PATH=`python utils/library_path.py`
endif

ifdef REPORT
    REPORT_ARG=--junit-xml=$(REPORT)
endif

# needed only in case Python2 should be used
_USE_PYTHON_INTERPRETER=$${_PYTHON_INTERPRETER}

# python version to run tests with
_PYTHON_VENV=$${PYTHON_VENV:-python2.7}

# by default use the values below, but in case a COPR_* var is defined,
# use it instead of the default
_COPR_REPO=$${COPR_REPO:-leapp}
_COPR_REPO_TMP=$${COPR_REPO_TMP:-leapp-tmp}
_COPR_CONFIG=$${COPR_CONFIG:-~/.config/copr_rh_oamg.conf}

# tool used to run containers for testing and building packages
_CONTAINER_TOOL=$${CONTAINER_TOOL:-podman}

# container to run tests in
_TEST_CONTAINER=$${TEST_CONTAINER:-rhel8}

# In case just specific CHROOTs should be used for the COPR build, you can
# set multiple CHROOTs separated by comma in the COPR_CHROOT envar, e.g.
# "epel-7-x86_64,epel-8-x86_64". But for the copr-cli utility, each of them
# has to be specified separately for the -r option; so we transform it
# automatically to "-r epel-7-x86_64 -r epel-8-x86_64" (without quotes).
ifdef COPR_CHROOT
    _COPR_CHROOT=`echo $${COPR_CHROOT} | grep -o "[^,]*" | sed "s/^/-r /g"`
endif

# just to reduce the number of unwanted builds, mark the build as the upstream
# one when someone calls copr_build without additional parameters
MASTER_BRANCH=master

# In case a PR or MR is defined, or the build is not coming from the
# MASTER_BRANCH branch, N_REL=0 (so the build is not an update of the approved
# upstream solution). For upstream builds N_REL=100.
N_REL=`_NR=$${PR:+0}; if test "$${_NR:-100}" == "100"; then _NR=$${MR:+0}; fi; git rev-parse --abbrev-ref HEAD | grep -qE "^($(MASTER_BRANCH)|stable)$$" || _NR=0; echo $${_NR:-100}`

TIMESTAMP:=$${__TIMESTAMP:-$(shell /bin/date -u "+%Y%m%d%H%MZ")}
SHORT_SHA=`git rev-parse --short HEAD`
BRANCH=`git rev-parse --abbrev-ref HEAD | tr -- '-/' '_'`

# The dependent framework PR connection is taken from the top commit's Depends-On message.
REQ_LEAPP_PR=$(shell git log master..HEAD | grep -m1 -iE '^[[:space:]]*Depends-On:[[:space:]]*.*[[:digit:]]+[[:space:]]*$$' | grep -Eo '*[[:digit:]]*')
# NOTE(ivasilev) In the case of travis, relying on the top commit is a no-go, as the top commit will be a merge commit.
ifdef CI
    REQ_LEAPP_PR=$(shell git log master..HEAD | grep -m1 -iE '^[[:space:]]*Depends-On:[[:space:]]*.*[[:digit:]]+[[:space:]]*$$' | grep -Eo '[[:digit:]]*')
endif

# In case anyone would like to add any other suffix, just make it possible
_SUFFIX=`if test -n "$$SUFFIX"; then echo ".$${SUFFIX}"; fi; `

# generate an empty string if PR or MR are not specified, otherwise set one of them
REQUEST=`if test -n "$$PR"; then echo ".PR$${PR}"; elif test -n "$$MR"; then echo ".MR$${MR}"; fi; `

# replace "custombuild" with a string describing your build
# Examples:
# 0.201810080027Z.4078402.packaging.PR2
# 0.201810080027Z.4078402.packaging
# 0.201810080027Z.4078402.master.MR2
# 1.201810080027Z.4078402.master
RELEASE="$(N_REL).$(TIMESTAMP).$(SHORT_SHA).$(BRANCH)$(REQUEST)$(_SUFFIX)"

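A sketch of inspecting the computed release string for a hypothetical PR build (values are illustrative; compare the examples in the comment above):

    # on branch "packaging" at short sha 4078402, with PR=5:
    #   N_REL=0, REQUEST=".PR5"  ->  RELEASE="0.201810080027Z.4078402.packaging.PR5"
    PR=5 make print_release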
all: help
|
||||
|
||||
help:
|
||||
@echo "Usage: make <target>"
|
||||
@echo
|
||||
@echo "Available targets are:"
|
||||
@echo " help show this text"
|
||||
@echo " clean clean the mess"
|
||||
@echo " prepare clean the mess and prepare dirs"
|
||||
@echo " print_release print release how it should look like with"
|
||||
@echo " with the given parameters"
|
||||
@echo " source create the source tarball suitable for"
|
||||
@echo " packaging"
|
||||
@echo " srpm create the SRPM"
|
||||
@echo " build_container create the RPM in container"
|
||||
@echo " - set BUILD_CONTAINER to el7 or el8"
|
||||
@echo " - don't run more than one build at the same time"
|
||||
@echo " since containers operate on the same files!"
|
||||
@echo " copr_build create the COPR build using the COPR TOKEN"
|
||||
@echo " - default path is: $(_COPR_CONFIG)"
|
||||
@echo " - can be changed by the COPR_CONFIG env"
|
||||
@echo " install-deps create python virtualenv and install there"
|
||||
@echo " leapp-repository with dependencies"
|
||||
@echo " install-deps-fedora create python virtualenv and install there"
|
||||
@echo " leapp-repository with dependencies for Fedora OS"
|
||||
@echo " lint lint source code"
|
||||
@echo " lint_container run lint in container"
|
||||
@echo " lint_container_all run lint in all available containers"
|
||||
@echo " see test_container for options"
|
||||
@echo " lint_fix attempt to fix isort violations inplace"
|
||||
@echo " test lint source code and run tests"
|
||||
@echo " test_no_lint run tests without linting the source code"
|
||||
@echo " test_container run lint and tests in container"
|
||||
@echo " - default container is 'rhel8'"
|
||||
@echo " - can be changed by setting TEST_CONTAINER env"
|
||||
@echo " test_container_all run lint and tests in all available containers"
|
||||
@echo " test_container_no_lint run tests without linting in container, see test_container"
|
||||
@echo " test_container_all_no_lint run tests without linting in all available containers"
|
||||
@echo " clean_containers clean all testing and building container images (to force a rebuild for example)"
|
||||
@echo ""
|
||||
@echo "Targets test, lint and test_no_lint support environment variables ACTOR and"
|
||||
@echo "TEST_LIBS."
|
||||
@echo "If ACTOR=<actor> is specified, targets are run against the specified actor."
|
||||
@echo "If TEST_LIBS=y is specified, targets are run against shared libraries."
|
||||
@echo ""
|
||||
@echo "Envars affecting actions with COPR (optional):"
|
||||
@echo " COPR_REPO specify COPR repository, e,g. @oamg/leapp"
|
||||
@echo " (default: leapp)"
|
||||
@echo " COPR_REPO_TMP specify COPR repository for building of tmp"
|
||||
@echo " deps (meta) packages"
|
||||
@echo " (default: leapp-tmp)"
|
||||
@echo " COPR_CONFIG path to the COPR config with API token"
|
||||
@echo " (default: ~/.config/copr_rh_oamg.conf)"
|
||||
@echo " COPR_CHROOT specify the CHROOT which should be used for"
|
||||
@echo " the build, e.g. 'epel-7-x86_64'. You can"
|
||||
@echo " specify multiple CHROOTs separated by comma."
|
||||
@echo ""
|
||||
@echo "Possible use:"
|
||||
@echo " make <target>"
|
||||
@echo " PR=5 make <target>"
|
||||
@echo " MR=6 make <target>"
|
||||
@echo " PR=7 SUFFIX='my_additional_suffix' make <target>"
|
||||
@echo " MR=6 COPR_CONFIG='path/to/the/config/copr/file' make <target>"
|
||||
@echo " ACTOR=<actor> TEST_LIBS=y make test"
|
||||
@echo " BUILD_CONTAINER=el7 make build_container"
|
||||
@echo " TEST_CONTAINER=f34 make test_container"
|
||||
@echo " CONTAINER_TOOL=docker TEST_CONTAINER=rhel7 make test_container_no_lint"
|
||||
@echo ""

clean:
	@echo "--- Clean repo ---"
	@rm -rf packaging/{sources,SRPMS,tmp,BUILD,BUILDROOT,RPMS}/
	@rm -rf build/ dist/ *.egg-info .pytest_cache/
	@rm -f *src.rpm packaging/*tar.gz
	@find . -name 'leapp.db' | grep "\.leapp/leapp.db" | xargs rm -f
	@find . -name '__pycache__' -exec rm -fr {} +
	@find . -name '*.pyc' -exec rm -f {} +
	@find . -name '*.pyo' -exec rm -f {} +

prepare: clean
	@echo "--- Prepare build directories ---"
	@mkdir -p packaging/{sources,SRPMS,BUILD,BUILDROOT,RPMS}/

source: prepare
	@echo "--- Create source tarball ---"
	@echo git archive --prefix "$(PKGNAME)-$(VERSION)/" -o "packaging/sources/$(PKGNAME)-$(VERSION).tar.gz" HEAD
	@git archive --prefix "$(PKGNAME)-$(VERSION)/" -o "packaging/sources/$(PKGNAME)-$(VERSION).tar.gz" HEAD
	@echo "--- PREPARE DEPS PKGS ---"
	mkdir -p packaging/tmp/
	@__TIMESTAMP=$(TIMESTAMP) $(MAKE) _build_subpkg
	@__TIMESTAMP=$(TIMESTAMP) $(MAKE) DIST_VERSION=$$(($(DIST_VERSION) + 1)) _build_subpkg
	@tar -czf packaging/sources/deps-pkgs.tar.gz -C packaging/RPMS/noarch `ls -1 packaging/RPMS/noarch | grep -o "[^/]*rpm$$"`
	@rm -f packaging/RPMS/noarch/*.rpm

srpm: source
	@echo "--- Build SRPM: $(PKGNAME)-$(VERSION)-$(RELEASE).. ---"
	@cp packaging/$(PKGNAME).spec packaging/$(PKGNAME).spec.bak
	@sed -i "s/1%{?dist}/$(RELEASE)%{?dist}/g" packaging/$(PKGNAME).spec
	@rpmbuild -bs packaging/$(PKGNAME).spec \
		--define "_sourcedir `pwd`/packaging/sources" \
		--define "_srcrpmdir `pwd`/packaging/SRPMS" \
		--define "rhel $(DIST_VERSION)" \
		--define 'dist .el$(DIST_VERSION)' \
		--define 'el$(DIST_VERSION) 1' || FAILED=1
	@mv packaging/$(PKGNAME).spec.bak packaging/$(PKGNAME).spec

_build_subpkg:
	@echo "--- Build RPM: $(DEPS_PKGNAME)-$(DEPS_VERSION)-$(RELEASE).. ---"
	@cp packaging/other_specs/$(DEPS_PKGNAME).spec packaging/$(DEPS_PKGNAME).spec
	@sed -i "s/1%{?dist}/$(RELEASE)%{?dist}/g" packaging/$(DEPS_PKGNAME).spec
# Let's be explicit about the path to the binary RPMs; Copr builders can override this.
# IMPORTANT:
# Also, explicitly set the _rpmfilename macro. This is super important, as
# the COPR build servers use Mock, which redefines the macro, so packages
# would be stored inside the RPMS directory instead of RPMS/%{ARCH}. The macro
# must be defined with a double '%'. With just a single %, the macro is expanded
# when the specfile is loaded, but it is expected to be expanded during
# the build process, when the particular subpackages (RPMs) are created, so that
# each RPM gets the right name. With a single %, all RPMs would get the
# name of the SRPM, which effectively means that only one RPM per build
# would be created. (hopefully the explanation is clear :))
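# Example (illustrative): with a single '%', _rpmfilename would expand once, at
# spec-load time, so every subpackage would be written to the same file name;
# with '%%', rpmbuild expands it per subpackage at build time, producing e.g.
# noarch/<name>-<version>-<release>.noarch.rpm for each subpackage.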
	@rpmbuild -ba packaging/$(DEPS_PKGNAME).spec \
		--define "_sourcedir `pwd`/packaging/sources" \
		--define "_srcrpmdir `pwd`/packaging/SRPMS" \
		--define "_builddir `pwd`/packaging/BUILD" \
		--define "_buildrootdir `pwd`/packaging/BUILDROOT" \
		--define "_rpmdir `pwd`/packaging/RPMS" \
		--define "rhel $$(($(DIST_VERSION) + 1))" \
		--define "dist .el$$(($(DIST_VERSION) + 1))" \
		--define "el$$(($(DIST_VERSION) + 1)) 1" \
		--define "_rpmfilename %%{ARCH}/%%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" || FAILED=1
	@rm -f packaging/$(DEPS_PKGNAME).spec

_build_local: source
	@echo "--- Build RPM: $(PKGNAME)-$(VERSION)-$(RELEASE).. ---"
	@cp packaging/$(PKGNAME).spec packaging/$(PKGNAME).spec.bak
	@sed -i "s/1%{?dist}/$(RELEASE)%{?dist}/g" packaging/$(PKGNAME).spec
	@rpmbuild -ba packaging/$(PKGNAME).spec \
		--define "_sourcedir `pwd`/packaging/sources" \
		--define "_srcrpmdir `pwd`/packaging/SRPMS" \
		--define "_builddir `pwd`/packaging/BUILD" \
		--define "_buildrootdir `pwd`/packaging/BUILDROOT" \
		--define "_rpmdir `pwd`/packaging/RPMS" \
		--define "rhel $(DIST_VERSION)" \
		--define "dist .el$(DIST_VERSION)" \
		--define "el$(DIST_VERSION) 1" || FAILED=1
	@mv packaging/$(PKGNAME).spec.bak packaging/$(PKGNAME).spec

build_container:
	echo "--- Build RPM ${PKGNAME}-${VERSION}-${RELEASE}.el$(DIST_VERSION).rpm in container ---"; \
	case "$(BUILD_CONTAINER)" in \
		el7) \
			CONT_FILE="utils/container-builds/Containerfile.centos7"; \
			;; \
		el8) \
			CONT_FILE="utils/container-builds/Containerfile.ubi8"; \
			;; \
		"") \
			echo "BUILD_CONTAINER must be set"; \
			exit 1; \
			;; \
		*) \
			echo "Available containers are el7, el8"; \
			exit 1; \
			;; \
	esac && \
	IMAGE="leapp-repo-build-$(BUILD_CONTAINER)"; \
	$(_CONTAINER_TOOL) image inspect $$IMAGE > /dev/null 2>&1 || \
		$(_CONTAINER_TOOL) build -f $$CONT_FILE --tag $$IMAGE . && \
	$(_CONTAINER_TOOL) run --rm --name "$${IMAGE}-cont" -v $$PWD:/repo:Z $$IMAGE

copr_build: srpm
	@echo "--- Build RPM ${PKGNAME}-${VERSION}-${RELEASE}.el$(DIST_VERSION).rpm in COPR ---"
	@echo copr-cli --config $(_COPR_CONFIG) build $(_COPR_CHROOT) $(_COPR_REPO) \
		packaging/SRPMS/${PKGNAME}-${VERSION}-${RELEASE}*.src.rpm
	@copr-cli --config $(_COPR_CONFIG) build $(_COPR_CHROOT) $(_COPR_REPO) \
		packaging/SRPMS/${PKGNAME}-${VERSION}-${RELEASE}*.src.rpm

print_release:
	@echo $(RELEASE)
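
# Example (illustrative): `PR=5 make print_release` might print
# 0.201810080027Z.4078402.master.PR5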

# Before doing anything, it is a good idea to register the repos to ensure everything
# is in order inside ~/.config/leapp/repos.json
register:
	. $(VENVNAME)/bin/activate; \
	snactor repo find --path repos

install-deps:
	@# in centos:7 python 3.x is not installed by default
	case $(_PYTHON_VENV) in python3*) yum install -y ${shell echo $(_PYTHON_VENV) | tr -d .}; esac
	@# in centos:7 the python dependencies require gcc
	case $(_PYTHON_VENV) in python3*) yum install gcc -y; esac
	virtualenv --system-site-packages -p /usr/bin/$(_PYTHON_VENV) $(VENVNAME); \
	. $(VENVNAME)/bin/activate; \
	pip install -U pip; \
	pip install --upgrade setuptools; \
	pip install --upgrade -r requirements.txt; \
	./utils/install_commands.sh $(_PYTHON_VENV); \
	# In case the top commit Depends-On a not-yet-merged framework patch, override the master leapp with the proper version
	if [[ ! -z "$(REQ_LEAPP_PR)" ]] ; then \
		echo "leapp-repository depends on the not-yet-merged framework PR #$(REQ_LEAPP_PR), installing it.." && \
		$(VENVNAME)/bin/pip install -I "git+https://github.com/oamg/leapp.git@refs/pull/$(REQ_LEAPP_PR)/head"; \
	fi
	$(_PYTHON_VENV) utils/install_actor_deps.py --actor=$(ACTOR) --repos="$(TEST_PATHS)"

install-deps-fedora:
	@# Check that the necessary rpms are installed for py3 (and py2 below)
	if ! rpm -q git findutils python3-virtualenv gcc; then \
		if ! dnf install -y git findutils python3-virtualenv gcc; then \
			echo 'Please install the following rpms via the command: ' \
				'sudo dnf install -y git findutils python3-virtualenv gcc'; \
			exit 1; \
		fi; \
	fi
	@# Prepare the virtual environment
	virtualenv --system-site-packages --python /usr/bin/$(_PYTHON_VENV) $(VENVNAME)
	. $(VENVNAME)/bin/activate ; \
	pip install -U pip; \
	pip install --upgrade setuptools; \
	pip install --upgrade -r requirements.txt; \
	./utils/install_commands.sh $(_PYTHON_VENV); \
	# In case the top commit Depends-On a not-yet-merged framework patch, override the master leapp with the proper version
	if [[ ! -z "$(REQ_LEAPP_PR)" ]] ; then \
		echo "leapp-repository depends on the not-yet-merged framework PR #$(REQ_LEAPP_PR), installing it.." && \
		$(VENVNAME)/bin/pip install -I "git+https://github.com/oamg/leapp.git@refs/pull/$(REQ_LEAPP_PR)/head"; \
	fi

lint:
	. $(VENVNAME)/bin/activate; \
	echo "--- Linting ... ---" && \
	SEARCH_PATH="$(TEST_PATHS)" && \
	echo "Using search path '$${SEARCH_PATH}'" && \
	echo "--- Running pylint ---" && \
	bash -c "[[ ! -z '$${SEARCH_PATH}' ]] && find $${SEARCH_PATH} -name '*.py' | sort -u | xargs pylint -j0" && \
	echo "--- Running flake8 ---" && \
	bash -c "[[ ! -z '$${SEARCH_PATH}' ]] && flake8 $${SEARCH_PATH}"

	if [[ "$(_PYTHON_VENV)" == "python2.7" ]] ; then \
		. $(VENVNAME)/bin/activate; \
		echo "--- Checking py3 compatibility ---" && \
		SEARCH_PATH=$(REPOS_PATH) && \
		bash -c "[[ ! -z '$${SEARCH_PATH}' ]] && find $${SEARCH_PATH} -name '*.py' | sort -u | xargs pylint --py3k" && \
		echo "--- Linting done. ---"; \
	fi

	if [[ "`git rev-parse --abbrev-ref HEAD`" != "$(MASTER_BRANCH)" ]] && [[ -n "`git diff $(MASTER_BRANCH) --name-only --diff-filter AMR`" ]]; then \
		. $(VENVNAME)/bin/activate; \
		git diff $(MASTER_BRANCH) --name-only --diff-filter AMR | xargs isort -c --diff || \
		{ \
			echo; \
			echo "------------------------------------------------------------------------------"; \
			echo "Hint: Apply the required changes."; \
			echo "      Execute the following command to apply them automatically: make lint_fix"; \
			exit 1; \
		} && echo "--- isort check done. ---"; \
	fi

lint_fix:
	. $(VENVNAME)/bin/activate; \
	git diff $(MASTER_BRANCH) --name-only --diff-filter AMR | xargs isort && \
	echo "--- isort inplace fixing done. ---"

test_no_lint:
	. $(VENVNAME)/bin/activate; \
	snactor repo find --path repos/; \
	cd repos/system_upgrade/el7toel8/; \
	snactor workflow sanity-check ipu && \
	cd - && \
	$(_PYTHON_VENV) -m pytest $(REPORT_ARG) $(TEST_PATHS) $(LIBRARY_PATH)

test: lint test_no_lint

# Container images act as a cache so that dependencies need to be downloaded only once.
# To force an image rebuild, use the clean_containers target.
_build_container_image:
	@[ -z "$$CONT_FILE" ] && { echo "CONT_FILE must be set"; exit 1; } || \
	[ -z "$$TEST_IMAGE" ] && { echo "TEST_IMAGE must be set"; exit 1; }; \
	$(_CONTAINER_TOOL) image inspect "$$TEST_IMAGE" > /dev/null 2>&1 && exit 0; \
	echo "=========== Building container test env image ==========="; \
	$(_CONTAINER_TOOL) build -f $$CONT_FILE --tag $$TEST_IMAGE .

# Tests one IPU; leapp repositories irrelevant to the tested IPU are left out
# (restricted via the exported REPOSITORIES variable).
_test_container_ipu:
	@case $$TEST_CONT_IPU in \
		el7toel8) \
			export REPOSITORIES="common,el7toel8"; \
			;; \
		el8toel9) \
			export REPOSITORIES="common,el8toel9"; \
			;; \
		"") \
			echo "TEST_CONT_IPU must be set"; exit 1; \
			;; \
		*) \
			echo "Only supported TEST_CONT_IPUs are el7toel8, el8toel9"; exit 1; \
			;; \
	esac && \
	$(_CONTAINER_TOOL) exec -w /repocopy $$_CONT_NAME make clean && \
	$(_CONTAINER_TOOL) exec -w /repocopy -e REPOSITORIES $$_CONT_NAME make $${_TEST_CONT_TARGET:-test}


# Runs lint in a container
lint_container:
	@_TEST_CONT_TARGET="lint" $(MAKE) test_container

lint_container_all:
	@for container in "f34" "rhel7" "rhel8"; do \
		TEST_CONTAINER=$$container $(MAKE) lint_container || exit 1; \
	done

# Runs tests in a container.
# Builds the testing image first if it doesn't exist.
# On some Python versions, we need to test both IPUs,
# because e.g. the RHEL7-to-RHEL8 IPU must work on python2.7 and python3.6,
# and the RHEL8-to-RHEL9 IPU must work on python3.6 and python3.9.
test_container:
	@case $(_TEST_CONTAINER) in \
		f34) \
			export CONT_FILE="utils/container-tests/Containerfile.f34"; \
			export _VENV="python3.9"; \
			;; \
		rhel7) \
			export CONT_FILE="utils/container-tests/Containerfile.rhel7"; \
			export _VENV="python2.7"; \
			;; \
		rhel8) \
			export CONT_FILE="utils/container-tests/Containerfile.rhel8"; \
			export _VENV="python3.6"; \
			;; \
		*) \
			echo "Error: Available containers are: f34, rhel7, rhel8"; exit 1; \
			;; \
	esac; \
	export TEST_IMAGE="leapp-repo-tests-$(_TEST_CONTAINER)"; \
	$(MAKE) _build_container_image && \
	echo "=== Running $(_TEST_CONT_TARGET) in $(_TEST_CONTAINER) container ===" && \
	export _CONT_NAME="leapp-repo-tests-$(_TEST_CONTAINER)-cont"; \
	$(_CONTAINER_TOOL) ps -q -f name=$$_CONT_NAME && { $(_CONTAINER_TOOL) kill $$_CONT_NAME; $(_CONTAINER_TOOL) rm $$_CONT_NAME; }; \
	$(_CONTAINER_TOOL) run -di --name $$_CONT_NAME -v "$$PWD":/repo:Z -e PYTHON_VENV=$$_VENV $$TEST_IMAGE && \
	$(_CONTAINER_TOOL) exec $$_CONT_NAME rsync -aur --delete --exclude "tut*" /repo/ /repocopy && \
	case $$_VENV in \
		python2.7) \
			TEST_CONT_IPU=el7toel8 $(MAKE) _test_container_ipu; \
			;; \
		python3.6) \
			TEST_CONT_IPU=el7toel8 $(MAKE) _test_container_ipu; \
			TEST_CONT_IPU=el8toel9 $(MAKE) _test_container_ipu; \
			;; \
		python3.9) \
			TEST_CONT_IPU=el8toel9 $(MAKE) _test_container_ipu; \
			;; \
		*) \
			TEST_CONT_IPU=el8toel9 $(MAKE) _test_container_ipu; \
			;; \
	esac; \
	$(_CONTAINER_TOOL) kill $$_CONT_NAME; \
	$(_CONTAINER_TOOL) rm $$_CONT_NAME

test_container_all:
	@for container in "f34" "rhel7" "rhel8"; do \
		TEST_CONTAINER=$$container $(MAKE) test_container || exit 1; \
	done

test_container_no_lint:
	@_TEST_CONT_TARGET="test_no_lint" $(MAKE) test_container

test_container_all_no_lint:
	@for container in "f34" "rhel7" "rhel8"; do \
		TEST_CONTAINER=$$container $(MAKE) test_container_no_lint || exit 1; \
	done

# clean all testing and building containers and their images
clean_containers:
	@for i in "leapp-repo-tests-f34" "leapp-repo-tests-rhel7" "leapp-repo-tests-rhel8" \
		"leapp-repo-build-el7" "leapp-repo-build-el8"; do \
		$(_CONTAINER_TOOL) kill "$$i-cont" || :; \
		$(_CONTAINER_TOOL) rm "$$i-cont" || :; \
		$(_CONTAINER_TOOL) rmi "$$i" || :; \
	done > /dev/null 2>&1

fast_lint:
	@. $(VENVNAME)/bin/activate; \
	FILES_TO_LINT="$$(git diff --name-only $(MASTER_BRANCH) --diff-filter AMR | grep '\.py$$')"; \
	if [[ -n "$$FILES_TO_LINT" ]]; then \
		pylint -j 0 $$FILES_TO_LINT && \
		flake8 $$FILES_TO_LINT; \
		LINT_EXIT_CODE="$$?"; \
		if [[ "$$LINT_EXIT_CODE" != "0" ]]; then \
			exit $$LINT_EXIT_CODE; \
		fi; \
		if [[ "$(_PYTHON_VENV)" == "python2.7" ]] ; then \
			pylint --py3k $$FILES_TO_LINT; \
		fi; \
	else \
		echo "No files to lint."; \
	fi

dashboard_data:
	. $(VENVNAME)/bin/activate; \
	snactor repo find --path repos/; \
	pushd repos/system_upgrade/el7toel8/; \
	$(_PYTHON_VENV) ../../../utils/dashboard-json-dump.py > ../../../discover.json; \
	popd

.PHONY: help build clean prepare source srpm copr_build _build_local build_container print_release register install-deps install-deps-fedora lint test_no_lint test dashboard_data fast_lint
.PHONY: test_container test_container_no_lint test_container_all test_container_all_no_lint clean_containers _build_container_image _test_container_ipu
@ -0,0 +1,44 @@
import itertools
import sys

from leapp.cli.commands.config import get_config
from leapp.exceptions import UsageError
from leapp.messaging.answerstore import AnswerStore
from leapp.utils.clicmd import command, command_opt


@command('answer', help='Manage answerfile generation: register persistent user choices for specific dialog sections')
@command_opt('section', action='append', metavar='dialog_sections',
             help='Register answer for a specific section in the answerfile')
@command_opt('add', is_flag=True,
             help='If set, sections will be created even if missing in the original answerfile')
def answer(args):
    """A command to record user choices to the questions in the answerfile.

    Saves user answers between leapp preupgrade runs.
    """
    cfg = get_config()
    if args.section:
        args.section = list(itertools.chain(*[i.split(',') for i in args.section]))
    else:
        raise UsageError('At least one dialog section must be specified, ex. --section dialog.option=mychoice')
    try:
        sections = [tuple((dialog_option.split('.', 2) + [value]))
                    for dialog_option, value in [s.split('=', 2) for s in args.section]]
    except ValueError:
        raise UsageError("A badly formatted section has been passed. The expected format is dialog.option=mychoice")
    answerfile_path = cfg.get('report', 'answerfile')
    answerstore = AnswerStore()
    answerstore.load(answerfile_path)
    for dialog, option, value in sections:
        answerstore.answer(dialog, option, value)
    not_updated = answerstore.update(answerfile_path, allow_missing=args.add)
    if not_updated:
        sys.stderr.write("WARNING: Only sections found in the original userfile can be updated, ignoring {}\n".format(
            ",".join(not_updated)))
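
# Example (illustrative dialog/option names):
#   leapp answer --section check_vdo.confirm=True
# records the value True for the 'confirm' option of the 'check_vdo' dialog
# in the existing answerfile.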


def register(base_command):
    """
    Registers `leapp answer`
    """
    base_command.add_sub(answer)
@ -0,0 +1,142 @@
import json
import os
import re

from leapp.exceptions import CommandError
from leapp.utils import path

HANA_BASE_PATH = '/hana/shared'
HANA_SAPCONTROL_PATH_X86_64 = 'exe/linuxx86_64/hdb/sapcontrol'
HANA_SAPCONTROL_PATH_PPC64LE = 'exe/linuxppc64le/hdb/sapcontrol'

LEAPP_UPGRADE_FLAVOUR_DEFAULT = 'default'
LEAPP_UPGRADE_FLAVOUR_SAP_HANA = 'saphana'
LEAPP_UPGRADE_PATHS = 'upgrade_paths.json'

VERSION_REGEX = re.compile(r"^([1-9]\d*)\.(\d+)$")


def check_version(version):
    """
    Versioning schema: MAJOR.MINOR
    In case the version is an invalid version string, a CommandError is raised.

    :raises: CommandError
    :return: list of the version components, [MAJOR, MINOR]
    """
    if not re.match(VERSION_REGEX, version):
        raise CommandError('Unexpected format of target version: {}'.format(version))
    return version.split('.')
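
# Example: check_version('8.6') returns ['8', '6'];
# check_version('8.6.1') or check_version('ga') raises CommandError.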


def get_major_version(version):
    """
    Return the major version from the given version string.

    Versioning schema: MAJOR.MINOR

    :param str version: The version string according to the versioning schema described.
    :rtype: str
    :returns: The major version from the given version string.
    """
    return str(check_version(version)[0])
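
# Example: get_major_version('8.6') returns '8'.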


def detect_sap_hana():
    """
    Detect SAP HANA based on the existence of /hana/shared/*/exe/linuxx86_64/hdb/sapcontrol
    """
    if os.path.exists(HANA_BASE_PATH):
        for entry in os.listdir(HANA_BASE_PATH):
            # Does /hana/shared/{entry}/exe/linuxx86_64/hdb/sapcontrol exist?
            sap_on_intel = os.path.exists(os.path.join(HANA_BASE_PATH, entry, HANA_SAPCONTROL_PATH_X86_64))
            sap_on_power = os.path.exists(os.path.join(HANA_BASE_PATH, entry, HANA_SAPCONTROL_PATH_PPC64LE))
            if sap_on_intel or sap_on_power:
                return True
    return False


def get_upgrade_flavour():
    """
    Return the flavour of the upgrade for this system.
    """
    if detect_sap_hana():
        return LEAPP_UPGRADE_FLAVOUR_SAP_HANA
    return LEAPP_UPGRADE_FLAVOUR_DEFAULT


def get_os_release_version_id(filepath):
    """
    Retrieve the OS release version from the provided os-release file.

    :return: `str` version_id
    """
    with open(filepath) as f:
        data = dict(l.strip().split('=', 1) for l in f.readlines() if '=' in l)
    return data.get('VERSION_ID', '').strip('"')
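
# Example: for an /etc/os-release containing the line VERSION_ID="8.6",
# get_os_release_version_id('/etc/os-release') returns '8.6'.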


def get_upgrade_paths_config():
    # NOTE(ivasilev) Importing here to avoid circular dependencies
    from leapp.cli.commands.upgrade import util  # noqa: C415; pylint: disable=import-outside-toplevel

    repository = util.load_repositories_from('repo_path', '/etc/leapp/repo.d/', manager=None)
    with open(path.get_common_file_path(repository, LEAPP_UPGRADE_PATHS)) as f:
        upgrade_paths_map = json.loads(f.read())
    return upgrade_paths_map


def get_target_versions_from_config(src_version_id, flavor):
    """
    Retrieve all possible target versions from the upgrade_paths_map.
    If no match is found, return an empty list.
    """
    upgrade_paths_map = get_upgrade_paths_config()
    return upgrade_paths_map.get(flavor, {}).get(src_version_id, [])
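
# Example: with an upgrade paths map like {"default": {"8.6": ["9.0"]}},
# get_target_versions_from_config('8.6', 'default') returns ['9.0'].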


def get_supported_target_versions(flavour=get_upgrade_flavour()):
    """
    Return a list of supported target versions for the given `flavour` of upgrade.
    The default value for `flavour` is `default`.
    """

    current_version_id = get_os_release_version_id('/etc/os-release')
    target_versions = get_target_versions_from_config(current_version_id, flavour)
    if not target_versions:
        # If we cannot find the particular major.minor version in the map,
        # we fall back to picking a target version based on the major version only.
        # This can happen, for example, when testing not-yet-released versions.
        major_version = get_major_version(current_version_id)
        target_versions = get_target_versions_from_config(major_version, flavour)

    return target_versions


def get_target_version(flavour):
    target_versions = get_supported_target_versions(flavour)
    return target_versions[-1] if target_versions else None


def vet_upgrade_path(args):
    """
    Make sure the user-requested upgrade path is a supported one.
    If LEAPP_DEVEL_TARGET_RELEASE is set, then its value is not vetted against the upgrade_paths_map but used as is.

    :raises: `CommandError` if the specified upgrade_path is not supported
    :return: `tuple` (target_release, flavor)
    """
    flavor = get_upgrade_flavour()
    env_version_override = os.getenv('LEAPP_DEVEL_TARGET_RELEASE')
    if env_version_override:
        check_version(env_version_override)
        return (env_version_override, flavor)
    target_release = args.target or get_target_version(flavor)
    supported_target_versions = get_supported_target_versions(flavor)
    if target_release not in supported_target_versions:
        raise CommandError(
            "Upgrade to {to} for {flavor} upgrade path is not supported, possible choices are {choices}".format(
                to=target_release,
                flavor=flavor,
                choices=','.join(supported_target_versions)))
    return (target_release, flavor)
@ -0,0 +1,7 @@
from leapp import config


def get_config():
    if not config._LEAPP_CONFIG:
        config._CONFIG_DEFAULTS['repositories'] = {'repo_path': '/etc/leapp/repos.d'}
    return config.get_config()
@ -0,0 +1,26 @@
from __future__ import print_function

import json
import sys

from leapp.cli.commands.upgrade.util import fetch_all_upgrade_contexts
from leapp.exceptions import CommandError
from leapp.utils.clicmd import command


@command('list-runs', help='List previous Leapp upgrade executions')
def list_runs(args):  # noqa; pylint: disable=unused-argument
    contexts = fetch_all_upgrade_contexts()
    if contexts:
        for context in contexts:
            print('Context ID: {} - time: {} - details: {}'.format(context[0], context[1], json.loads(context[2])),
                  file=sys.stdout)
    else:
        raise CommandError('No previous run found!')


def register(base_command):
    """
    Registers `leapp list-runs`
    """
    base_command.add_sub(list_runs)
@ -0,0 +1,92 @@
import os
import sys
import uuid

from leapp.cli.commands import command_utils
from leapp.cli.commands.config import get_config
from leapp.cli.commands.upgrade import breadcrumbs, util
from leapp.exceptions import CommandError, LeappError
from leapp.logger import configure_logger
from leapp.utils.audit import Execution
from leapp.utils.clicmd import command, command_opt
from leapp.utils.output import beautify_actor_exception, report_errors, report_info


@command('preupgrade', help='Generate preupgrade report')
@command_opt('whitelist-experimental', action='append', metavar='ActorName', help='Enables experimental actors')
@command_opt('debug', is_flag=True, help='Enable debug mode', inherit=False)
@command_opt('verbose', is_flag=True, help='Enable verbose logging', inherit=False)
@command_opt('no-rhsm', is_flag=True, help='Use only custom repositories and skip actions'
                                           ' with Red Hat Subscription Manager')
@command_opt('no-insights-register', is_flag=True, help='Do not register into Red Hat Insights')
@command_opt('no-rhsm-facts', is_flag=True, help='Do not store migration information using Red Hat '
                                                 'Subscription Manager. Automatically implied by --no-rhsm.')
@command_opt('enablerepo', action='append', metavar='<repoid>',
             help='Enable specified repository. Can be used multiple times.')
@command_opt('channel',
             help='Set preferred channel for the IPU target.',
             choices=['ga', 'tuv', 'e4s', 'eus', 'aus'],
             value_type=str.lower)  # This allows the choices to be case-insensitive
@command_opt('iso', help='Use the provided target RHEL installation image to perform the in-place upgrade.')
@command_opt('target', choices=command_utils.get_supported_target_versions(),
             help='Specify the RHEL version to upgrade to for the {} detected upgrade flavour'.format(
                 command_utils.get_upgrade_flavour()))
@command_opt('report-schema', help='Specify report schema version for leapp-report.json',
             choices=['1.0.0', '1.1.0', '1.2.0'], default=get_config().get('report', 'schema'))
@command_opt('nogpgcheck', is_flag=True, help='Disable RPM GPG checks. Same as the yum/dnf --nogpgcheck option.')
@breadcrumbs.produces_breadcrumbs
def preupgrade(args, breadcrumbs):
    util.disable_database_sync()
    context = str(uuid.uuid4())
    cfg = get_config()
    util.handle_output_level(args)
    configuration = util.prepare_configuration(args)
    answerfile_path = cfg.get('report', 'answerfile')
    userchoices_path = cfg.get('report', 'userchoices')
    # NOTE(ivasilev) argparse choices and defaults are enough for validation
    report_schema = args.report_schema

    if os.getuid():
        raise CommandError('This command has to be run under the root user.')
    e = Execution(context=context, kind='preupgrade', configuration=configuration)
    e.store()
    util.archive_logfiles()
    logger = configure_logger('leapp-preupgrade.log')
    os.environ['LEAPP_EXECUTION_ID'] = context

    try:
        repositories = util.load_repositories()
    except LeappError as exc:
        raise CommandError(exc.message)

    workflow = repositories.lookup_workflow('IPUWorkflow')()
    util.warn_if_unsupported(configuration)
    util.process_whitelist_experimental(repositories, workflow, configuration, logger)
    with beautify_actor_exception():
        workflow.load_answers(answerfile_path, userchoices_path)
        until_phase = 'ReportsPhase'
        logger.info('Executing workflow until phase: %s', until_phase)

        # Set the locale so that actors parsing possibly localized command outputs will not fail
        os.environ['LANGUAGE'] = 'en_US.UTF-8'
        os.environ['LC_ALL'] = 'en_US.UTF-8'
        os.environ['LANG'] = 'en_US.UTF-8'
        workflow.run(context=context, until_phase=until_phase, skip_dialogs=True)

    logger.info("Answerfile will be created at %s", answerfile_path)
    workflow.save_answers(answerfile_path, userchoices_path)
    util.generate_report_files(context, report_schema)
    report_errors(workflow.errors)
    report_files = util.get_cfg_files('report', cfg)
    log_files = util.get_cfg_files('logs', cfg)
    report_info(context, report_files, log_files, answerfile_path, fail=workflow.failure, errors=workflow.errors)

    if workflow.failure:
        sys.exit(1)


def register(base_command):
    """
    Registers `leapp preupgrade`
    """
    base_command.add_sub(preupgrade)
@ -0,0 +1,82 @@
import os
import uuid
from argparse import Namespace

from leapp.cli.commands.upgrade import upgrade, util
from leapp.exceptions import CommandError
from leapp.utils.audit import Execution, get_connection
from leapp.utils.audit.contextclone import clone_context
from leapp.utils.clicmd import command, command_arg, command_opt

RERUN_SUPPORTED_PHASES = ('FirstBoot',)


@command('rerun', help='Re-runs the upgrade from the given phase, using the information and progress '
                       'from the last invocation of leapp upgrade.')
@command_arg('from-phase',
             help='Phase to start running from again. Supported values: {}'.format(', '.join(RERUN_SUPPORTED_PHASES)))
@command_opt('only-actors-with-tag', action='append', metavar='TagName',
             help='Restrict actors to be re-run only with the given tags. Others will not be executed')
@command_opt('debug', is_flag=True, help='Enable debug mode', inherit=False)
@command_opt('verbose', is_flag=True, help='Enable verbose logging', inherit=False)
def rerun(args):

    if os.environ.get('LEAPP_UNSUPPORTED') != '1':
        raise CommandError('This command requires the environment variable LEAPP_UNSUPPORTED="1" to be set!')

    if args.from_phase not in RERUN_SUPPORTED_PHASES:
        raise CommandError('This command is only supported for {}'.format(', '.join(RERUN_SUPPORTED_PHASES)))

    context = str(uuid.uuid4())
    last_context, configuration = util.fetch_last_upgrade_context()
    if not last_context:
        raise CommandError('No previous upgrade run to rerun - '
                           'leapp upgrade has to be run before leapp rerun can be used')

    phases = [chkpt['phase'] for chkpt in util.get_checkpoints(context=last_context)]
    if args.from_phase not in set(phases):
        raise CommandError('Phase {} has not been executed in the last leapp upgrade execution. '
                           'Cannot rerun a phase that has not been executed'.format(args.from_phase))

    with get_connection(None) as db:
        e = Execution(context=context, kind='rerun', configuration=configuration)

        e.store(db)

        clone_context(last_context, context, db)
        db.execute('''
            DELETE FROM audit WHERE id IN (
                SELECT
                    audit.id          AS id
                FROM
                    audit
                JOIN
                    data_source ON data_source.id = audit.data_source_id
                WHERE
                    audit.context = ? AND audit.event = 'checkpoint'
                    AND data_source.phase LIKE 'FirstBoot%'
            );
        ''', (context,))
        db.execute('''DELETE FROM message WHERE context = ? and type = 'ErrorModel';''', (context,))

    util.archive_logfiles()
    upgrade(Namespace(  # pylint: disable=no-value-for-parameter
        resume=True,
        resume_context=context,
        only_with_tags=args.only_actors_with_tag or [],
        debug=args.debug,
        verbose=args.verbose,
        reboot=False,
        no_rhsm=False,
        nogpgcheck=False,
        channel=None,
        report_schema='1.1.0',
        whitelist_experimental=[],
        enablerepo=[]))


def register(base_command):
    """
    Registers `leapp rerun`
    """
    base_command.add_sub(rerun)
@ -0,0 +1,52 @@
import mock
import pytest

from leapp.cli.commands import command_utils
from leapp.exceptions import CommandError


@mock.patch("leapp.cli.commands.command_utils.get_upgrade_paths_config",
            return_value={"default": {"7.9": ["8.4"], "8.6": ["9.0"], "7": ["8.4"], "8": ["9.0"]}})
def test_get_target_version(mock_open, monkeypatch):

    monkeypatch.setattr(command_utils, 'get_os_release_version_id', lambda x: '8.6')
    assert command_utils.get_target_version('default') == '9.0'

    monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '')
    monkeypatch.setattr(command_utils, 'get_os_release_version_id', lambda x: '8.6')
    assert command_utils.get_target_version('default') == '9.0'

    monkeypatch.delenv('LEAPP_DEVEL_TARGET_RELEASE', raising=True)
    # unsupported path: 8.5 is not in the map, so the major-version fallback applies
    monkeypatch.setattr(command_utils, 'get_os_release_version_id', lambda x: '8.5')
    assert command_utils.get_target_version('default') == '9.0'


@mock.patch("leapp.cli.commands.command_utils.get_upgrade_paths_config",
            return_value={"default": {"7.9": ["8.4"], "8.6": ["9.0"], "7": ["8.4"], "8": ["9.0"]}})
def test_vet_upgrade_path(mock_open, monkeypatch):
    monkeypatch.setattr(command_utils, 'get_os_release_version_id', lambda x: '8.6')

    # make sure the env var LEAPP_DEVEL_TARGET_RELEASE takes precedence;
    # when the env var is set to a bad version - abort the upgrade
    args = mock.Mock(target='9.0')
    monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '1.2badsemver')
    with pytest.raises(CommandError) as err:
        command_utils.vet_upgrade_path(args)
    assert 'Unexpected format of target version' in str(err.value)
    # MAJOR.MINOR.PATCH is considered a bad version; only MAJOR.MINOR is accepted
    args = mock.Mock(target='9.0')
    monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '9.0.0')
    with pytest.raises(CommandError) as err:
        command_utils.vet_upgrade_path(args)
    assert 'Unexpected format of target version' in str(err.value)
    # when the env var is set to a version not in the upgrade_paths map - go on and use it
    monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '1.2')
    assert command_utils.vet_upgrade_path(args) == ('1.2', 'default')
    # no env var set, --target is set to a proper version
    monkeypatch.delenv('LEAPP_DEVEL_TARGET_RELEASE', raising=False)
    assert command_utils.vet_upgrade_path(args) == ('9.0', 'default')
    # env var is set to a proper version, --target is set to a bad one - use the env var and go on with the upgrade
    monkeypatch.setenv('LEAPP_DEVEL_TARGET_RELEASE', '9.0')
    args = mock.Mock(target='1.2')
    assert command_utils.vet_upgrade_path(args) == ('9.0', 'default')
@ -0,0 +1,122 @@
import os
import sys
import uuid

from leapp.cli.commands import command_utils
from leapp.cli.commands.config import get_config
from leapp.cli.commands.upgrade import breadcrumbs, util
from leapp.exceptions import CommandError, LeappError
from leapp.logger import configure_logger
from leapp.utils.audit import Execution
from leapp.utils.clicmd import command, command_opt
from leapp.utils.output import beautify_actor_exception, report_errors, report_info

# NOTE:
# If you are adding new parameters, please ensure that they are also set in the
# upgrade() invocation in `rerun`, otherwise there might be errors.


@command('upgrade', help='Upgrade the current system to the next available major version.')
@command_opt('resume', is_flag=True, help='Continue the last execution after it was stopped (e.g. after reboot)')
@command_opt('reboot', is_flag=True, help='Automatically performs reboot when requested.')
@command_opt('whitelist-experimental', action='append', metavar='ActorName', help='Enable experimental actors')
@command_opt('debug', is_flag=True, help='Enable debug mode', inherit=False)
@command_opt('verbose', is_flag=True, help='Enable verbose logging', inherit=False)
@command_opt('no-rhsm', is_flag=True, help='Use only custom repositories and skip actions'
                                           ' with Red Hat Subscription Manager')
@command_opt('no-insights-register', is_flag=True, help='Do not register into Red Hat Insights')
@command_opt('no-rhsm-facts', is_flag=True, help='Do not store migration information using Red Hat '
                                                 'Subscription Manager. Automatically implied by --no-rhsm.')
@command_opt('enablerepo', action='append', metavar='<repoid>',
             help='Enable specified repository. Can be used multiple times.')
@command_opt('channel',
             help='Set preferred channel for the IPU target.',
             choices=['ga', 'tuv', 'e4s', 'eus', 'aus'],
             value_type=str.lower)  # This allows the choices to be case-insensitive
@command_opt('iso', help='Use the provided target RHEL installation image to perform the in-place upgrade.')
@command_opt('target', choices=command_utils.get_supported_target_versions(),
             help='Specify the RHEL version to upgrade to for the {} detected upgrade flavour'.format(
                 command_utils.get_upgrade_flavour()))
@command_opt('report-schema', help='Specify report schema version for leapp-report.json',
             choices=['1.0.0', '1.1.0', '1.2.0'], default=get_config().get('report', 'schema'))
@command_opt('nogpgcheck', is_flag=True, help='Disable RPM GPG checks. Same as the yum/dnf --nogpgcheck option.')
@breadcrumbs.produces_breadcrumbs
def upgrade(args, breadcrumbs):
    skip_phases_until = None
    context = str(uuid.uuid4())
    cfg = get_config()
    util.handle_output_level(args)
    answerfile_path = cfg.get('report', 'answerfile')
    userchoices_path = cfg.get('report', 'userchoices')

    # Processing of parameters passed by the rerun call; these aren't actual command line
    # arguments, so we have to assume they may not even be in `args`, as they are added only by rerun.
    only_with_tags = args.only_with_tags if 'only_with_tags' in args else None
    resume_context = args.resume_context if 'resume_context' in args else None

    # NOTE(ivasilev) argparse choices and defaults are enough for validation
    report_schema = args.report_schema

    if os.getuid():
        raise CommandError('This command has to be run under the root user.')

    if args.resume:
        context, configuration = util.fetch_last_upgrade_context(resume_context)
        if not context:
            raise CommandError('No previous upgrade run to continue, remove `--resume` from the leapp invocation to'
                               ' start a new upgrade flow')
        os.environ['LEAPP_DEBUG'] = '1' if util.check_env_and_conf('LEAPP_DEBUG', 'debug', configuration) else '0'

        if os.environ['LEAPP_DEBUG'] == '1' or util.check_env_and_conf('LEAPP_VERBOSE', 'verbose', configuration):
            os.environ['LEAPP_VERBOSE'] = '1'
        else:
            os.environ['LEAPP_VERBOSE'] = '0'
        util.restore_leapp_env_vars(context)
        skip_phases_until = util.get_last_phase(context)
    else:
        util.disable_database_sync()
        configuration = util.prepare_configuration(args)
        e = Execution(context=context, kind='upgrade', configuration=configuration)
        e.store()
        util.archive_logfiles()

    logger = configure_logger('leapp-upgrade.log')
    os.environ['LEAPP_EXECUTION_ID'] = context

    if args.resume:
        logger.info("Resuming execution after phase: %s", skip_phases_until)
    try:
        repositories = util.load_repositories()
    except LeappError as exc:
        raise CommandError(exc.message)
    workflow = repositories.lookup_workflow('IPUWorkflow')(auto_reboot=args.reboot)
    util.process_whitelist_experimental(repositories, workflow, configuration, logger)
    util.warn_if_unsupported(configuration)
    with beautify_actor_exception():
        logger.info("Using answerfile at %s", answerfile_path)
        workflow.load_answers(answerfile_path, userchoices_path)

        # Set the locale so that actors parsing possibly localized command outputs will not fail
        os.environ['LANGUAGE'] = 'en_US.UTF-8'
        os.environ['LC_ALL'] = 'en_US.UTF-8'
        os.environ['LANG'] = 'en_US.UTF-8'
        workflow.run(context=context, skip_phases_until=skip_phases_until, skip_dialogs=True,
                     only_with_tags=only_with_tags)

    logger.info("Answerfile will be created at %s", answerfile_path)
    workflow.save_answers(answerfile_path, userchoices_path)
    report_errors(workflow.errors)
    util.generate_report_files(context, report_schema)
    report_files = util.get_cfg_files('report', cfg)
    log_files = util.get_cfg_files('logs', cfg)
    report_info(context, report_files, log_files, answerfile_path, fail=workflow.failure, errors=workflow.errors)

    if workflow.failure:
        sys.exit(1)


def register(base_command):
    """
    Registers `leapp upgrade`
    """
    base_command.add_sub(upgrade)
@ -0,0 +1,171 @@
import datetime
import json
import os
import sys
from functools import wraps
from itertools import chain

from leapp import FULL_VERSION
from leapp.libraries.stdlib.call import _call
from leapp.utils.audit import get_messages

try:
    from json.decoder import JSONDecodeError  # pylint: disable=ungrouped-imports
except ImportError:
    JSONDecodeError = ValueError


def runs_in_container():
    """
    Check if the current process is running inside a container

    :return: True if the process is running inside a container, False otherwise
    """
    return os.path.exists('/run/host/container-manager')


def _flattened(d):
    """ Flatten nested dicts and lists into a single dict """
    def expand(key, value):
        if isinstance(value, dict):
            return [(key + '.' + k, v) for k, v in _flattened(value).items()]
        if isinstance(value, list):
            return chain(*[expand(key + '.' + str(i), v) for i, v in enumerate(value)])
        return [(key, value)]
    items = [item for k, v in d.items() for item in expand(k, v)]
    return dict(items)
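
# Example: _flattened({'a': {'b': 1}, 'c': [2, 3]})
# returns {'a.b': 1, 'c.0': 2, 'c.1': 3}.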


class _BreadCrumbs(object):
    def __init__(self, activity):
        self._crumbs = {
            'activity': activity,
            'packages': self._get_packages(),
            'leapp_file_changes': [],
            'executed': ' '.join([v if ' ' not in v else '"{}"'.format(v) for v in sys.argv]),
            'success': True,
            'activity_started': datetime.datetime.utcnow().isoformat() + 'Z',
            'activity_ended': datetime.datetime.utcnow().isoformat() + 'Z',
            'source_os': '',
            'target_os': '',
            'env': dict(),
            'run_id': '',
            'version': FULL_VERSION,
        }

    def fail(self):
        self._crumbs['success'] = False

    def _save_rhsm_facts(self, activities):
        if not os.path.isdir('/etc/rhsm/facts'):
            if not os.path.exists('/etc/rhsm'):
                # If there's no /etc/rhsm folder, just skip it
                return
            os.mkdir('/etc/rhsm/facts')
        try:
            with open('/etc/rhsm/facts/leapp.facts', 'w') as f:
                json.dump(_flattened({
                    'leapp': [
                        activity for activity in activities
                        if activity.get('activity', '') in ('preupgrade', 'upgrade')]
                }), f, indent=4)
            self._commit_rhsm_facts()
        except OSError:
            # We don't care if creating the file fails here;
            # it shouldn't, but if it does, just ignore it
            pass

    def _commit_rhsm_facts(self):
        if runs_in_container():
            return
        cmd = ['/usr/sbin/subscription-manager', 'facts', '--update']
        try:
            _call(cmd, lambda x, y: None, lambda x, y: None)
        except (OSError, ValueError, TypeError):
            # We don't care about errors here, just ignore them
            pass

    def save(self):
        self._crumbs['run_id'] = os.environ.get('LEAPP_EXECUTION_ID', 'N/A')
        self._crumbs['leapp_file_changes'].extend(self._verify_leapp_pkgs())
        messages = get_messages(('IPUConfig',), self._crumbs['run_id'])
        versions = json.loads((messages or [{}])[0].get('message', {}).get(
            'data', '{}')).get('version', {'target': 'N/A', 'source': 'N/A'})
        self._crumbs['target_os'] = 'Red Hat Enterprise Linux {target}'.format(**versions)
        self._crumbs['source_os'] = 'Red Hat Enterprise Linux {source}'.format(**versions)
        self._crumbs['activity_ended'] = datetime.datetime.utcnow().isoformat() + 'Z'
        self._crumbs['env'] = {k: v for k, v in os.environ.items() if k.startswith('LEAPP_')}
        try:
            with open('/etc/migration-results', 'a+') as crumbs:
                crumbs.seek(0)
                doc = {'activities': []}
                try:
                    content = json.load(crumbs)
                    if isinstance(content, dict):
                        if isinstance(content.get('activities', None), list):
                            doc = content
                except JSONDecodeError:
                    # Expected to happen when /etc/migration-results is still empty or does not yet exist
                    pass
                doc['activities'].append(self._crumbs)
                crumbs.seek(0)
                crumbs.truncate()
                json.dump(doc, crumbs, indent=2, sort_keys=True)
                crumbs.write('\n')
                if os.environ.get('LEAPP_NO_RHSM_FACTS', '0') != '1':
                    self._save_rhsm_facts(doc['activities'])
        except OSError:
            sys.stderr.write('WARNING: Could not write to /etc/migration-results\n')

    def _get_packages(self):
        cmd = ['/bin/bash', '-c', 'rpm -qa --queryformat="%{nevra} %{SIGPGP:pgpsig}\n" | grep -Ee "leapp|snactor"']
        res = _call(cmd, lambda x, y: None, lambda x, y: None)
        if res.get('exit_code', None) == 0:
            if res.get('stdout', None):
                return [{'nevra': t[0], 'signature': t[1]}
                        for t in [line.strip().split(' ', 1) for line in res['stdout'].split('\n') if line.strip()]]
        return []

    def _verify_leapp_pkgs(self):
        if not os.environ.get('LEAPP_IPU_IN_PROGRESS'):
            return []
        upg_path = os.environ.get('LEAPP_IPU_IN_PROGRESS').split('to')
        cmd = ['/bin/bash', '-c', 'rpm -V leapp leapp-upgrade-el{}toel{}'.format(upg_path[0], upg_path[1])]
        res = _call(cmd, lambda x, y: None, lambda x, y: None)
        if res.get('exit_code', None) == 1:
            if res.get('stdout', None):
                return [{'result': t[0], 'file_name': t[1]}
                        for t in [line.strip().split(' ', 1) for line in res['stdout'].split('\n') if line.strip()]]
        return []
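
    # Example (illustrative): with LEAPP_IPU_IN_PROGRESS='8to9' the command becomes
    # 'rpm -V leapp leapp-upgrade-el8toel9', and an output line such as
    # 'S.5....T. /usr/bin/leapp' would be recorded roughly as
    # {'result': 'S.5....T.', 'file_name': '/usr/bin/leapp'} (split on the first space).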


def produces_breadcrumbs(f):
    """
    Ensures that `/etc/migration-results` gets produced on every invocation of `leapp upgrade` & `leapp preupgrade`

    Every execution of the upgrade will have its own entry in the /etc/migration-results file.
    For a user flow like `leapp preupgrade && leapp upgrade && reboot`, there should be 5 new entries in the file:

    1. leapp preupgrade
    2. leapp upgrade (Source OS)
    3. leapp upgrade (Initram Phase - Until and including the RPM transaction)
    4. leapp upgrade (Initram Phase - Post RPM Transaction)
    5. leapp upgrade (Target OS - First Boot)

    Depending on future design changes of the IPU Workflow, the output may vary.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        breadcrumbs = _BreadCrumbs(activity=f.__name__)
        try:
            return f(*args, breadcrumbs=breadcrumbs, **kwargs)
        except SystemExit as e:
            if e.code != 0:
                breadcrumbs.fail()
            raise
        except BaseException:
            breadcrumbs.fail()
            raise
        finally:
            breadcrumbs.save()
    return wrapper
@ -0,0 +1,247 @@
import functools
import itertools
import json
import os
import shutil
import tarfile
from datetime import datetime

from leapp.cli.commands import command_utils
from leapp.cli.commands.config import get_config
from leapp.exceptions import CommandError
from leapp.repository.scan import find_and_scan_repositories
from leapp.utils import audit
from leapp.utils.audit import get_checkpoints, get_connection, get_messages
from leapp.utils.output import report_unsupported
from leapp.utils.report import fetch_upgrade_report_messages, generate_report_file


def disable_database_sync():
    def disable_db_sync_decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            saved = os.environ.get('LEAPP_DEVEL_DATABASE_SYNC_OFF', None)
            try:
                os.environ['LEAPP_DEVEL_DATABASE_SYNC_OFF'] = '1'
                return f(*args, **kwargs)
            finally:
                os.environ.pop('LEAPP_DEVEL_DATABASE_SYNC_OFF')
                if saved:
                    os.environ['LEAPP_DEVEL_DATABASE_SYNC_OFF'] = saved
        return wrapper

    if not os.environ.get('LEAPP_DATABASE_FORCE_SYNC_ON', None):
        audit.create_connection = disable_db_sync_decorator(audit.create_connection)


def restore_leapp_env_vars(context):
    """
    Restores leapp environment variables from the `IPUConfig` message.
    """
    messages = get_messages(('IPUConfig',), context)
    leapp_env_vars = json.loads((messages or [{}])[0].get('message', {}).get('data', '{}')).get('leapp_env_vars', {})
    for entry in leapp_env_vars:
        os.environ[entry['name']] = entry['value']


def archive_logfiles():
    """ Archive log files from a previous run of Leapp """
    cfg = get_config()

    if not os.path.isdir(cfg.get('files_to_archive', 'dir')):
        os.makedirs(cfg.get('files_to_archive', 'dir'))

    files_to_archive = [os.path.join(cfg.get('files_to_archive', 'dir'), f)
                        for f in cfg.get('files_to_archive', 'files').split(',')
                        if os.path.isfile(os.path.join(cfg.get('files_to_archive', 'dir'), f))]

    if not os.path.isdir(cfg.get('archive', 'dir')):
        os.makedirs(cfg.get('archive', 'dir'))

    if files_to_archive:
        if os.path.isdir(cfg.get('debug', 'dir')):
            files_to_archive.append(cfg.get('debug', 'dir'))

        now = datetime.now().strftime('%Y%m%d%H%M%S')
        archive_file = os.path.join(cfg.get('archive', 'dir'), 'leapp-{}-logs.tar.gz'.format(now))

        with tarfile.open(archive_file, "w:gz") as tar:
            for file_to_add in files_to_archive:
                tar.add(file_to_add)
                if os.path.isdir(file_to_add):
                    shutil.rmtree(file_to_add, ignore_errors=True)
                try:
                    os.remove(file_to_add)
                except OSError:
                    pass
            # leapp_db is not in files_to_archive, so that it does not get removed
            if os.path.isfile(cfg.get('database', 'path')):
                tar.add(cfg.get('database', 'path'))


def load_repositories_from(name, repo_path, manager=None):
    if get_config().has_option('repositories', name):
        repo_path = get_config().get('repositories', name)
    return find_and_scan_repositories(repo_path, manager=manager)


def load_repositories():
    manager = load_repositories_from('repo_path', '/etc/leapp/repo.d/', manager=None)
    manager.load()
    return manager


def fetch_last_upgrade_context(use_context=None):
    """
    :return: Context of the last execution
    """
    with get_connection(None) as db:
        if use_context:
            cursor = db.execute(
                "SELECT context, stamp, configuration FROM execution WHERE context = ?", (use_context,))
        else:
            cursor = db.execute(
                "SELECT context, stamp, configuration FROM execution WHERE kind = 'upgrade' ORDER BY id DESC LIMIT 1")
        row = cursor.fetchone()
        if row:
            return row[0], json.loads(row[2])
        return None, {}


def fetch_all_upgrade_contexts():
    """
    :return: All upgrade execution contexts
    """
    with get_connection(None) as db:
        cursor = db.execute(
            "SELECT context, stamp, configuration FROM execution WHERE kind = 'upgrade' ORDER BY id DESC")
        rows = cursor.fetchall()
        if rows:
            return rows
        return None


def get_last_phase(context):
    checkpoints = get_checkpoints(context=context)
    if checkpoints:
        return checkpoints[-1]['phase']
    return None


def check_env_and_conf(env_var, conf_var, configuration):
    """
    Checks whether the given environment variable or the given configuration value is set to '1'
    """
    return os.getenv(env_var, '0') == '1' or configuration.get(conf_var, '0') == '1'
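
# Example: with LEAPP_DEBUG=1 in the environment, or {'debug': '1'} in the stored
# configuration, check_env_and_conf('LEAPP_DEBUG', 'debug', configuration) returns True.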


def generate_report_files(context, report_schema):
    """
    Generates all report files for a specific leapp run (txt and json format)
    """
    cfg = get_config()
    report_txt, report_json = [os.path.join(cfg.get('report', 'dir'),
                                            'leapp-report.{}'.format(f)) for f in ['txt', 'json']]
    # fetch all report messages as a list of dicts
    messages = fetch_upgrade_report_messages(context)
    generate_report_file(messages, context, report_txt, report_schema)
    generate_report_file(messages, context, report_json, report_schema)


def get_cfg_files(section, cfg, must_exist=True):
    """
    Provide files from a particular config section
    """
    files = []
    for file_ in cfg.get(section, 'files').split(','):
        file_path = os.path.join(cfg.get(section, 'dir'), file_)
        if not must_exist or os.path.isfile(file_path):
            files.append(file_path)
    return files
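
# Example: with a config section where dir=/var/log/leapp and
# files=leapp-report.txt,leapp-report.json, get_cfg_files('report', cfg)
# returns the paths of those files that exist on disk.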


def warn_if_unsupported(configuration):
    env = os.environ
    if env.get('LEAPP_UNSUPPORTED', '0') == '1':
        devel_vars = {k: env[k] for k in env if k.startswith('LEAPP_DEVEL_')}
        report_unsupported(devel_vars, configuration["whitelist_experimental"])


def handle_output_level(args):
    """
    Set environment variables following the command line arguments.
    """
    os.environ['LEAPP_DEBUG'] = '1' if args.debug else os.getenv('LEAPP_DEBUG', '0')
    if os.environ['LEAPP_DEBUG'] == '1' or args.verbose:
        os.environ['LEAPP_VERBOSE'] = '1'
    else:
        os.environ['LEAPP_VERBOSE'] = os.getenv('LEAPP_VERBOSE', '0')


# NOTE(ivasilev) Please make sure you are not calling prepare_configuration after the first reboot.
# If called as `leapp upgrade --resume`, this will happily crash in the target version container for
# the latest supported release, because of the target_version discovery attempt.
def prepare_configuration(args):
    """Returns a configuration dict object while setting a few env vars as a side-effect"""
    if args.whitelist_experimental:
        args.whitelist_experimental = list(itertools.chain(*[i.split(',') for i in args.whitelist_experimental]))
        os.environ['LEAPP_EXPERIMENTAL'] = '1'
    else:
        os.environ['LEAPP_EXPERIMENTAL'] = '0'
    os.environ['LEAPP_UNSUPPORTED'] = '0' if os.getenv('LEAPP_UNSUPPORTED', '0') == '0' else '1'
    if args.no_rhsm:
        os.environ['LEAPP_NO_RHSM'] = '1'
    elif os.getenv('LEAPP_NO_RHSM') != '1':
        os.environ['LEAPP_NO_RHSM'] = os.getenv('LEAPP_DEVEL_SKIP_RHSM', '0')

    if args.no_insights_register:
        os.environ['LEAPP_NO_INSIGHTS_REGISTER'] = '1'

    if args.enablerepo:
        os.environ['LEAPP_ENABLE_REPOS'] = ','.join(args.enablerepo)

    if os.environ.get('LEAPP_NO_RHSM', '0') == '1' or args.no_rhsm_facts:
        os.environ['LEAPP_NO_RHSM_FACTS'] = '1'

    if args.channel:
        os.environ['LEAPP_TARGET_PRODUCT_CHANNEL'] = args.channel

    if args.iso:
        os.environ['LEAPP_TARGET_ISO'] = args.iso
    target_iso_path = os.environ.get('LEAPP_TARGET_ISO')
    if target_iso_path:
        # Make sure we convert relative paths into absolute ones while we know what the CWD is
        os.environ['LEAPP_TARGET_ISO'] = os.path.abspath(target_iso_path)

    if args.nogpgcheck:
        os.environ['LEAPP_NOGPGCHECK'] = '1'

    # Check the upgrade path and fail early if it's unsupported
    target_version, flavor = command_utils.vet_upgrade_path(args)
    os.environ['LEAPP_UPGRADE_PATH_TARGET_RELEASE'] = target_version
    os.environ['LEAPP_UPGRADE_PATH_FLAVOUR'] = flavor

    current_version = command_utils.get_os_release_version_id('/etc/os-release')
    os.environ['LEAPP_IPU_IN_PROGRESS'] = '{source}to{target}'.format(
        source=command_utils.get_major_version(current_version),
        target=command_utils.get_major_version(target_version)
    )
|
||||
|
||||
configuration = {
|
||||
'debug': os.getenv('LEAPP_DEBUG', '0'),
|
||||
'verbose': os.getenv('LEAPP_VERBOSE', '0'),
|
||||
'whitelist_experimental': args.whitelist_experimental or (),
|
||||
}
|
||||
return configuration
|
||||
|
||||
|
||||
def process_whitelist_experimental(repositories, workflow, configuration, logger=None):
|
||||
for actor_name in configuration.get('whitelist_experimental', ()):
|
||||
actor = repositories.lookup_actor(actor_name)
|
||||
if actor:
|
||||
workflow.whitelist_experimental_actor(actor)
|
||||
else:
|
||||
msg = 'No such Actor: {}'.format(actor_name)
|
||||
if logger:
|
||||
logger.error(msg)
|
||||
raise CommandError(msg)
|
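
# A hedged sketch of how the LEAPP_DEBUG/LEAPP_VERBOSE resolution in
# handle_output_level() above plays out; `FakeArgs` is a made-up stand-in for
# the parsed CLI arguments and LEAPP_DEBUG is assumed unset beforehand.
#
#   class FakeArgs(object):
#       debug = False
#       verbose = True
#
#   handle_output_level(FakeArgs())
#   assert os.environ['LEAPP_VERBOSE'] == '1'  # --verbose implies verbose output
#   assert os.environ['LEAPP_DEBUG'] == '0'    # debug stays off unless requested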
@ -0,0 +1,97 @@
import logging
import os

from leapp.repository.manager import RepositoryManager
from leapp.repository.scan import find_and_scan_repositories
from leapp.utils.repository import find_repository_basedir, get_repository_id

logger = logging.getLogger(__name__)
logging.getLogger("asyncio").setLevel(logging.INFO)
logging.getLogger("parso").setLevel(logging.INFO)


def _load_and_add_repo(manager, repo_path):
    repo = find_and_scan_repositories(
        repo_path,
        include_locals=True
    )
    unloaded = set()
    loaded = {r.repo_id for r in manager.repos}
    if hasattr(repo, 'repos'):
        for repo in repo.repos:
            if not manager.repo_by_id(repo.repo_id):
                manager.add_repo(repo)
                unloaded.add(repo.repo_id)
    else:
        manager.add_repo(repo)
    if not loaded:
        manager.load(skip_actors_discovery=True)
    else:
        for repo_id in unloaded:
            manager.repo_by_id(repo_id).load(skip_actors_discovery=True)


def pytest_collectstart(collector):
    if collector.nodeid:
        current_repo_basedir = find_repository_basedir(str(collector.fspath))
        if not current_repo_basedir:
            # This is not a repository
            return
        if not hasattr(collector.session, "leapp_repository"):
            collector.session.leapp_repository = RepositoryManager()
            collector.session.repo_base_dir = current_repo_basedir
            _load_and_add_repo(collector.session.leapp_repository, current_repo_basedir)
        else:
            if not collector.session.leapp_repository.repo_by_id(
                get_repository_id(current_repo_basedir)
            ):
                _load_and_add_repo(collector.session.leapp_repository, current_repo_basedir)

        # we're forcing the actor context switch only when traversing a new actor
        if "/actors/" in str(collector.fspath) and (
            not hasattr(collector.session, "current_actor_path")
            or collector.session.current_actor_path + os.sep
            not in str(collector.fspath)
        ):
            actor = None
            for a in collector.session.leapp_repository.actors:
                if a.full_path == collector.fspath.dirpath().dirname:
                    actor = a
                    break

            if not actor:
                logger.info("No actor found, exiting collection...")
                return
            # we need to tear down the context from the previous actor
            try:
                collector.session.current_actor_context.__exit__(
                    None, None, None
                )
            except AttributeError:
                pass
            else:
                logger.info(
                    "Actor %r context teardown complete",
                    collector.session.current_actor.name,
                )

            logger.info("Injecting actor context for %r", actor.name)
            collector.session.current_actor = actor
            collector.session.current_actor_context = actor.injected_context()
            collector.session.current_actor_context.__enter__()
            collector.session.current_actor_path = (
                collector.session.current_actor.full_path
            )
            logger.info("Actor %r context injected", actor.name)


def pytest_runtestloop(session):
    try:
        session.current_actor_context.__exit__(None, None, None)
        logger.info(
            "Actor %r context teardown complete", session.current_actor.name,
        )
    except AttributeError:
        pass
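
# A hedged sketch of the pattern this conftest enables: tests living under
# repos/<repo>/actors/<actor>/tests/ run inside that actor's injected context,
# so the per-actor mocking helpers used throughout this commit resolve
# correctly. Names below mirror tests elsewhere in this commit;
# `my_actor_lib` is hypothetical.
#
#   from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
#   from leapp.libraries.stdlib import api
#
#   def test_example(monkeypatch):
#       monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
#       monkeypatch.setattr(api, 'produce', produce_mocked())
#       my_actor_lib.process()  # exercise the actor library under the mocks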
@ -0,0 +1,3 @@
# Leapp repository documentation

The Leapp repository documentation has been moved to [Read the Docs](https://leapp.readthedocs.io/).
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -0,0 +1,2 @@
### List of packages (each on a new line) to be added to the upgrade transaction
### Signed packages which are already installed will be skipped
@ -0,0 +1,7 @@
### List of packages (each on a new line) to be kept in the upgrade transaction

leapp
python2-leapp
python3-leapp
leapp-repository
snactor
@ -0,0 +1,3 @@
### List of packages (each on a new line) to be removed from the upgrade transaction
# Removing the initial-setup package to avoid it asking for EULA acceptance during the upgrade - OAMG-1531
initial-setup
@ -0,0 +1,279 @@
%global leapp_datadir %{_datadir}/leapp-repository
%global repositorydir %{leapp_datadir}/repositories
%global custom_repositorydir %{leapp_datadir}/custom-repositories

%define leapp_repo_deps 9

%if 0%{?rhel} == 7
%define leapp_python_sitelib %{python2_sitelib}
%define lpr_name leapp-upgrade-el7toel8
%else
%define leapp_python_sitelib %{python3_sitelib}
%define lpr_name leapp-upgrade-el8toel9

# This drops autogenerated deps on
# - /usr/libexec/platform-python (rhel-8 buildroot)
# - /usr/bin/python3.x (epel-8 buildroot)
# - python(abi) = 3.x
# Each of these leads to the removal of leapp rpms as python changes between
# major versions of RHEL
%global __requires_exclude ^python\\(abi\\) = 3\\..+|/usr/libexec/platform-python|/usr/bin/python.*
%endif


# TODO: not sure whether it's required nowadays. Let's check it and drop
# the whole block if not.
%if 0%{?rhel} == 7
# Defining the py_byte_compile macro because it is not defined in old rpm (el7)
# Only defined for python2 since python3 is not used in RHEL 7
%{!?py_byte_compile: %global py_byte_compile py2_byte_compile() {\
python_binary="%1"\
bytecode_compilation_path="%2"\
find $bytecode_compilation_path -type f -a -name "*.py" -print0 | xargs -0 $python_binary -c 'import py_compile, sys; [py_compile.compile(f, dfile=f.partition("$RPM_BUILD_ROOT")[2]) for f in sys.argv[1:]]' || :\
find $bytecode_compilation_path -type f -a -name "*.py" -print0 | xargs -0 $python_binary -O -c 'import py_compile, sys; [py_compile.compile(f, dfile=f.partition("$RPM_BUILD_ROOT")[2]) for f in sys.argv[1:]]' || :\
}\
py2_byte_compile "%1" "%2"}
%endif


# We keep the leapp-repository name for the component, however we do not plan
# to create such an rpm. Instead, we are going to introduce new naming for
# RHEL 8+ packages to be consistent with other leapp projects in the future.

Name:       leapp-repository
Version:    0.19.0
Release:    1%{?dist}
Summary:    Repositories for leapp

License:    ASL 2.0
URL:        https://oamg.github.io/leapp/
Source0:    https://github.com/oamg/%{name}/archive/v%{version}.tar.gz#/%{name}-%{version}.tar.gz
Source1:    deps-pkgs.tar.gz

# NOTE: Our packages must be noarch. Do not drop this in any way.
BuildArch:  noarch

### PATCHES HERE
# Patch0001: filename.patch


%description
%{summary}


# This is the real RPM with the leapp repositories content
%package -n %{lpr_name}
Summary: Leapp repositories for the in-place upgrade

%if 0%{?rhel} == 7
######### RHEL 7 ############
BuildRequires: python-devel
Requires: python2-leapp

# We should not drop this on RHEL 7 because of compatibility reasons
Obsoletes: leapp-repository-data <= 0.6.1
Provides: leapp-repository-data <= 0.6.1

# Former leapp subpackage that is part of the sos package since RHEL 7.8
Obsoletes: leapp-repository-sos-plugin <= 0.9.0

# Set the conflict to be sure this RPM is not upgraded automatically to
# the one from the target (upgraded) RHEL. The RPM has to stay untouched
# during the whole IPU process.
# The manual removal of the RPM is required after the IPU
Conflicts: leapp-upgrade-el8toel9

%else
######### RHEL 8 ############
BuildRequires: python3-devel
Requires: python3-leapp

# Same as the conflict above - we want to be sure our packages are untouched
# during the whole IPU process
Conflicts: leapp-upgrade-el7toel8

%endif

# IMPORTANT: every time the requirements are changed, increment the number by one
# - same for Provides in the deps subpackage
Requires: leapp-repository-dependencies = %{leapp_repo_deps}

# IMPORTANT: this is a capability provided by the leapp framework rpm.
# Check against that 'version' instead of the real framework rpm version.
Requires: leapp-framework >= 5.0, leapp-framework < 6

# Since we provide sub-commands for the leapp utility, we expect the leapp
# tool to be installed as well.
Requires: leapp

# Used to determine the RHEL version of a given target RHEL installation image -
# uncompressing the redhat-release package from the ISO.
Requires: cpio

# The leapp-repository rpm is renamed to %%{lpr_name}
Obsoletes: leapp-repository < 0.14.0-%{release}
Provides: leapp-repository = %{version}-%{release}

# Provide "leapp-upgrade" for the user's convenience. Users will be pointed
# to install "leapp-upgrade" in the official docs.
Provides: leapp-upgrade = %{version}-%{release}

# Provide leapp-commands so the framework can refer to them when customers
# do not have the particular leapp-repositories installed
Provides: leapp-command(answer)
Provides: leapp-command(preupgrade)
Provides: leapp-command(upgrade)
Provides: leapp-command(rerun)
Provides: leapp-command(list-runs)


%description -n %{lpr_name}
Leapp repositories for the in-place upgrade to the next major version
of the Red Hat Enterprise Linux system.


# This metapackage should contain all RPM dependencies excluding deps on *leapp*
# RPMs. This metapackage will be automatically replaced during the upgrade
# to satisfy dependencies with RPMs from the target system.
%package -n %{lpr_name}-deps
Summary: Meta-package with system dependencies of the %{lpr_name} package

# The package has been renamed, so let's obsolete the old one
Obsoletes: leapp-repository-deps < 0.14.0-%{release}

# IMPORTANT: every time the requirements are changed, increment the number by one
# - same for Requires in the main package
Provides: leapp-repository-dependencies = %{leapp_repo_deps}
##################################################
# Real requirements for the leapp-repository HERE
##################################################
Requires: dnf >= 4
Requires: pciutils
%if 0%{?rhel} && 0%{?rhel} == 7
# Required to gather system facts about SELinux
Requires: libselinux-python
Requires: python-pyudev
# required by SELinux actors
Requires: policycoreutils-python
# Required to fetch leapp data
Requires: python-requests

%else
############# RHEL 8 dependencies (when the source system is RHEL 8) ##########
# systemd-nspawn utility
Requires: systemd-container
Requires: python3-pyudev
# Required to fetch leapp data
Requires: python3-requests
# Required because the code is kept Py2 & Py3 compatible
Requires: python3-six
# required by SELinux actors
Requires: policycoreutils-python-utils
# required by systemfacts, and several other actors
Requires: procps-ng
Requires: kmod
# on RHEL 8+ dracut does not have to be present on the system all the time,
# and a missing dracut could be a killing situation for us :)
Requires: dracut

# Required to scan NetworkManagerConnection (e.g. to recognize secrets)
# NM is requested to be used on RHEL 8+ systems
Requires: NetworkManager-libnm
Requires: python3-gobject-base

%endif
##################################################
# end requirement
##################################################


%description -n %{lpr_name}-deps
%{summary}


%prep
%setup -n %{name}-%{version}
%setup -q -n %{name}-%{version} -D -T -a 1

# APPLY PATCHES HERE
# %%patch0001 -p1


%build
%if 0%{?rhel} == 7
cp -a leapp*deps*el8.noarch.rpm repos/system_upgrade/el7toel8/files/bundled-rpms/
%else
cp -a leapp*deps*el9.noarch.rpm repos/system_upgrade/el8toel9/files/bundled-rpms/
%endif


%install
install -m 0755 -d %{buildroot}%{custom_repositorydir}
install -m 0755 -d %{buildroot}%{repositorydir}
cp -r repos/* %{buildroot}%{repositorydir}/
install -m 0755 -d %{buildroot}%{_sysconfdir}/leapp/repos.d/
install -m 0755 -d %{buildroot}%{_sysconfdir}/leapp/transaction/
install -m 0755 -d %{buildroot}%{_sysconfdir}/leapp/files/
install -m 0644 etc/leapp/transaction/* %{buildroot}%{_sysconfdir}/leapp/transaction
install -m 0644 etc/leapp/files/* %{buildroot}%{_sysconfdir}/leapp/files

# install CLI commands for the leapp utility on the expected path
install -m 0755 -d %{buildroot}%{leapp_python_sitelib}/leapp/cli/
cp -r commands %{buildroot}%{leapp_python_sitelib}/leapp/cli/
rm -rf %{buildroot}%{leapp_python_sitelib}/leapp/cli/commands/tests

# Remove irrelevant repositories - we don't want to ship them for the particular
# RHEL version
%if 0%{?rhel} == 7
rm -rf %{buildroot}%{repositorydir}/system_upgrade/el8toel9
%else
rm -rf %{buildroot}%{repositorydir}/system_upgrade/el7toel8
%endif

# remove component/unit tests, Makefiles, ... stuff related to testing only
rm -rf %{buildroot}%{repositorydir}/common/actors/testactor
find %{buildroot}%{repositorydir}/common -name "test.py" -delete
rm -rf `find %{buildroot}%{repositorydir} -name "tests" -type d`
find %{buildroot}%{repositorydir} -name "Makefile" -delete

for DIRECTORY in $(find %{buildroot}%{repositorydir}/ -mindepth 1 -maxdepth 1 -type d);
do
    REPOSITORY=$(basename $DIRECTORY)
    echo "Enabling repository $REPOSITORY"
    ln -s %{repositorydir}/$REPOSITORY %{buildroot}%{_sysconfdir}/leapp/repos.d/$REPOSITORY
done;

# __python2 could be problematic on systems with Python3 only, but we have
# no choice as __python became an error on F33+:
# https://fedoraproject.org/wiki/Changes/PythonMacroError
%if 0%{?rhel} == 7
%py_byte_compile %{__python2} %{buildroot}%{repositorydir}/*
%else
%py_byte_compile %{__python3} %{buildroot}%{repositorydir}/*
%endif


%files -n %{lpr_name}
%doc README.md
%license LICENSE
%dir %{_sysconfdir}/leapp/transaction
%dir %{_sysconfdir}/leapp/files
%dir %{leapp_datadir}
%dir %{repositorydir}
%dir %{custom_repositorydir}
%dir %{leapp_python_sitelib}/leapp/cli/commands
%config %{_sysconfdir}/leapp/files/*
%{_sysconfdir}/leapp/repos.d/*
%{_sysconfdir}/leapp/transaction/*
%{repositorydir}/*
%{leapp_python_sitelib}/leapp/cli/commands/*


%files -n %{lpr_name}-deps
# no files here


# DO NOT TOUCH SECTION BELOW IN UPSTREAM
%changelog
* Mon Apr 16 2018 Vinzenz Feenstra <evilissimo@redhat.com> - %{version}-%{release}
- Initial RPM
@ -0,0 +1,120 @@
# The %%{rhel} macro just has to be specified
%global lrdname leapp-repository-deps-el%{rhel}
%global ldname leapp-deps-el%{rhel}

%if 0%{?rhel} == 8
%define lpr_name_src leapp-upgrade-el7toel8-deps
%else
%define lpr_name_src leapp-upgrade-el8toel9-deps
%endif


%define leapp_repo_deps 9
%define leapp_framework_deps 5

# NOTE: the Version contains the %{rhel} macro just for convenience, so there
# is always an upgrade path between newer and older deps packages. So for
# packages built for RHEL 8 it's 5.0.8, for RHEL 9 it's 5.0.9, etc.
# Not sure how much it will be beneficial in the end, but why not?

# TODO: keeping the name of the specfile & srpm leapp-el7toel8-deps even when
# it could be confusing as we start to build for el8toel9.
Name:       leapp-el7toel8-deps
Version:    5.0.%{rhel}
Release:    1%{?dist}
Summary:    Dependencies for *leapp* packages

# NOTE: Our packages must be noarch. Do not drop this in any way.
BuildArch:  noarch

License:    ASL 2.0
URL:        https://oamg.github.io/leapp/

%description
%{summary}

##################################################
# DEPS FOR LEAPP REPOSITORY ON RHEL 8+ (IPU target system)
##################################################
%package -n %{lrdname}
Summary: Meta-package with system dependencies for the leapp repository
Provides: leapp-repository-dependencies = %{leapp_repo_deps}

# NOTE: we can drop this one Obsoletes later, but keeping it for now...
Obsoletes: leapp-repository-deps
Obsoletes: %{lpr_name_src}

Requires: dnf >= 4
Requires: pciutils
Requires: python3
Requires: python3-pyudev
# required by SELinux actors
Requires: policycoreutils-python-utils

# we need the dnf configuration manager to check and modify the configuration.
# The package is first installed inside the target userspace container.
# Then we ensure the rpm will be present after the upgrade transaction.
Requires: dnf-command(config-manager)

# It should not happen that dracut is not present on the target system,
# but as dracut is removable on RHEL 8+, let's rather require it to be really
# sure
Requires: dracut

# Used to determine the RHEL version of a given target RHEL installation image -
# uncompressing the redhat-release package from the ISO.
Requires: cpio

# just to be sure that /etc/modprobe.d is present
Requires: kmod


%description -n %{lrdname}
%{summary}

##################################################
# DEPS FOR LEAPP FRAMEWORK ON RHEL 8+ (IPU target system)
##################################################
%package -n %{ldname}
Summary: Meta-package with system dependencies for the leapp framework
Provides: leapp-framework-dependencies = %{leapp_framework_deps}
Obsoletes: leapp-deps

Requires: findutils

%if 0%{?rhel} == 8
# Keep these dependencies for now, as we may need them to finish the
# RPMUpgradePhase phase correctly (e.g. postun scriptlets could need py2)
Requires: python2-six
Requires: python2-setuptools
Requires: python2-requests
%endif

# Python3 deps
Requires: python3
Requires: python3-six
Requires: python3-setuptools
Requires: python3-requests


%description -n %{ldname}
%{summary}

%prep

%build

%install

# do not create main packages
#%files

%files -n %{lrdname}
# no files here

%files -n %{ldname}
# no files here

%changelog
* Tue Jan 22 2019 Petr Stodulka <pstodulk@redhat.com> - %{version}-%{release}
- Initial rpm
@ -0,0 +1,10 @@
[pytest]
addopts = -svv
testpaths = repos/
log_cli = True
log_cli_level = DEBUG
log_cli_format = | %(asctime)s | %(name)s | %(levelname)s | %(filename)s | %(message)s
python_files=
    test_*.py
    unit_test_*.py
    component_test_*.py
@ -0,0 +1 @@
{"messages": {}, "name": "common", "id": "efcf9016-f2d1-4609-9329-a298e6587b3c"}
@ -0,0 +1,6 @@

[repositories]
repo_path=${project:root_dir}

[database]
path=${project:state_dir}/leapp.db
@ -0,0 +1,5 @@
from leapp.topics import Topic


class SystemInfoTopic(Topic):
    name = 'system_info'
@ -0,0 +1 @@
{"name": "system_upgrade_common", "id": "644900a5-c347-43a3-bfab-f448f46d9647", "repos": ["efcf9016-f2d1-4609-9329-a298e6587b3c"]}
@ -0,0 +1,6 @@

[repositories]
repo_path=${repository:root_dir}

[database]
path=${repository:state_dir}/leapp.db
@ -0,0 +1,39 @@
import os

from leapp.actors import Actor
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor.addupgradebootentry import add_boot_entry, fix_grub_config_error
from leapp.models import BootContent, FirmwareFacts, GrubConfigError, TargetKernelCmdlineArgTasks, TransactionDryRun
from leapp.tags import InterimPreparationPhaseTag, IPUWorkflowTag


class AddUpgradeBootEntry(Actor):
    """
    Add a new boot entry for the Leapp-provided initramfs.

    Using the new boot entry, Leapp can continue the upgrade process in the initramfs after reboot
    """

    name = 'add_upgrade_boot_entry'
    consumes = (BootContent, GrubConfigError, FirmwareFacts, TransactionDryRun)
    produces = (TargetKernelCmdlineArgTasks,)
    tags = (IPUWorkflowTag, InterimPreparationPhaseTag)

    def process(self):
        for grub_config_error in self.consume(GrubConfigError):
            if grub_config_error.error_detected:
                fix_grub_config_error('/etc/default/grub', grub_config_error.error_type)

        configs = None
        ff = next(self.consume(FirmwareFacts), None)
        if not ff:
            raise StopActorExecutionError(
                'Could not identify system firmware',
                details={'details': 'Actor did not receive FirmwareFacts message.'}
            )

        # related to an issue with hybrid BIOS and UEFI images
        # https://bugzilla.redhat.com/show_bug.cgi?id=1667028
        if ff.firmware == 'bios' and os.path.ismount('/boot/efi') and os.path.isfile('/boot/efi/EFI/redhat/grub.cfg'):
            configs = ['/boot/grub2/grub.cfg', '/boot/efi/EFI/redhat/grub.cfg']
        add_boot_entry(configs)
@ -0,0 +1,116 @@
import os
import re

from leapp.exceptions import StopActorExecutionError
from leapp.libraries.common.config import architecture
from leapp.libraries.stdlib import api, CalledProcessError, run
from leapp.models import BootContent, KernelCmdlineArg, TargetKernelCmdlineArgTasks


def add_boot_entry(configs=None):
    debug = 'debug' if os.getenv('LEAPP_DEBUG', '0') == '1' else ''
    enable_network = os.getenv('LEAPP_DEVEL_INITRAM_NETWORK') in ('network-manager', 'scripts')
    ip_arg = ' ip=dhcp rd.neednet=1' if enable_network else ''
    kernel_dst_path, initram_dst_path = get_boot_file_paths()
    _remove_old_upgrade_boot_entry(kernel_dst_path, configs=configs)
    try:
        cmd = [
            '/usr/sbin/grubby',
            '--add-kernel', '{0}'.format(kernel_dst_path),
            '--initrd', '{0}'.format(initram_dst_path),
            '--title', 'RHEL-Upgrade-Initramfs',
            '--copy-default',
            '--make-default',
            '--args', '{DEBUG}{NET} enforcing=0 rd.plymouth=0 plymouth.enable=0'.format(DEBUG=debug, NET=ip_arg)
        ]
        if configs:
            for config in configs:
                run(cmd + ['-c', config])
        else:
            run(cmd)

        if architecture.matches_architecture(architecture.ARCH_S390X):
            # on s390x we need to call zipl explicitly because of an issue in grubby,
            # otherwise the new boot entry will not be set as default
            # See https://bugzilla.redhat.com/show_bug.cgi?id=1764306
            run(['/usr/sbin/zipl'])

        if debug:
            # The kernelopts for the target kernel are generated based on the cmdline used in the upgrade initramfs,
            # therefore, if we enabled debug above and the original system did not have the debug kernelopt, we
            # need to explicitly remove it from the target OS boot entry.
            # NOTE(mhecko): This will also unconditionally remove the debug kernelopt if the source system used it.
            api.produce(TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')]))

        # NOTE(mmatuska): This will remove the option even if the source system had it set.
        # However enforcing=0 shouldn't be set persistently anyway.
        api.produce(TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')]))

    except CalledProcessError as e:
        raise StopActorExecutionError(
            'Cannot configure bootloader.',
            details={'details': '{}: {}'.format(str(e), e.stderr)}
        )


def _remove_old_upgrade_boot_entry(kernel_dst_path, configs=None):
    """
    Remove the entry referring to the upgrade kernel.

    We have to ensure there are no duplicate boot entries. The main reason is
    that zipl crashes when duplicate entries exist.
    """
    cmd = [
        '/usr/sbin/grubby',
        '--remove-kernel', '{0}'.format(kernel_dst_path)
    ]
    try:
        if configs:
            for config in configs:
                run(cmd + ['-c', config])
        else:
            run(cmd)
    except CalledProcessError:
        # TODO(pstodulk): instead of this, check whether the entry exists or not,
        # so no warning about a problem is reported (an info log could be present
        # if the entry is missing).
        api.current_logger().warning(
            'Could not remove {} entry. May be ignored if the entry did not exist.'.format(kernel_dst_path)
        )


def get_boot_file_paths():
    boot_content_msgs = api.consume(BootContent)
    boot_content = next(boot_content_msgs, None)
    if list(boot_content_msgs):
        api.current_logger().warning('Unexpectedly received more than one BootContent message.')
    if not boot_content:
        raise StopActorExecutionError('Could not create a GRUB boot entry for the upgrade initramfs',
                                      details={'details': 'Did not receive a message about the leapp-provided '
                                                          'kernel and initramfs'})
    # Returning information about the kernel hmac file path is needless as it is not used when adding the boot entry
    return boot_content.kernel_path, boot_content.initram_path


def write_to_file(filename, content):
    with open(filename, 'w') as f:
        f.write(content)


def fix_grub_config_error(conf_file, error_type):
    with open(conf_file, 'r') as f:
        config = f.read()

    if error_type == 'GRUB_CMDLINE_LINUX syntax':
        # move the misplaced '"' to the end
        pattern = r'GRUB_CMDLINE_LINUX=.+?(?=GRUB|\Z)'
        original_value = re.search(pattern, config, re.DOTALL).group()
        parsed_value = original_value.split('"')
        new_value = '{KEY}"{VALUE}"{END}'.format(KEY=parsed_value[0], VALUE=''.join(parsed_value[1:]).rstrip(),
                                                 END=original_value[-1])

        config = config.replace(original_value, new_value)
        write_to_file(conf_file, config)

    elif error_type == 'missing newline':
        write_to_file(conf_file, config + '\n')
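
# A worked example of the 'GRUB_CMDLINE_LINUX syntax' branch above (input taken
# from the grub_test fixtures below; the second fixture is the "wrong" variant
# and the expected output matches the "fixed" one):
#
#   before: GRUB_CMDLINE_LINUX="console=tty0 crashkernel=auto" console=ttyS0,115200n8 no_timer_check net.ifnames=0
#   after:  GRUB_CMDLINE_LINUX="console=tty0 crashkernel=auto console=ttyS0,115200n8 no_timer_check net.ifnames=0"
#
# i.e. the stray closing quote is moved past the trailing arguments.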
@ -0,0 +1,7 @@
GRUB_TIMEOUT=5
GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
GRUB_DEFAULT=saved
GRUB_DISABLE_SUBMENU=true
GRUB_TERMINAL_OUTPUT="console"
GRUB_CMDLINE_LINUX="console=tty0 crashkernel=auto console=ttyS0,115200n8 no_timer_check net.ifnames=0"
GRUB_DISABLE_RECOVERY="true"
@ -0,0 +1,7 @@
GRUB_TIMEOUT=5
GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
GRUB_DEFAULT=saved
GRUB_DISABLE_SUBMENU=true
GRUB_TERMINAL_OUTPUT="console"
GRUB_CMDLINE_LINUX="console=tty0 crashkernel=auto" console=ttyS0,115200n8 no_timer_check net.ifnames=0
GRUB_DISABLE_RECOVERY="true"
@ -0,0 +1,7 @@
GRUB_TIMEOUT=5
GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
GRUB_DEFAULT=saved
GRUB_DISABLE_SUBMENU=true
GRUB_TERMINAL_OUTPUT="console"
GRUB_CMDLINE_LINUX="console=tty0 crashkernel=auto console=ttyS0,115200n8 no_timer_check net.ifnames=0"
GRUB_DISABLE_RECOVERY="true"
@ -0,0 +1,7 @@
GRUB_TIMEOUT=5
GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
GRUB_DEFAULT=saved
GRUB_DISABLE_SUBMENU=true
GRUB_TERMINAL_OUTPUT="console"
GRUB_CMDLINE_LINUX="console=tty0 crashkernel=auto console=ttyS0,115200n8 no_timer_check net.ifnames=0"
GRUB_DISABLE_RECOVERY="true"
@ -0,0 +1,169 @@
import os
from collections import namedtuple

import pytest

from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor import addupgradebootentry
from leapp.libraries.common.config.architecture import ARCH_S390X, ARCH_X86_64
from leapp.libraries.common.testutils import CurrentActorMocked, produce_mocked
from leapp.libraries.stdlib import api
from leapp.models import BootContent, KernelCmdlineArg, TargetKernelCmdlineArgTasks

CUR_DIR = os.path.dirname(os.path.abspath(__file__))


class run_mocked(object):
    def __init__(self):
        self.args = []

    def __call__(self, args, split=False):
        self.args.append(args)


class write_to_file_mocked(object):
    def __init__(self):
        self.content = None

    def __call__(self, filename, content):
        self.content = content


CONFIGS = ['/boot/grub2/grub.cfg', '/boot/efi/EFI/redhat/grub.cfg']

RunArgs = namedtuple('RunArgs', 'args_remove args_add args_zipl args_len')

run_args_remove = [
    '/usr/sbin/grubby',
    '--remove-kernel', '/abc'
]

run_args_add = [
    '/usr/sbin/grubby',
    '--add-kernel', '/abc',
    '--initrd', '/def',
    '--title', 'RHEL-Upgrade-Initramfs',
    '--copy-default',
    '--make-default',
    '--args',
    'debug enforcing=0 rd.plymouth=0 plymouth.enable=0'
]

run_args_zipl = ['/usr/sbin/zipl']


@pytest.mark.parametrize('run_args, arch', [
    # non s390x
    (RunArgs(run_args_remove, run_args_add, None, 2), ARCH_X86_64),
    # s390x
    (RunArgs(run_args_remove, run_args_add, run_args_zipl, 3), ARCH_S390X),
    # config file specified
    (RunArgs(run_args_remove, run_args_add, None, 2), ARCH_X86_64),
])
def test_add_boot_entry(monkeypatch, run_args, arch):
    def get_boot_file_paths_mocked():
        return '/abc', '/def'

    monkeypatch.setattr(addupgradebootentry, 'get_boot_file_paths', get_boot_file_paths_mocked)
    monkeypatch.setattr(api, 'produce', produce_mocked())
    monkeypatch.setenv('LEAPP_DEBUG', '1')
    monkeypatch.setattr(addupgradebootentry, 'run', run_mocked())
    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(arch))

    addupgradebootentry.add_boot_entry()

    assert len(addupgradebootentry.run.args) == run_args.args_len
    assert addupgradebootentry.run.args[0] == run_args.args_remove
    assert addupgradebootentry.run.args[1] == run_args.args_add
    assert api.produce.model_instances == [
        TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')]),
        TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')])
    ]

    if run_args.args_zipl:
        assert addupgradebootentry.run.args[2] == run_args.args_zipl


@pytest.mark.parametrize('is_leapp_invoked_with_debug', [True, False])
def test_debug_kernelopt_removal_task_production(monkeypatch, is_leapp_invoked_with_debug):
    def get_boot_file_paths_mocked():
        return '/abc', '/def'

    monkeypatch.setattr(addupgradebootentry, 'get_boot_file_paths', get_boot_file_paths_mocked)
    monkeypatch.setattr(api, 'produce', produce_mocked())
    monkeypatch.setenv('LEAPP_DEBUG', '1' if is_leapp_invoked_with_debug else '0')
    monkeypatch.setattr(addupgradebootentry, 'run', run_mocked())

    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())

    addupgradebootentry.add_boot_entry()

    expected_produced_messages = []
    if is_leapp_invoked_with_debug:
        expected_produced_messages = [TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')])]

    expected_produced_messages.append(
        TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')])
    )

    assert api.produce.model_instances == expected_produced_messages


def test_add_boot_entry_configs(monkeypatch):
    def get_boot_file_paths_mocked():
        return '/abc', '/def'

    monkeypatch.setattr(addupgradebootentry, 'get_boot_file_paths', get_boot_file_paths_mocked)
    monkeypatch.setenv('LEAPP_DEBUG', '1')
    monkeypatch.setattr(addupgradebootentry, 'run', run_mocked())
    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
    monkeypatch.setattr(api, 'produce', produce_mocked())

    addupgradebootentry.add_boot_entry(CONFIGS)

    assert len(addupgradebootentry.run.args) == 4
    assert addupgradebootentry.run.args[0] == run_args_remove + ['-c', CONFIGS[0]]
    assert addupgradebootentry.run.args[1] == run_args_remove + ['-c', CONFIGS[1]]
    assert addupgradebootentry.run.args[2] == run_args_add + ['-c', CONFIGS[0]]
    assert addupgradebootentry.run.args[3] == run_args_add + ['-c', CONFIGS[1]]
    assert api.produce.model_instances == [
        TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='debug')]),
        TargetKernelCmdlineArgTasks(to_remove=[KernelCmdlineArg(key='enforcing', value='0')])
    ]


def test_get_boot_file_paths(monkeypatch):
    # BootContent message available
    def consume_message_mocked(*models):
        yield BootContent(kernel_path='/ghi', initram_path='/jkl', kernel_hmac_path='/path')

    monkeypatch.setattr('leapp.libraries.stdlib.api.consume', consume_message_mocked)

    kernel_path, initram_path = addupgradebootentry.get_boot_file_paths()

    assert kernel_path == '/ghi' and initram_path == '/jkl'

    # No BootContent message available
    def consume_no_message_mocked(*models):
        yield None

    monkeypatch.setattr('leapp.libraries.stdlib.api.consume', consume_no_message_mocked)

    with pytest.raises(StopActorExecutionError):
        addupgradebootentry.get_boot_file_paths()


@pytest.mark.parametrize(
    ('error_type', 'test_file_name'),
    [
        ('GRUB_CMDLINE_LINUX syntax', 'grub_test'),
        ('missing newline', 'grub_test_newline')
    ]
)
def test_fix_grub_config_error(monkeypatch, error_type, test_file_name):
    monkeypatch.setattr(addupgradebootentry, 'write_to_file', write_to_file_mocked())
    addupgradebootentry.fix_grub_config_error(os.path.join(CUR_DIR, 'files/{}.wrong'.format(test_file_name)),
                                              error_type)

    with open(os.path.join(CUR_DIR, 'files/{}.fixed'.format(test_file_name))) as f:
        assert addupgradebootentry.write_to_file.content == f.read()
@ -0,0 +1,18 @@
from leapp.actors import Actor
from leapp.libraries.common import dnfplugin
from leapp.models import DNFWorkaround
from leapp.tags import IPUWorkflowTag, PreparationPhaseTag


class ApplyTransactionWorkarounds(Actor):
    """
    Executes registered workaround scripts on the system before the upgrade transaction
    """

    name = 'applytransactionworkarounds'
    consumes = (DNFWorkaround,)
    produces = ()
    tags = (IPUWorkflowTag, PreparationPhaseTag)

    def process(self):
        dnfplugin.apply_workarounds()
@ -0,0 +1,61 @@
import os

from leapp.libraries.common.dnfplugin import api, apply_workarounds, mounting
from leapp.libraries.common.testutils import CurrentActorMocked
from leapp.models import DNFWorkaround


class ShowMessageCurrentActorMocked(CurrentActorMocked):
    def __init__(self, *args, **kwargs):
        super(ShowMessageCurrentActorMocked, self).__init__(*args, **kwargs)
        self._show_messages = []

    @property
    def show_messages(self):
        return self._show_messages

    def show_message(self, message):
        self._show_messages.append(message)


class MockedNotIsolatedActions(object):
    def __init__(self):
        self.called = 0
        self.args = None

    def call(self, args):
        self.called += 1
        self.args = args
        return {'stdout': ''}

    def __call__(self, *args, **kwargs):
        return self


def _get_tool_path(name):
    for directory in os.getenv('LEAPP_COMMON_TOOLS', '').split(':'):
        full_path = os.path.join(directory, name)
        if os.path.isfile(full_path):
            return full_path
    return None


def test_prepare_yum_config(monkeypatch):
    actions = MockedNotIsolatedActions()
    monkeypatch.setattr(api, 'get_tool_path', _get_tool_path)
    monkeypatch.setattr(mounting, 'NotIsolatedActions', actions)
    display_name = 'Test Action Handle Yum Config'
    actor = ShowMessageCurrentActorMocked(
        msgs=(
            DNFWorkaround(
                display_name=display_name,
                script_path='/your/path/might/vary/handleyumconfig'
            ),
        ),
    )
    monkeypatch.setattr(api, 'current_actor', actor)
    apply_workarounds()
    assert actions.called == 1
    assert os.path.basename(actions.args[-1]) == 'handleyumconfig'
    assert actor.show_messages and len(actor.show_messages) == 1
    assert display_name in actor.show_messages[0]
@ -0,0 +1,20 @@
from leapp.actors import Actor
from leapp.libraries.actor.baculacheck import report_installed_packages
from leapp.models import InstalledRedHatSignedRPM, Report
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag


class BaculaCheck(Actor):
    """
    Actor checking for the presence of a Bacula installation.

    Provides the user with information related to upgrading systems
    with Bacula installed.
    """
    name = 'bacula_check'
    consumes = (InstalledRedHatSignedRPM,)
    produces = (Report,)
    tags = (ChecksPhaseTag, IPUWorkflowTag)

    def process(self):
        report_installed_packages()
@ -0,0 +1,50 @@
from leapp import reporting
from leapp.libraries.common.rpms import has_package
from leapp.libraries.stdlib import api
from leapp.models import InstalledRedHatSignedRPM

# Summary for the bacula-director report
report_director_inst_summary = (
    'Bacula director component will be upgraded. Since the new version is'
    ' incompatible with the current version, it is necessary to proceed'
    ' with additional steps for the complete upgrade of the Bacula backup'
    ' database.'
)

report_director_inst_hint = (
    'Back up your data before proceeding with the upgrade'
    ' and use the command "/usr/libexec/bacula/update_bacula_tables <dbtype>" to upgrade'
    ' the Bacula database after the system upgrade.'
    ' The value of <dbtype> depends on the database backend, possible values are'
    ' sqlite3, mysql, postgresql.'
)


def _report_director_installed():
    """
    Create a report on detection of an installed bacula-director package.

    Should remind the user about the present Bacula director package
    installation and warn them about the necessary additional steps.
    """
    reporting.create_report([
        reporting.Title('bacula (bacula-director) has been detected on your system'),
        reporting.Summary(report_director_inst_summary),
        reporting.Severity(reporting.Severity.MEDIUM),
        reporting.Groups([reporting.Groups.SERVICES]),
        reporting.RelatedResource('package', 'bacula-director'),
        reporting.Remediation(hint=report_director_inst_hint),
    ])


def report_installed_packages(_context=api):
    """
    Create reports according to detected bacula packages.

    Create the report if the bacula-director rpm (RH signed) is installed.
    """
    has_director = has_package(InstalledRedHatSignedRPM, 'bacula-director', context=_context)

    if has_director:
        # bacula-director
        _report_director_installed()
@ -0,0 +1,65 @@
import pytest

from leapp import reporting
from leapp.libraries.actor.baculacheck import report_installed_packages
from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
from leapp.libraries.stdlib import api
from leapp.models import InstalledRedHatSignedRPM, RPM


def _generate_rpm_with_name(name):
    """
    Generate a new RPM model item with the given name.

    Parameters:
        name (str): rpm name

    Returns:
        rpm (RPM): new RPM object with the name parameter set
    """
    return RPM(name=name,
               version='0.1',
               release='1.sm01',
               epoch='1',
               pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51',
               packager='Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>',
               arch='noarch')


@pytest.mark.parametrize('has_director', [
    (True),   # with director
    (False),  # without director
])
def test_actor_execution(monkeypatch, has_director):
    """
    Parametrized helper function for test_actor_* functions.

    First generate a list of RPM models based on the set arguments. Then run
    the actor fed with our RPM list. Finally, assert Reports
    according to the set arguments.

    Parameters:
        has_director (bool): bacula-director installed
    """

    # A couple of random packages
    rpms = [_generate_rpm_with_name('sed'),
            _generate_rpm_with_name('htop')]

    if has_director:
        # Add bacula-director
        rpms += [_generate_rpm_with_name('bacula-director')]

    curr_actor_mocked = CurrentActorMocked(msgs=[InstalledRedHatSignedRPM(items=rpms)])
    monkeypatch.setattr(api, 'current_actor', curr_actor_mocked)
    monkeypatch.setattr(reporting, "create_report", create_report_mocked())

    # Execute the actor fed with our fake RPMs
    report_installed_packages(_context=api)

    if has_director:
        # Assert for bacula-director package installed
        assert reporting.create_report.called == 1
    else:
        # Assert for no bacula packages installed
        assert not reporting.create_report.called
@ -0,0 +1,19 @@
from leapp.actors import Actor
from leapp.libraries.actor.biosdevname import check_biosdevname
from leapp.models import KernelCmdlineArg, PersistentNetNamesFacts
from leapp.tags import FactsPhaseTag, IPUWorkflowTag


class Biosdevname(Actor):
    """
    Enable biosdevname on the target RHEL system if all interfaces on the source RHEL
    system use the biosdevname naming scheme and the machine vendor is Dell
    """

    name = 'biosdevname'
    consumes = (PersistentNetNamesFacts,)
    produces = (KernelCmdlineArg,)
    tags = (FactsPhaseTag, IPUWorkflowTag)

    def process(self):
        check_biosdevname()
@ -0,0 +1,59 @@
import re

import pyudev

from leapp.exceptions import StopActorExecutionError
from leapp.libraries.stdlib import api
from leapp.models import KernelCmdlineArg, PersistentNetNamesFacts


def is_biosdevname_disabled():
    with open('/proc/cmdline') as cmdline:
        if 'biosdevname=0' in cmdline.read():
            return True

    return False


def is_vendor_dell():
    context = pyudev.Context()

    # There should be only one dmi/id device
    for dev in pyudev.Enumerator(context).match_subsystem('dmi').match_sys_name('id'):
        vendor = dev.attributes.get('sys_vendor')
        return re.search('Dell.*', str(vendor)) is not None
    return False


def all_interfaces_biosdevname(interfaces):
    # Biosdevname supports two naming schemes
    emx = re.compile('em[0-9]+')
    pxpy = re.compile('p[0-9]+p[0-9]+')
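    # illustrative matches (names taken from the tests below): 'em2' fits the
    # emN scheme and 'p1p2' fits the pXpY scheme, while 'eth0' matches neither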

    for i in interfaces:
        if emx.match(i.name) is None and pxpy.match(i.name) is None:
            return False
    return True


def enable_biosdevname():
    api.current_logger().info(
        "Biosdevname naming scheme in use, explicitly enabling biosdevname on the target RHEL system"
    )
    api.produce(KernelCmdlineArg(**{'key': 'biosdevname', 'value': '1'}))


def check_biosdevname():
    if is_biosdevname_disabled():
        return

    net_names_facts_messages = api.consume(PersistentNetNamesFacts)
    net_names_facts = next(net_names_facts_messages, None)
    if not net_names_facts:
        raise StopActorExecutionError(
            'Could not read interface names',
            details={'details': 'No PersistentNetNamesFacts message found.'},
        )

    if is_vendor_dell() and all_interfaces_biosdevname(net_names_facts.interfaces):
        enable_biosdevname()
@ -0,0 +1,129 @@
import pytest
import pyudev
import six
from mock import mock_open, patch

from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor import biosdevname
from leapp.libraries.stdlib import api
from leapp.models import Interface, PCIAddress


class LoggerMocked(object):
    def __init__(self):
        self.infomsg = None

    def info(self, *args):
        self.infomsg = args

    def __call__(self):
        return self


def test_biosdevname_disabled(monkeypatch):
    mock_config = mock_open(read_data="biosdevname=0")
    with patch("builtins.open" if six.PY3 else "__builtin__.open", mock_config):
        assert biosdevname.is_biosdevname_disabled()


def test_biosdevname_enabled(monkeypatch):
    mock_config = mock_open(read_data="biosdevname=1")
    with patch("builtins.open" if six.PY3 else "__builtin__.open", mock_config):
        assert not biosdevname.is_biosdevname_disabled()


class pyudev_enum_mock(object):
    def __init__(self, vendor):
        self.vendor = vendor

    def match_sys_name(self, _):
        class dev(object):
            attributes = {'sys_vendor': self.vendor}

        return [dev()]

    def match_subsystem(self, _):
        return self

    def __call__(self, _):
        return self


def test_is_vendor_is_dell(monkeypatch):
    monkeypatch.setattr(pyudev, "Enumerator", pyudev_enum_mock("Dell"))
    assert biosdevname.is_vendor_dell()


def test_is_vendor_is_not_dell(monkeypatch):
    monkeypatch.setattr(pyudev, "Enumerator", pyudev_enum_mock("HP"))
    assert not biosdevname.is_vendor_dell()


def test_all_interfaces_biosdevname(monkeypatch):
    pci_info = PCIAddress(domain="domain", function="function", bus="bus", device="device")

    interfaces = [
        Interface(
            name="eth0", mac="mac", vendor="dell", pci_info=pci_info, devpath="path", driver="drv"
        )
    ]
    assert not biosdevname.all_interfaces_biosdevname(interfaces)
    interfaces = [
        Interface(
            name="em0", mac="mac", vendor="dell", pci_info=pci_info, devpath="path", driver="drv"
        )
    ]
    assert biosdevname.all_interfaces_biosdevname(interfaces)
    interfaces = [
        Interface(
            name="p0p22", mac="mac", vendor="dell", pci_info=pci_info, devpath="path", driver="drv"
        )
    ]
    assert biosdevname.all_interfaces_biosdevname(interfaces)

    interfaces = [
        Interface(
            name="p1p2", mac="mac", vendor="dell", pci_info=pci_info, devpath="path", driver="drv"
        ),
        Interface(
            name="em2", mac="mac", vendor="dell", pci_info=pci_info, devpath="path", driver="drv"
        ),
    ]
    assert biosdevname.all_interfaces_biosdevname(interfaces)

    interfaces = [
        Interface(
            name="p1p2", mac="mac", vendor="dell", pci_info=pci_info, devpath="path", driver="drv"
        ),
        Interface(
            name="em2", mac="mac", vendor="dell", pci_info=pci_info, devpath="path", driver="drv"
        ),
        Interface(
            name="eth0", mac="mac", vendor="dell", pci_info=pci_info, devpath="path", driver="drv"
        ),
    ]
    assert not biosdevname.all_interfaces_biosdevname(interfaces)


def test_enable_biosdevname(monkeypatch):
    result = []
    monkeypatch.setattr(api, 'current_logger', LoggerMocked())
    monkeypatch.setattr(api, 'produce', result.append)

    biosdevname.enable_biosdevname()
    assert (
        "Biosdevname naming scheme in use, explicitly enabling biosdevname on the target RHEL system"
        in api.current_logger.infomsg
    )
    assert result[0].key == "biosdevname"
    assert result[0].value == "1"


def test_check_biosdevname(monkeypatch):
    def persistent_net_names_mocked(*models):
        yield None

    monkeypatch.setattr(api, "consume", persistent_net_names_mocked)
    monkeypatch.setattr(biosdevname, "is_biosdevname_disabled", lambda: False)
    with pytest.raises(StopActorExecutionError):
        biosdevname.check_biosdevname()
@ -0,0 +1,20 @@
from leapp.actors import Actor
from leapp.libraries.actor import cephvolumescan
from leapp.models import CephInfo, InstalledRPM
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag


class CephVolumeScan(Actor):

    """
    Retrieves the list of encrypted Ceph OSDs
    """

    name = 'cephvolumescan'
    consumes = (InstalledRPM,)
    produces = (CephInfo,)
    tags = (ChecksPhaseTag, IPUWorkflowTag)

    def process(self):
        output = cephvolumescan.encrypted_osds_list()
        self.produce(CephInfo(encrypted_volumes=output))
@ -0,0 +1,72 @@
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
|
||||
from leapp.exceptions import StopActorExecutionError
|
||||
from leapp.libraries.common.rpms import has_package
|
||||
from leapp.libraries.stdlib import api, CalledProcessError, run
|
||||
from leapp.models import InstalledRPM
|
||||
|
||||
CEPH_CONF = "/etc/ceph/ceph.conf"
|
||||
CONTAINER = "ceph-osd"
|
||||
|
||||
|
||||
def select_osd_container(engine):
|
||||
try:
|
||||
output = run([engine, 'ps'])
|
||||
except CalledProcessError as cpe:
|
||||
raise StopActorExecutionError(
|
||||
'Could not retrieve running containers list',
|
||||
details={'details': 'An exception raised during containers listing {}'.format(str(cpe))}
|
||||
)
|
||||
for line in output['stdout'].splitlines():
|
||||
container_name = line.split()[-1]
|
||||
if re.match(CONTAINER, container_name):
|
||||
return container_name
|
||||
return None
|
||||
|
||||
|
||||
def get_ceph_lvm_list():
|
||||
base_cmd = ['ceph-volume', 'lvm', 'list', '--format', 'json']
|
||||
container_binary = 'podman' if has_package(InstalledRPM, 'podman') else \
|
||||
'docker' if has_package(InstalledRPM, 'docker') else ''
|
||||
if container_binary == '' and has_package(InstalledRPM, 'ceph-osd'):
|
||||
cmd_ceph_lvm_list = base_cmd
|
||||
elif container_binary == '':
|
||||
return None
|
||||
else:
|
||||
container_name = select_osd_container(container_binary)
|
||||
if container_name is None:
|
||||
return None
|
||||
cmd_ceph_lvm_list = [container_binary, 'exec', container_name]
|
||||
cmd_ceph_lvm_list.extend(base_cmd)
|
||||
try:
|
||||
output = run(cmd_ceph_lvm_list)
|
||||
except CalledProcessError as cpe:
|
||||
raise StopActorExecutionError(
|
||||
'Could not retrieve the ceph volumes list',
|
||||
details={'details': 'An exception raised while retrieving ceph volumes {}'.format(str(cpe))}
|
||||
)
|
||||
try:
|
||||
json_output = json.loads(output['stdout'])
|
||||
except ValueError as jve:
|
||||
raise StopActorExecutionError(
|
||||
'Could not load json file containing ceph volume list',
|
||||
details={'details': 'The JSON output has an unexpected format: {}'.format(str(jve))}
|
||||
)
|
||||
return json_output
|
||||
|
||||
|
||||
def encrypted_osds_list():
|
||||
result = []
|
||||
if os.path.isfile(CEPH_CONF):
|
||||
output = get_ceph_lvm_list()
|
||||
if output is not None:
|
||||
try:
|
||||
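# the JSON maps each OSD id to a list of LV entries; keep the lv_uuid of every entry whose tags mark it as encrypted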
result = [output[key][0]['lv_uuid'] for key in output if output[key][0]['tags']['ceph.encrypted']]
|
||||
except KeyError:
|
||||
# TODO: possibly raise a report item with a medium risk factor
|
||||
# TODO: possibly create list of problematic osds, extend the cephinfo
|
||||
# model to include the list and then report it.
|
||||
api.current_logger().warning('ceph-osd is installed but no encrypted osd has been found')
|
||||
return result
|
@ -0,0 +1,85 @@
|
||||
import pytest
|
||||
from mock import Mock, patch
|
||||
|
||||
from leapp.libraries.actor import cephvolumescan
|
||||
from leapp.models import InstalledRPM, LsblkEntry, RPM, StorageInfo
|
||||
from leapp.reporting import Report
|
||||
|
||||
CONT_PS_COMMAND_OUTPUT = {
|
||||
"stdout":
|
||||
"""CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
50d96fe72019 registry.redhat.io/rhceph/rhceph-4-rhel8:latest "/opt/ceph-contain..." \
|
||||
2 weeks ago Up 2 weeks ceph-osd-0
|
||||
f93c17b49c40 registry.redhat.io/rhceph/rhceph-4-rhel8:latest "/opt/ceph-contain..." \
|
||||
2 weeks ago Up 2 weeks ceph-osd-1
|
||||
0669880c52dc registry.redhat.io/rhceph/rhceph-4-rhel8:latest "/opt/ceph-contain..." \
|
||||
2 weeks ago Up 2 weeks ceph-mgr-ceph4-standalone
|
||||
d7068301294e registry.redhat.io/rhceph/rhceph-4-rhel8:latest "/opt/ceph-contain..." \
|
||||
2 weeks ago Up 2 weeks ceph-mon-ceph4-standalone
|
||||
63de6d00f241 registry.redhat.io/openshift4/ose-prometheus-alertmanager:4.1 "/bin/alertmanager..." \
|
||||
2 weeks ago Up 2 weeks alertmanager
|
||||
28ed65960c80 registry.redhat.io/rhceph/rhceph-4-dashboard-rhel8:4 "/run.sh" \
|
||||
2 weeks ago Up 2 weeks grafana-server
|
||||
f4b300d7a11f registry.redhat.io/openshift4/ose-prometheus-node-exporter:v4.1 "/bin/node_exporte..." \
|
||||
2 weeks ago Up 2 weeks node-exporter
|
||||
95a03700b3ff registry.redhat.io/openshift4/ose-prometheus:4.1 "/bin/prometheus -..." \
|
||||
2 weeks ago Up 2 weeks prometheus"""
|
||||
}
|
||||
|
||||
CEPH_VOLUME_OUTPUT = {
|
||||
"stdout": """{
|
||||
"0":[
|
||||
{
|
||||
"devices":[
|
||||
"/dev/sda"
|
||||
],
|
||||
"lv_name":"osd-block-c5215ba7-517b-45c7-88df-37a03eeaa0e9",
|
||||
"lv_uuid":"Tyc0TH-RDxr-ebAF-9mWF-Kh5R-YnvJ-cEcGVn",
|
||||
"tags":{
|
||||
"ceph.encrypted":"1"
|
||||
},
|
||||
"type":"block",
|
||||
"vg_name":"ceph-a696c40d-6b1d-448d-a40e-fadca22b64bc"
|
||||
}
|
||||
]
|
||||
}"""
|
||||
}
|
||||
|
||||
CEPH_LVM_LIST = {
|
||||
'0': [{'devices': ['/dev/sda'],
|
||||
'lv_name': 'osd-block-c5215ba7-517b-45c7-88df-37a03eeaa0e9',
|
||||
'lv_uuid': 'Tyc0TH-RDxr-ebAF-9mWF-Kh5R-YnvJ-cEcGVn',
|
||||
'tags': {'ceph.encrypted': '1'},
|
||||
'type': 'block',
|
||||
'vg_name': 'ceph-a696c40d-6b1d-448d-a40e-fadca22b64bc'}]
|
||||
}
|
||||
|
||||
|
||||
@patch('leapp.libraries.actor.cephvolumescan.run')
|
||||
def test_select_osd_container(m_run):
|
||||
|
||||
m_run.return_value = CONT_PS_COMMAND_OUTPUT
|
||||
|
||||
assert cephvolumescan.select_osd_container('docker') == "ceph-osd-0"
|
||||
|
||||
|
||||
@patch('leapp.libraries.actor.cephvolumescan.has_package')
|
||||
@patch('leapp.libraries.actor.cephvolumescan.select_osd_container')
|
||||
@patch('leapp.libraries.actor.cephvolumescan.run')
|
||||
def test_get_ceph_lvm_list(m_run, m_osd_container, m_has_package):
|
||||
|
||||
m_has_package.return_value = True
|
||||
m_osd_container.return_value = 'podman'
|
||||
m_run.return_value = CEPH_VOLUME_OUTPUT
|
||||
|
||||
assert cephvolumescan.get_ceph_lvm_list() == CEPH_LVM_LIST
|
||||
|
||||
|
||||
@patch('leapp.libraries.actor.cephvolumescan.os.path.isfile')
|
||||
@patch('leapp.libraries.actor.cephvolumescan.get_ceph_lvm_list')
|
||||
def test_encrypted_osds_list(m_get_ceph_lvm_list, m_isfile):
|
||||
|
||||
m_get_ceph_lvm_list.return_value = CEPH_LVM_LIST
|
||||
m_isfile.return_value = True
|
||||
|
||||
assert cephvolumescan.encrypted_osds_list() == ['Tyc0TH-RDxr-ebAF-9mWF-Kh5R-YnvJ-cEcGVn']
|
@ -0,0 +1,31 @@
|
||||
from leapp.actors import Actor
|
||||
from leapp.libraries.actor.checkbootavailspace import (
|
||||
check_avail_space_on_boot,
|
||||
get_avail_bytes_on_boot,
|
||||
)
|
||||
from leapp.reporting import Report
|
||||
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
|
||||
|
||||
|
||||
class CheckBootAvailSpace(Actor):
|
||||
"""
|
||||
Check if at least 100 MiB of space is available on /boot. If not, inhibit the upgrade process.
|
||||
|
||||
Rationale for the requirement of 100 MiB:
|
||||
- Before the reboot into the initramfs, the CopyInitramfsToBoot actor copies the kernel and initramfs to
|
||||
/boot, together worth 66 MiB.
|
||||
- After booting into initramfs, the RemoveBootFiles actor removes the copied kernel and
|
||||
initramfs from /boot.
|
||||
- The DnfShellRpmUpgrade installs a new kernel-core package, which puts an additional 54 MiB of data
|
||||
to /boot.
|
||||
- Even though the available space needed at the time of writing this actor is 66 MiB, the
|
||||
additional 100-66=34 MiB provides leeway for potential growth of the kernel or initramfs in size.
|
||||
"""
|
||||
|
||||
name = 'check_boot_avail_space'
|
||||
consumes = ()
|
||||
produces = (Report,)
|
||||
tags = (IPUWorkflowTag, ChecksPhaseTag)
|
||||
|
||||
def process(self):
|
||||
check_avail_space_on_boot(get_avail_bytes_on_boot)
|
@ -0,0 +1,39 @@
|
||||
from __future__ import division
|
||||
|
||||
from os import statvfs
|
||||
|
||||
from leapp import reporting
|
||||
|
||||
MIN_AVAIL_BYTES_FOR_BOOT = 100 * 2**20 # 100 MiB
|
||||
|
||||
|
||||
def check_avail_space_on_boot(boot_avail_space_getter):
|
||||
avail_bytes = boot_avail_space_getter()
|
||||
if is_additional_space_required(avail_bytes):
|
||||
inhibit_upgrade(avail_bytes)
|
||||
|
||||
|
||||
def get_avail_bytes_on_boot():
|
||||
boot_stat = statvfs('/boot')
|
||||
return boot_stat.f_frsize * boot_stat.f_bavail
|
||||
|
||||
|
||||
def is_additional_space_required(avail_bytes):
|
||||
return avail_bytes < MIN_AVAIL_BYTES_FOR_BOOT
|
||||
|
||||
|
||||
def inhibit_upgrade(avail_bytes):
|
||||
additional_mib_needed = (MIN_AVAIL_BYTES_FOR_BOOT - avail_bytes) / 2**20
|
||||
# we use "reporting.report_generic" to allow mocking in the tests
|
||||
# WIP ^^ check if this still applies
|
||||
reporting.create_report([
|
||||
reporting.Title('Not enough space on /boot'),
|
||||
reporting.Summary(
|
||||
'/boot needs additional {0} MiB to be able to accommodate the upgrade initramfs and new kernel.'.format(
|
||||
additional_mib_needed)
|
||||
),
|
||||
reporting.Severity(reporting.Severity.HIGH),
|
||||
reporting.Groups([reporting.Groups.FILESYSTEM]),
|
||||
reporting.Groups([reporting.Groups.INHIBITOR]),
|
||||
reporting.RelatedResource('directory', '/boot')
|
||||
])
|
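As a side note, the check above boils down to simple filesystem arithmetic. A minimal self-contained sketch (the helper name boot_shortfall_mib is illustrative, not part of the actor):

from os import statvfs

MIN_AVAIL_BYTES_FOR_BOOT = 100 * 2**20  # 100 MiB, as in checkbootavailspace above


def boot_shortfall_mib(path='/boot'):
    # f_frsize * f_bavail = bytes available to unprivileged users on the filesystem
    stat = statvfs(path)
    avail_bytes = stat.f_frsize * stat.f_bavail
    # 0.0 means the 100 MiB requirement is met; otherwise the MiB value the report asks to free
    return max(0.0, (MIN_AVAIL_BYTES_FOR_BOOT - avail_bytes) / 2**20)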
@ -0,0 +1,54 @@
|
||||
from __future__ import division
|
||||
|
||||
from leapp import reporting
|
||||
from leapp.libraries.actor.checkbootavailspace import (
|
||||
check_avail_space_on_boot,
|
||||
inhibit_upgrade,
|
||||
MIN_AVAIL_BYTES_FOR_BOOT
|
||||
)
|
||||
from leapp.libraries.common.testutils import create_report_mocked
|
||||
from leapp.utils.report import is_inhibitor
|
||||
|
||||
|
||||
class fake_get_avail_bytes_on_boot(object):
|
||||
def __init__(self, size):
|
||||
self.size = size
|
||||
|
||||
def __call__(self, *args):
|
||||
return self.size
|
||||
|
||||
|
||||
def test_not_enough_space_available(monkeypatch):
|
||||
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
|
||||
|
||||
# Test 0 bytes available on /boot
|
||||
get_avail_bytes_on_boot = fake_get_avail_bytes_on_boot(0)
|
||||
check_avail_space_on_boot(get_avail_bytes_on_boot)
|
||||
|
||||
# Test 0.1 MiB less than required in /boot
|
||||
get_avail_bytes_on_boot = fake_get_avail_bytes_on_boot(MIN_AVAIL_BYTES_FOR_BOOT - 0.1 * 2**20)
|
||||
check_avail_space_on_boot(get_avail_bytes_on_boot)
|
||||
|
||||
assert reporting.create_report.called == 2
|
||||
|
||||
|
||||
def test_enough_space_available(monkeypatch):
|
||||
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
|
||||
|
||||
get_avail_bytes_on_boot = fake_get_avail_bytes_on_boot(MIN_AVAIL_BYTES_FOR_BOOT)
|
||||
check_avail_space_on_boot(get_avail_bytes_on_boot)
|
||||
|
||||
assert reporting.create_report.called == 0
|
||||
|
||||
|
||||
def test_inhibit_upgrade(monkeypatch):
|
||||
monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
|
||||
|
||||
# Test 4.2 MiB available on /boot
|
||||
bytes_available = 4.2 * 2**20
|
||||
inhibit_upgrade(bytes_available)
|
||||
|
||||
assert reporting.create_report.called == 1
|
||||
assert is_inhibitor(reporting.create_report.report_fields)
|
||||
mib_needed = (MIN_AVAIL_BYTES_FOR_BOOT - bytes_available) / 2**20
|
||||
assert "needs additional {0} MiB".format(mib_needed) in reporting.create_report.report_fields['summary']
|
@ -0,0 +1,21 @@
|
||||
from leapp.actors import Actor
|
||||
from leapp.libraries.actor.checkcifs import checkcifs
|
||||
from leapp.models import StorageInfo
|
||||
from leapp.reporting import Report
|
||||
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
|
||||
|
||||
|
||||
class CheckCIFS(Actor):
|
||||
"""
|
||||
Check if a CIFS filesystem is in use. If so, inhibit the upgrade process.
|
||||
|
||||
The actor looks for CIFS entries in /etc/fstab.
|
||||
If there is a CIFS entry, the upgrade is inhibited.
|
||||
"""
|
||||
name = "check_cifs"
|
||||
consumes = (StorageInfo,)
|
||||
produces = (Report,)
|
||||
tags = (ChecksPhaseTag, IPUWorkflowTag,)
|
||||
|
||||
def process(self):
|
||||
checkcifs(self.consume(StorageInfo))
|
@ -0,0 +1,23 @@
|
||||
from leapp import reporting
|
||||
from leapp.libraries.common.config import get_env
|
||||
from leapp.reporting import create_report
|
||||
|
||||
|
||||
def checkcifs(storage_info):
|
||||
# if network in initramfs is enabled CIFS inhibitor is redundant
|
||||
if get_env('LEAPP_DEVEL_INITRAM_NETWORK', None):
|
||||
return
|
||||
for storage in storage_info:
|
||||
if any(entry.fs_vfstype == "cifs" for entry in storage.fstab):
|
||||
create_report([
|
||||
reporting.Title("Use of CIFS detected. Upgrade can't proceed"),
|
||||
reporting.Summary("CIFS is currently not supported by the inplace upgrade."),
|
||||
reporting.Severity(reporting.Severity.HIGH),
|
||||
reporting.Groups([
|
||||
reporting.Groups.FILESYSTEM,
|
||||
reporting.Groups.NETWORK
|
||||
]),
|
||||
reporting.Remediation(hint='Comment out CIFS entries to proceed with the upgrade.'),
|
||||
reporting.Groups([reporting.Groups.INHIBITOR]),
|
||||
reporting.RelatedResource('file', '/etc/fstab')
|
||||
])
|
@ -0,0 +1,38 @@
|
||||
from leapp.libraries.common import config
|
||||
from leapp.models import FstabEntry, StorageInfo
|
||||
from leapp.reporting import Report
|
||||
from leapp.snactor.fixture import current_actor_context
|
||||
from leapp.utils.report import is_inhibitor
|
||||
|
||||
|
||||
def test_actor_with_fstab_entry(current_actor_context, monkeypatch):
|
||||
monkeypatch.setattr(config, 'get_env', lambda x, y: y)
|
||||
with_fstab_entry = [FstabEntry(fs_spec="//10.20.30.42/share1", fs_file="/mnt/win_share1",
|
||||
fs_vfstype="cifs",
|
||||
fs_mntops="credentials=/etc/win-credentials,file_mode=0755,dir_mode=0755",
|
||||
fs_freq="0", fs_passno="0"),
|
||||
FstabEntry(fs_spec="//10.20.30.42/share2", fs_file="/mnt/win_share2",
|
||||
fs_vfstype="cifs",
|
||||
fs_mntops="credentials=/etc/win-credentials,file_mode=0755,dir_mode=0755",
|
||||
fs_freq="0", fs_passno="0"),
|
||||
FstabEntry(fs_spec="/dev/mapper/fedora-home", fs_file="/home",
|
||||
fs_vfstype="ext4",
|
||||
fs_mntops="defaults,x-systemd.device-timeout=0",
|
||||
fs_freq="1", fs_passno="2")]
|
||||
current_actor_context.feed(StorageInfo(fstab=with_fstab_entry))
|
||||
current_actor_context.run()
|
||||
report_fields = current_actor_context.consume(Report)[0].report
|
||||
assert is_inhibitor(report_fields)
|
||||
assert report_fields['severity'] == 'high'
|
||||
assert report_fields['title'] == "Use of CIFS detected. Upgrade can't proceed"
|
||||
|
||||
|
||||
def test_actor_no_cifs(current_actor_context, monkeypatch):
|
||||
monkeypatch.setattr(config, 'get_env', lambda x, y: y)
|
||||
with_fstab_entry = [FstabEntry(fs_spec="/dev/mapper/fedora-home", fs_file="/home",
|
||||
fs_vfstype="ext4",
|
||||
fs_mntops="defaults,x-systemd.device-timeout=0",
|
||||
fs_freq="1", fs_passno="2")]
|
||||
current_actor_context.feed(StorageInfo(fstab=with_fstab_entry))
|
||||
current_actor_context.run()
|
||||
assert not current_actor_context.consume(Report)
|
@ -0,0 +1,18 @@
|
||||
from leapp.actors import Actor
|
||||
from leapp.libraries.actor import check_consumed_assets
|
||||
from leapp.models import ConsumedDataAsset, Report
|
||||
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
|
||||
|
||||
|
||||
class CheckConsumedAssets(Actor):
|
||||
"""
|
||||
Check whether Leapp is using the correct data assets.
|
||||
"""
|
||||
|
||||
name = 'check_consumed_assets'
|
||||
consumes = (ConsumedDataAsset,)
|
||||
produces = (Report,)
|
||||
tags = (IPUWorkflowTag, ChecksPhaseTag)
|
||||
|
||||
def process(self):
|
||||
check_consumed_assets.inhibit_if_assets_with_incorrect_version()
|
@ -0,0 +1,162 @@
|
||||
import re
|
||||
from collections import defaultdict, namedtuple
|
||||
|
||||
from leapp import reporting
|
||||
from leapp.libraries.common.config import get_consumed_data_stream_id
|
||||
from leapp.libraries.common.fetch import ASSET_PROVIDED_DATA_STREAMS_FIELD
|
||||
from leapp.libraries.stdlib import api
|
||||
from leapp.models import ConsumedDataAsset
|
||||
|
||||
|
||||
def compose_summary_for_incompatible_assets(assets, incompatibility_reason):
|
||||
if not assets:
|
||||
return []
|
||||
|
||||
summary_lines = ['The following assets are {reason}:'.format(reason=incompatibility_reason)]
|
||||
for asset in assets:
|
||||
if asset.provided_data_streams is None:  # Assets with missing streams are placed only in the .outdated bucket
|
||||
details = (' - The asset {what_asset} is missing information about provided data streams '
|
||||
'in its metadata header')
|
||||
details = details.format(what_asset=asset.filename)
|
||||
else:
|
||||
article, multiple_suffix = ('the ', '') if len(asset.provided_data_streams) == 1 else ('', 's')
|
||||
details = ' - The asset {what_asset} provides {article}data stream{mult_suffix} {provided_streams}'
|
||||
details = details.format(what_asset=asset.filename,
|
||||
provided_streams=', '.join(asset.provided_data_streams),
|
||||
article=article, mult_suffix=multiple_suffix)
|
||||
summary_lines.append(details)
|
||||
return summary_lines
|
||||
|
||||
|
||||
def make_report_entries_with_unique_urls(docs_url_to_title_map):
|
||||
report_urls = []
|
||||
# Add every unique asset URL into the report
|
||||
urls_with_multiple_titles = []
|
||||
for url, titles in docs_url_to_title_map.items():
|
||||
if len(titles) > 1:
|
||||
urls_with_multiple_titles.append(url)
|
||||
|
||||
report_entry = reporting.ExternalLink(title=titles[0], url=url)
|
||||
report_urls.append(report_entry)
|
||||
|
||||
if urls_with_multiple_titles:
|
||||
msg = 'Docs URLs {urls} are used with inconsistent URL titles, picking one.'
|
||||
api.current_logger().warning(msg.format(urls=', '.join(urls_with_multiple_titles)))
|
||||
|
||||
return report_urls
|
||||
|
||||
|
||||
def report_incompatible_assets(assets):
|
||||
if not any((assets.outdated, assets.too_new, assets.unknown)):
|
||||
return
|
||||
|
||||
title = 'Incompatible Leapp data assets are present'
|
||||
|
||||
docs_url_to_title_map = defaultdict(list)
|
||||
required_data_stream = get_consumed_data_stream_id()
|
||||
summary_prelude = ('The currently installed Leapp consumes data stream {consumed_data_stream}, but the '
|
||||
'following assets provide different streams:')
|
||||
summary_lines = [summary_prelude.format(consumed_data_stream=required_data_stream)]
|
||||
|
||||
assets_with_shared_summary_entry = [
|
||||
('outdated', assets.outdated),
|
||||
('intended for a newer leapp', assets.too_new),
|
||||
('of an incorrect version', assets.unknown)
|
||||
]
|
||||
|
||||
# collect titles per docs URL into docs_url_to_title_map so we do not spam the user with the same URLs
|
||||
for reason, incompatible_assets in assets_with_shared_summary_entry:
|
||||
summary_lines += compose_summary_for_incompatible_assets(incompatible_assets, reason)
|
||||
|
||||
for asset in incompatible_assets:
|
||||
docs_url_to_title_map[asset.docs_url].append(asset.docs_title)
|
||||
|
||||
report_parts = [
|
||||
reporting.Title(title),
|
||||
reporting.Summary('\n'.join(summary_lines)),
|
||||
reporting.Severity(reporting.Severity.HIGH),
|
||||
reporting.Groups([reporting.Groups.INHIBITOR, reporting.Groups.REPOSITORY]),
|
||||
]
|
||||
|
||||
report_parts += make_report_entries_with_unique_urls(docs_url_to_title_map)
|
||||
reporting.create_report(report_parts)
|
||||
|
||||
|
||||
def report_malformed_assets(malformed_assets):
|
||||
if not malformed_assets:
|
||||
return
|
||||
|
||||
title = 'Detected malformed Leapp data assets'
|
||||
summary_lines = ['The following assets are malformed:']
|
||||
|
||||
docs_url_to_title_map = defaultdict(list)
|
||||
for asset in malformed_assets:
|
||||
if not asset.provided_data_streams:
|
||||
details = (' - The asset file {filename} contains no values in its "{provided_data_streams_field}" '
|
||||
'field, or the field does not contain a list')
|
||||
details = details.format(filename=asset.filename,
|
||||
provided_data_streams_field=ASSET_PROVIDED_DATA_STREAMS_FIELD)
|
||||
else:
|
||||
# The asset is malformed because we failed to convert its major versions to ints
|
||||
details = ' - The asset file {filename} contains an invalid value in its "{data_streams_field}"'
|
||||
details = details.format(filename=asset.filename, data_streams_field=ASSET_PROVIDED_DATA_STREAMS_FIELD)
|
||||
summary_lines.append(details)
|
||||
docs_url_to_title_map[asset.docs_url].append(asset.docs_title)
|
||||
|
||||
report_parts = [
|
||||
reporting.Title(title),
|
||||
reporting.Summary('\n'.join(summary_lines)),
|
||||
reporting.Severity(reporting.Severity.HIGH),
|
||||
reporting.Groups([reporting.Groups.INHIBITOR, reporting.Groups.REPOSITORY]),
|
||||
]
|
||||
|
||||
report_parts += make_report_entries_with_unique_urls(docs_url_to_title_map)
|
||||
reporting.create_report(report_parts)
|
||||
|
||||
|
||||
def inhibit_if_assets_with_incorrect_version():
|
||||
required_data_stream = get_consumed_data_stream_id()
|
||||
required_data_stream_major = int(required_data_stream.split('.', 1)[0])
|
||||
|
||||
# The assets are collected according to why they are considered incompatible, so that a single report is created
|
||||
# for every class
|
||||
IncompatibleAssetsByType = namedtuple('IncompatibleAssets', ('outdated', 'too_new', 'malformed', 'unknown'))
|
||||
incompatible_assets = IncompatibleAssetsByType(outdated=[], too_new=[], malformed=[], unknown=[])
|
||||
|
||||
datastream_version_re = re.compile(r'\d+\.\d+$')
|
||||
|
||||
for consumed_asset in api.consume(ConsumedDataAsset):
|
||||
if consumed_asset.provided_data_streams is None: # There is no provided_data_streams field
|
||||
# Most likely an old file that predates the introduction of versioning to data assets
|
||||
incompatible_assets.outdated.append(consumed_asset)
|
||||
continue
|
||||
|
||||
# Ignore minor stream numbers and search only for a stream matching the same major number
|
||||
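# e.g. provided_data_streams ['9.3', '10.1'] passes the format check and yields major versions [9, 10]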
if all((datastream_version_re.match(stream) for stream in consumed_asset.provided_data_streams)):
|
||||
provided_major_data_streams = sorted(
|
||||
int(stream.split('.', 1)[0]) for stream in consumed_asset.provided_data_streams
|
||||
)
|
||||
else:
|
||||
incompatible_assets.malformed.append(consumed_asset)
|
||||
continue
|
||||
|
||||
if required_data_stream_major in provided_major_data_streams:
|
||||
continue
|
||||
|
||||
if not provided_major_data_streams:
|
||||
# The field contained [], or something that was not a list, but it was corrected to [] to satisfy the model
|
||||
incompatible_assets.malformed.append(consumed_asset)
|
||||
continue
|
||||
|
||||
if required_data_stream_major > max(provided_major_data_streams):
|
||||
incompatible_assets.outdated.append(consumed_asset)
|
||||
elif required_data_stream_major < min(provided_major_data_streams):
|
||||
incompatible_assets.too_new.append(consumed_asset)
|
||||
else:
|
||||
# Since `provided_data_streams` is a list of values, it is possible that the asset provides, e.g., 4.0
|
||||
# and 6.0, but leapp consumes 5.0, thus we need to be careful when to say that an asset is too
|
||||
# new/outdated/none.
|
||||
incompatible_assets.unknown.append(consumed_asset)
|
||||
|
||||
report_incompatible_assets(incompatible_assets)
|
||||
report_malformed_assets(incompatible_assets.malformed)
|
@ -0,0 +1,47 @@
|
||||
import pytest
|
||||
|
||||
from leapp import reporting
|
||||
from leapp.libraries.actor import check_consumed_assets as check_consumed_assets_lib
|
||||
from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
|
||||
from leapp.libraries.stdlib import api
|
||||
from leapp.models import ConsumedDataAsset
|
||||
from leapp.utils.report import is_inhibitor
|
||||
|
||||
|
||||
@pytest.mark.parametrize(('asset_data_streams', 'inhibit_reason'),
|
||||
((['10.0'], None),
|
||||
(['9.3', '10.1', '11.0'], None),
|
||||
(['11.1'], 'incompatible'),
|
||||
(['3.1', '4.0'], 'incompatible'),
|
||||
(['11.1', '12.0'], 'incompatible'),
|
||||
([], 'malformed'),
|
||||
(['malformed'], 'malformed')))
|
||||
def test_asset_version_correctness_assessment(monkeypatch, asset_data_streams, inhibit_reason):
|
||||
|
||||
monkeypatch.setattr(check_consumed_assets_lib, 'get_consumed_data_stream_id', lambda: '10.0')
|
||||
used_asset = ConsumedDataAsset(filename='asset.json',
|
||||
fulltext_name='',
|
||||
docs_url='',
|
||||
docs_title='',
|
||||
provided_data_streams=asset_data_streams)
|
||||
|
||||
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[used_asset]))
|
||||
create_report_mock = create_report_mocked()
|
||||
monkeypatch.setattr(reporting, 'create_report', create_report_mock)
|
||||
|
||||
check_consumed_assets_lib.inhibit_if_assets_with_incorrect_version()
|
||||
|
||||
expected_report_count = 1 if inhibit_reason else 0
|
||||
assert create_report_mock.called == expected_report_count
|
||||
if inhibit_reason:
|
||||
report = create_report_mock.reports[0]
|
||||
assert is_inhibitor(report)
|
||||
assert inhibit_reason in report['title'].lower()
|
||||
|
||||
|
||||
def test_make_report_entries_with_unique_urls():
|
||||
# Check that multiple titles produce one report
|
||||
docs_url_to_title_map = {'/path/to/asset1': ['asset1_title1', 'asset1_title2'],
|
||||
'/path/to/asset2': ['asset2_title']}
|
||||
report_urls = check_consumed_assets_lib.make_report_entries_with_unique_urls(docs_url_to_title_map)
|
||||
assert set([ru.value['url'] for ru in report_urls]) == {'/path/to/asset1', '/path/to/asset2'}
|
@ -0,0 +1,21 @@
|
||||
from leapp.actors import Actor
|
||||
from leapp.libraries.actor import checkdddd
|
||||
from leapp.models import DetectedDeviceOrDriver, Report
|
||||
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
|
||||
|
||||
|
||||
class CheckDetectedDevicesAndDrivers(Actor):
|
||||
"""
|
||||
Checks whether detected devices and drivers are usable on the target system.
|
||||
|
||||
If a driver is no longer present on the target system, an inhibitor will be raised.
|
||||
If a device or driver is no longer maintained, a warning report will be generated.
|
||||
"""
|
||||
|
||||
name = 'check_detected_devices_and_drivers'
|
||||
consumes = (DetectedDeviceOrDriver,)
|
||||
produces = (Report,)
|
||||
tags = (IPUWorkflowTag, ChecksPhaseTag)
|
||||
|
||||
def process(self):
|
||||
checkdddd.process()
|
@ -0,0 +1,180 @@
|
||||
from collections import defaultdict
|
||||
from enum import IntEnum
|
||||
|
||||
from leapp import reporting
|
||||
from leapp.libraries.common.config.version import get_source_major_version, get_target_major_version
|
||||
from leapp.libraries.stdlib import api
|
||||
from leapp.models import DetectedDeviceOrDriver
|
||||
|
||||
|
||||
class MessagingClass(IntEnum):
|
||||
UNKNOWN = 0
|
||||
DRIVERS = 1
|
||||
DEVICES = 2
|
||||
CPUS = 3
|
||||
|
||||
|
||||
def create_inhibitors(inhibiting_entries):
|
||||
if not inhibiting_entries:
|
||||
return
|
||||
|
||||
drivers = inhibiting_entries.get(MessagingClass.DRIVERS)
|
||||
if drivers:
|
||||
reporting.create_report([
|
||||
reporting.Title(
|
||||
'Leapp detected loaded kernel drivers which have been removed '
|
||||
'in RHEL {}. Upgrade cannot proceed.'.format(get_target_major_version())
|
||||
),
|
||||
reporting.Summary(
|
||||
(
|
||||
'Support for the following RHEL {source} device drivers has been removed in RHEL {target}:\n'
|
||||
' - {drivers}\n'
|
||||
).format(
|
||||
drivers='\n - '.join([entry.driver_name for entry in drivers]),
|
||||
target=get_target_major_version(),
|
||||
source=get_source_major_version(),
|
||||
)
|
||||
),
|
||||
reporting.Audience('sysadmin'),
|
||||
reporting.Groups([reporting.Groups.KERNEL, reporting.Groups.DRIVERS]),
|
||||
reporting.Severity(reporting.Severity.HIGH),
|
||||
reporting.Groups([reporting.Groups.INHIBITOR])
|
||||
])
|
||||
|
||||
devices = inhibiting_entries.get(MessagingClass.DEVICES)
|
||||
if devices:
|
||||
reporting.create_report([
|
||||
reporting.Title(
|
||||
'Leapp detected devices which are no longer supported in RHEL {}. Upgrade cannot proceed.'.format(
|
||||
get_target_major_version())
|
||||
),
|
||||
reporting.Summary(
|
||||
(
|
||||
'Support for the following devices has been removed in RHEL {target}:\n'
|
||||
' - {devices}\n'
|
||||
).format(
|
||||
devices='\n - '.join(['{name} ({pci})'.format(name=entry.device_name,
|
||||
pci=entry.device_id) for entry in devices]),
|
||||
target=get_target_major_version(),
|
||||
)
|
||||
),
|
||||
reporting.Audience('sysadmin'),
|
||||
reporting.Groups([reporting.Groups.KERNEL]),
|
||||
reporting.Severity(reporting.Severity.HIGH),
|
||||
reporting.Groups([reporting.Groups.INHIBITOR])
|
||||
])
|
||||
|
||||
cpus = inhibiting_entries.get(MessagingClass.CPUS)
|
||||
if cpus:
|
||||
reporting.create_report([
|
||||
reporting.Title(
|
||||
'Leapp detected a processor which is no longer supported in RHEL {}. Upgrade cannot proceed.'.format(
|
||||
get_target_major_version())
|
||||
),
|
||||
reporting.Summary(
|
||||
(
|
||||
'Support for the following processors has been removed in RHEL {target}:\n'
|
||||
' - {processors}\n'
|
||||
).format(
|
||||
processors='\n - '.join([entry.device_name for entry in cpus]),
|
||||
target=get_target_major_version(),
|
||||
)
|
||||
),
|
||||
reporting.Audience('sysadmin'),
|
||||
reporting.Groups([reporting.Groups.KERNEL, reporting.Groups.BOOT]),
|
||||
reporting.Severity(reporting.Severity.HIGH),
|
||||
reporting.Groups([reporting.Groups.INHIBITOR])
|
||||
])
|
||||
|
||||
|
||||
def create_warnings(unmaintained_entries):
|
||||
if not unmaintained_entries:
|
||||
return
|
||||
|
||||
drivers = unmaintained_entries.get(MessagingClass.DRIVERS)
|
||||
if drivers:
|
||||
reporting.create_report([
|
||||
reporting.Title(
|
||||
'Leapp detected loaded kernel drivers which are no longer maintained in RHEL {}.'.format(
|
||||
get_target_major_version())
|
||||
),
|
||||
reporting.Summary(
|
||||
(
|
||||
'The following RHEL {source} device drivers are no longer maintained in RHEL {target}:\n'
|
||||
' - {drivers}\n'
|
||||
).format(
|
||||
drivers='\n - '.join([entry.driver_name for entry in drivers]),
|
||||
target=get_target_major_version(),
|
||||
source=get_source_major_version(),
|
||||
)
|
||||
),
|
||||
reporting.Audience('sysadmin'),
|
||||
reporting.Groups([reporting.Groups.KERNEL, reporting.Groups.DRIVERS]),
|
||||
reporting.Severity(reporting.Severity.HIGH),
|
||||
])
|
||||
|
||||
devices = unmaintained_entries.get(MessagingClass.DEVICES)
|
||||
if devices:
|
||||
reporting.create_report([
|
||||
reporting.Title(
|
||||
'Leapp detected devices which are no longer maintained in RHEL {}.'.format(
|
||||
get_target_major_version())
|
||||
),
|
||||
reporting.Summary(
|
||||
(
|
||||
'Support for the following devices has been removed in RHEL {target} and '
|
||||
'they are no longer maintained:\n - {devices}\n'
|
||||
).format(
|
||||
devices='\n - '.join(['{name} ({pci})'.format(name=entry.device_name,
|
||||
pci=entry.device_id) for entry in devices]),
|
||||
target=get_target_major_version(),
|
||||
)
|
||||
),
|
||||
reporting.Audience('sysadmin'),
|
||||
reporting.Groups([reporting.Groups.KERNEL]),
|
||||
reporting.Severity(reporting.Severity.HIGH),
|
||||
])
|
||||
|
||||
cpus = unmaintained_entries.get(MessagingClass.CPUS)
|
||||
if cpus:
|
||||
reporting.create_report([
|
||||
reporting.Title(
|
||||
'Leapp detected a processor which is no longer maintained in RHEL {}.'.format(
|
||||
get_target_major_version())
|
||||
),
|
||||
reporting.Summary(
|
||||
(
|
||||
'The following processors are no longer maintained in RHEL {target}:\n'
|
||||
' - {processors}\n'
|
||||
).format(
|
||||
processors='\n - '.join([entry.device_name for entry in cpus]),
|
||||
target=get_target_major_version(),
|
||||
)
|
||||
),
|
||||
reporting.Audience('sysadmin'),
|
||||
reporting.Groups([reporting.Groups.KERNEL, reporting.Groups.BOOT]),
|
||||
reporting.Severity(reporting.Severity.HIGH),
|
||||
])
|
||||
|
||||
|
||||
def classify(entry):
|
||||
if entry.device_type == 'pci':
|
||||
if entry.device_id:
|
||||
return MessagingClass.DEVICES
|
||||
return MessagingClass.DRIVERS
|
||||
if entry.device_type == 'cpu':
|
||||
return MessagingClass.CPUS
|
||||
return MessagingClass.UNKNOWN
|
||||
|
||||
|
||||
def process():
|
||||
target_version = int(get_target_major_version())
|
||||
inhibiting = defaultdict(list)
|
||||
unmaintained = defaultdict(list)
|
||||
for entry in api.consume(DetectedDeviceOrDriver):
|
||||
if target_version not in entry.available_in_rhel:
|
||||
inhibiting[classify(entry)].append(entry)
|
||||
elif target_version not in entry.maintained_in_rhel:
|
||||
unmaintained[classify(entry)].append(entry)
|
||||
create_inhibitors(inhibiting)
|
||||
create_warnings(unmaintained)
|
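This library ships without accompanying tests in this commit, so, as a quick orientation, here is a hedged sketch of how a single entry is bucketed by process() above (the Entry stand-in and all field values are illustrative, not part of the actor):

from collections import namedtuple

# Stand-in for the DetectedDeviceOrDriver model, limited to the fields used above
Entry = namedtuple('Entry', ['device_type', 'device_id', 'device_name', 'driver_name',
                             'available_in_rhel', 'maintained_in_rhel'])

entry = Entry(device_type='pci', device_id='0x8086:0x10d3', device_name='Example NIC',
              driver_name='example_drv', available_in_rhel=[7, 8], maintained_in_rhel=[7])

# classify() maps this entry to MessagingClass.DEVICES ('pci' with a device_id present).
# With target RHEL 9: 9 not in available_in_rhel -> inhibiting bucket (upgrade inhibitor report).
# With target RHEL 8: 8 available but not maintained -> unmaintained bucket (warning report).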
@ -0,0 +1,18 @@
|
||||
from leapp.actors import Actor
|
||||
from leapp.libraries.actor import checketcreleasever
|
||||
from leapp.models import PkgManagerInfo, Report, RHUIInfo
|
||||
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
|
||||
|
||||
|
||||
class CheckEtcReleasever(Actor):
|
||||
"""
|
||||
Check the releasever info and provide guidance based on the facts
|
||||
"""
|
||||
|
||||
name = 'check_etc_releasever'
|
||||
consumes = (PkgManagerInfo, RHUIInfo)
|
||||
produces = (Report,)
|
||||
tags = (ChecksPhaseTag, IPUWorkflowTag)
|
||||
|
||||
def process(self):
|
||||
checketcreleasever.process()
|
@ -0,0 +1,34 @@
|
||||
from leapp import reporting
|
||||
from leapp.libraries.stdlib import api
|
||||
from leapp.models import PkgManagerInfo, RHUIInfo
|
||||
|
||||
|
||||
def handle_etc_releasever():
|
||||
|
||||
target_version = api.current_actor().configuration.version.target
|
||||
reporting.create_report([
|
||||
reporting.Title(
|
||||
'Release version in /etc/dnf/vars/releasever will be set to the current target release'
|
||||
),
|
||||
reporting.Summary(
|
||||
'On this system, Leapp detected that the "releasever" variable is either configured through the DNF/YUM '
|
||||
'configuration file and/or the system is using RHUI infrastructure. In order to avoid issues with '
|
||||
'repofile URLs (when the --release option is not provided) in cases where the configuration still holds '
|
||||
'the previous major.minor version value, the release version will be set to the target release version '
|
||||
'({}). This will also ensure the system stays on the expected target version after the upgrade'.format(target_version)
|
||||
),
|
||||
reporting.Severity(reporting.Severity.INFO),
|
||||
reporting.Groups([reporting.Groups.UPGRADE_PROCESS]),
|
||||
])
|
||||
|
||||
|
||||
def process():
|
||||
pkg_facts = next(api.consume(PkgManagerInfo), None)
|
||||
rhui_facts = next(api.consume(RHUIInfo), None)
|
||||
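# note: 'and' binds tighter than 'or', i.e. report when a releasever value is set or whenever RHUI is in use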
if pkg_facts and pkg_facts.etc_releasever is not None or rhui_facts:
|
||||
handle_etc_releasever()
|
||||
else:
|
||||
api.current_logger().debug(
|
||||
'Skipping execution. "releasever" is not set in DNF/YUM vars directory and no RHUIInfo has '
|
||||
'been produced'
|
||||
)
|
@ -0,0 +1,110 @@
|
||||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
from leapp import reporting
|
||||
from leapp.libraries.actor import checketcreleasever
|
||||
from leapp.libraries.common.testutils import (
|
||||
create_report_mocked,
|
||||
CurrentActorMocked,
|
||||
logger_mocked
|
||||
)
|
||||
from leapp.libraries.stdlib import api
|
||||
from leapp.models import PkgManagerInfo, Report, RHUIInfo
|
||||
|
||||
|
||||
@pytest.mark.parametrize('exists', [True, False])
|
||||
def test_etc_releasever(monkeypatch, exists):
|
||||
pkg_mgr_msg = [PkgManagerInfo(etc_releasever='7.7')] if exists else []
|
||||
expected_rel_ver = '6.10'
|
||||
|
||||
mocked_report = create_report_mocked()
|
||||
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(
|
||||
msgs=pkg_mgr_msg, dst_ver=expected_rel_ver
|
||||
)
|
||||
)
|
||||
monkeypatch.setattr(reporting, 'create_report', mocked_report)
|
||||
monkeypatch.setattr(api, 'current_logger', logger_mocked())
|
||||
|
||||
checketcreleasever.process()
|
||||
|
||||
if exists:
|
||||
assert reporting.create_report.called == 1
|
||||
assert expected_rel_ver in mocked_report.report_fields['summary']
|
||||
assert not api.current_logger.dbgmsg
|
||||
else:
|
||||
assert not reporting.create_report.called
|
||||
assert api.current_logger.dbgmsg
|
||||
|
||||
|
||||
def test_etc_releasever_empty(monkeypatch):
|
||||
pkg_mgr_msg = [PkgManagerInfo(etc_releasever=None)]
|
||||
expected_rel_ver = '6.10'
|
||||
|
||||
mocked_report = create_report_mocked()
|
||||
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(
|
||||
msgs=pkg_mgr_msg, dst_ver=expected_rel_ver
|
||||
)
|
||||
)
|
||||
monkeypatch.setattr(reporting, 'create_report', mocked_report)
|
||||
monkeypatch.setattr(api, 'current_logger', logger_mocked())
|
||||
|
||||
checketcreleasever.process()
|
||||
|
||||
assert not reporting.create_report.called
|
||||
assert api.current_logger.dbgmsg
|
||||
|
||||
|
||||
@pytest.mark.parametrize('is_rhui', [True, False])
|
||||
def test_etc_releasever_rhui(monkeypatch, is_rhui):
|
||||
rhui_msg = [RHUIInfo(provider='aws')] if is_rhui else []
|
||||
expected_rel_ver = '6.10'
|
||||
|
||||
mocked_report = create_report_mocked()
|
||||
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(
|
||||
msgs=rhui_msg, dst_ver=expected_rel_ver
|
||||
)
|
||||
)
|
||||
monkeypatch.setattr(reporting, 'create_report', mocked_report)
|
||||
monkeypatch.setattr(api, 'current_logger', logger_mocked())
|
||||
|
||||
checketcreleasever.process()
|
||||
|
||||
if is_rhui:
|
||||
assert reporting.create_report.called == 1
|
||||
assert expected_rel_ver in mocked_report.report_fields['summary']
|
||||
assert not api.current_logger.dbgmsg
|
||||
else:
|
||||
assert not reporting.create_report.called
|
||||
assert api.current_logger.dbgmsg
|
||||
|
||||
|
||||
def test_etc_releasever_neither(monkeypatch):
|
||||
mocked_report = create_report_mocked()
|
||||
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
|
||||
monkeypatch.setattr(reporting, 'create_report', mocked_report)
|
||||
monkeypatch.setattr(api, 'current_logger', logger_mocked())
|
||||
|
||||
checketcreleasever.process()
|
||||
|
||||
assert not reporting.create_report.called
|
||||
assert api.current_logger.dbgmsg
|
||||
|
||||
|
||||
def test_etc_releasever_both(monkeypatch):
|
||||
msgs = [RHUIInfo(provider='aws'), PkgManagerInfo(etc_releasever='7.7')]
|
||||
expected_rel_ver = '6.10'
|
||||
|
||||
mocked_report = create_report_mocked()
|
||||
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(
|
||||
msgs=msgs, dst_ver=expected_rel_ver
|
||||
)
|
||||
)
|
||||
monkeypatch.setattr(reporting, 'create_report', mocked_report)
|
||||
monkeypatch.setattr(api, 'current_logger', logger_mocked())
|
||||
|
||||
checketcreleasever.process()
|
||||
|
||||
assert reporting.create_report.called == 1
|
||||
assert expected_rel_ver in mocked_report.report_fields['summary']
|
||||
assert not api.current_logger.dbgmsg
|
@ -0,0 +1,55 @@
|
||||
from leapp import reporting
|
||||
from leapp.actors import Actor
|
||||
from leapp.exceptions import StopActorExecutionError
|
||||
from leapp.libraries.common.config import version
|
||||
from leapp.models import DracutModule, FIPSInfo, Report, UpgradeInitramfsTasks
|
||||
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
|
||||
|
||||
|
||||
class CheckFips(Actor):
|
||||
"""
|
||||
Inhibit upgrade if FIPS is detected as enabled.
|
||||
"""
|
||||
|
||||
name = 'check_fips'
|
||||
consumes = (FIPSInfo,)
|
||||
produces = (Report, UpgradeInitramfsTasks)
|
||||
tags = (IPUWorkflowTag, ChecksPhaseTag)
|
||||
|
||||
def process(self):
|
||||
fips_info = next(self.consume(FIPSInfo), None)
|
||||
|
||||
if not fips_info:
|
||||
raise StopActorExecutionError('Cannot check the FIPS state because the necessary FIPSInfo message was not received',
|
||||
details={'Problem': 'Did not receive a message with information about FIPS '
|
||||
'usage'})
|
||||
|
||||
if version.get_target_major_version() == '8':
|
||||
if fips_info.is_enabled:
|
||||
title = 'Automated upgrades from RHEL 7 to RHEL 8 in FIPS mode are not supported'
|
||||
summary = ('Leapp has detected that FIPS is enabled on this system. '
|
||||
'Automated in-place upgrade of RHEL 7 systems in FIPS mode is currently unsupported '
|
||||
'and manual intervention is required.')
|
||||
|
||||
fips_7to8_steps_docs_url = 'https://red.ht/planning-upgrade-to-rhel8'
|
||||
|
||||
reporting.create_report([
|
||||
reporting.Title(title),
|
||||
reporting.Summary(summary),
|
||||
reporting.Severity(reporting.Severity.HIGH),
|
||||
reporting.Groups([reporting.Groups.SECURITY, reporting.Groups.INHIBITOR]),
|
||||
reporting.ExternalLink(url=fips_7to8_steps_docs_url,
|
||||
title='Planning an upgrade from RHEL 7 to RHEL 8')
|
||||
])
|
||||
else:
|
||||
# FIXME(mhecko): We include these files manually as they are not included automatically when the fips
|
||||
# module is used due to a bug in dracut. This code should be removed, once the dracut bug is resolved.
|
||||
# See https://bugzilla.redhat.com/show_bug.cgi?id=2176560
|
||||
if fips_info.is_enabled:
|
||||
fips_required_initramfs_files = [
|
||||
'/etc/crypto-policies/back-ends/opensslcnf.config',
|
||||
'/etc/pki/tls/openssl.cnf',
|
||||
'/usr/lib64/ossl-modules/fips.so',
|
||||
]
|
||||
self.produce(UpgradeInitramfsTasks(include_files=fips_required_initramfs_files,
|
||||
include_dracut_modules=[DracutModule(name='fips')]))
|
@ -0,0 +1,23 @@
|
||||
import pytest
|
||||
|
||||
from leapp.libraries.common.config import version
|
||||
from leapp.models import FIPSInfo, Report
|
||||
from leapp.utils.report import is_inhibitor
|
||||
|
||||
|
||||
@pytest.mark.parametrize(('fips_info', 'target_major_version', 'should_inhibit'), [
|
||||
(FIPSInfo(is_enabled=True), '8', True),
|
||||
(FIPSInfo(is_enabled=True), '9', False),
|
||||
(FIPSInfo(is_enabled=False), '8', False),
|
||||
(FIPSInfo(is_enabled=False), '9', False),
|
||||
])
|
||||
def test_check_fips(monkeypatch, current_actor_context, fips_info, target_major_version, should_inhibit):
|
||||
monkeypatch.setattr(version, 'get_target_major_version', lambda: target_major_version)
|
||||
current_actor_context.feed(fips_info)
|
||||
current_actor_context.run()
|
||||
if should_inhibit:
|
||||
output = current_actor_context.consume(Report)
|
||||
assert len(output) == 1
|
||||
assert is_inhibitor(output[0].report)
|
||||
else:
|
||||
assert not any(is_inhibitor(msg.report) for msg in current_actor_context.consume(Report))
|
@ -0,0 +1,19 @@
|
||||
from leapp.actors import Actor
|
||||
from leapp.libraries.actor.checkfstabmountorder import check_fstab_mount_order
|
||||
from leapp.models import StorageInfo
|
||||
from leapp.reporting import Report
|
||||
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
|
||||
|
||||
|
||||
class CheckFstabMountOrder(Actor):
|
||||
"""
|
||||
Checks the order of entries in /etc/fstab based on their mount points and inhibits the upgrade if overshadowing is detected.
|
||||
"""
|
||||
|
||||
name = "check_fstab_mount_order"
|
||||
consumes = (StorageInfo,)
|
||||
produces = (Report,)
|
||||
tags = (ChecksPhaseTag, IPUWorkflowTag,)
|
||||
|
||||
def process(self):
|
||||
check_fstab_mount_order()
|
@ -0,0 +1,95 @@
|
||||
import os
|
||||
|
||||
from leapp import reporting
|
||||
from leapp.libraries.stdlib import api
|
||||
from leapp.models import StorageInfo
|
||||
|
||||
FMT_LIST_SEPARATOR = '\n - '
|
||||
|
||||
|
||||
def _get_common_path(path1, path2):
|
||||
"""
|
||||
Return the longest common absolute sub-path for a pair of given absolute paths.
|
||||
|
||||
Note that this function implements functionality similar to os.path.commonpath(). However, that function is
|
||||
not available in Python 2.7 and thus can't be used here.
|
||||
"""
|
||||
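# e.g. _get_common_path('/var/lib/leapp', '/var/lib') -> '/var/lib'; _get_common_path('/var/lib/leapp', '/home') -> '/'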
|
||||
if not path1 or not path2:
|
||||
return ''
|
||||
|
||||
path1 = path1.strip('/').split('/')
|
||||
path2 = path2.strip('/').split('/')
|
||||
|
||||
common_path = []
|
||||
for path1_part, path2_part in zip(path1, path2):
|
||||
if path1_part != path2_part:
|
||||
break
|
||||
common_path.append(path1_part)
|
||||
return os.path.join('/', *common_path)
|
||||
|
||||
|
||||
def _get_overshadowing_mount_points(mount_points):
|
||||
"""
|
||||
Retrieve set of overshadowing and overshadowed mount points.
|
||||
|
||||
:param list[str] mount_points: absolute paths to mount points without trailing /
|
||||
:returns: set of unique mount points without trailing /
|
||||
"""
|
||||
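# e.g. ['/var/log', '/var'] -> {'/var/log', '/var'} (overshadowing), while ['/var', '/var/log'] -> set()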
overshadowing = set()
|
||||
for i, mount_point in enumerate(mount_points):
|
||||
for overshadowing_mount_point in mount_points[i+1:]:
|
||||
if _get_common_path(mount_point, overshadowing_mount_point) == overshadowing_mount_point:
|
||||
overshadowing.add(overshadowing_mount_point)
|
||||
overshadowing.add(mount_point)
|
||||
return overshadowing
|
||||
|
||||
|
||||
def check_fstab_mount_order():
|
||||
storage_info = next(api.consume(StorageInfo), None)
|
||||
|
||||
if not storage_info:
|
||||
return
|
||||
|
||||
mount_points = []
|
||||
for fstab_entry in storage_info.fstab:
|
||||
mount_point = fstab_entry.fs_file
|
||||
if mount_point != '/':
|
||||
mount_point = mount_point.rstrip('/')
|
||||
if os.path.isabs(mount_point):
|
||||
mount_points.append(mount_point)
|
||||
|
||||
overshadowing = _get_overshadowing_mount_points(mount_points)
|
||||
duplicates = {mp for mp in mount_points if mount_points.count(mp) > 1}
|
||||
|
||||
if not overshadowing:
|
||||
return
|
||||
|
||||
overshadowing_in_order = [mp for mp in mount_points if mp in overshadowing]
|
||||
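# sorting the unique mount points by path length puts every parent path before its children, removing any overshadowing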
overshadowing_fixed = sorted(set(mount_points), key=len)
|
||||
summary = 'Leapp detected an incorrect /etc/fstab format that causes overshadowing of mount points.'
|
||||
hint = 'To prevent the overshadowing:'
|
||||
|
||||
if duplicates:
|
||||
summary += '\nDetected mount points with duplicates: {}'.format(', '.join(duplicates))
|
||||
hint += ' Remove detected duplicates by using unique mount points.'
|
||||
|
||||
if overshadowing:
|
||||
summary += '\nDetected order of overshadowing mount points: {}'.format(', '.join(overshadowing_in_order))
|
||||
hint += (
|
||||
' Reorder the detected overshadowing entries. Possible order of all mount '
|
||||
'points without overshadowing:{}{}'
|
||||
).format(FMT_LIST_SEPARATOR, FMT_LIST_SEPARATOR.join(overshadowing_fixed))
|
||||
|
||||
reporting.create_report([
|
||||
reporting.Title(
|
||||
'Detected incorrect order of entries or duplicate entries in /etc/fstab, preventing a successful '
|
||||
'in-place upgrade.'
|
||||
),
|
||||
reporting.Summary(summary),
|
||||
reporting.Remediation(hint=hint),
|
||||
reporting.RelatedResource('file', '/etc/fstab'),
|
||||
reporting.Severity(reporting.Severity.HIGH),
|
||||
reporting.Groups([reporting.Groups.FILESYSTEM]),
|
||||
reporting.Groups([reporting.Groups.INHIBITOR]),
|
||||
])
|
@ -0,0 +1,89 @@
|
||||
import pytest
|
||||
|
||||
from leapp import reporting
|
||||
from leapp.libraries.actor.checkfstabmountorder import (
|
||||
_get_common_path,
|
||||
_get_overshadowing_mount_points,
|
||||
check_fstab_mount_order
|
||||
)
|
||||
from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
|
||||
from leapp.libraries.stdlib import api
|
||||
from leapp.models import FstabEntry, MountEntry, StorageInfo
|
||||
|
||||
VAR_ENTRY = FstabEntry(fs_spec='', fs_file='/var', fs_vfstype='',
|
||||
fs_mntops='defaults', fs_freq='0', fs_passno='0')
|
||||
VAR_DUPLICATE_ENTRY = FstabEntry(fs_spec='', fs_file='/var/', fs_vfstype='',
|
||||
fs_mntops='defaults', fs_freq='0', fs_passno='0')
|
||||
VAR_LOG_ENTRY = FstabEntry(fs_spec='', fs_file='/var/log', fs_vfstype='',
|
||||
fs_mntops='defaults', fs_freq='0', fs_passno='0')
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
('path1', 'path2', 'expected_output'),
|
||||
[
|
||||
('', '', ''),
|
||||
('/var', '/var', '/var'),
|
||||
('/var/lib/leapp', '/var/lib', '/var/lib'),
|
||||
('/var/lib/leapp', '/home', '/'),
|
||||
('/var/lib/leapp', '/var/lib/lea', '/var/lib'),
|
||||
]
|
||||
)
|
||||
def test_get_common_path(path1, path2, expected_output):
|
||||
assert _get_common_path(path1, path2) == expected_output
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
('fstab_entries', 'expected_output'),
|
||||
[
|
||||
(
|
||||
['/var', '/var/log'],
|
||||
set()
|
||||
),
|
||||
(
|
||||
['/var', '/'],
|
||||
{'/var', '/'}
|
||||
),
|
||||
(
|
||||
['/var/log', '/var', '/var'],
|
||||
{'/var/log', '/var'}
|
||||
),
|
||||
(
|
||||
['/var/log', '/home', '/var', '/var/lib/leapp'],
|
||||
{'/var/log', '/var'}
|
||||
),
|
||||
(
|
||||
['/var/log', '/home', '/var/lib/leapp', '/var'],
|
||||
{'/var/log', '/var', '/var/lib/leapp'}
|
||||
),
|
||||
(
|
||||
['/var/log', '/home', '/var', '/var/lib/lea', '/var/lib/leapp'],
|
||||
{'/var/log', '/var'}
|
||||
),
|
||||
]
|
||||
)
|
||||
def test_get_overshadowing_mount_points(fstab_entries, expected_output):
|
||||
assert _get_overshadowing_mount_points(fstab_entries) == expected_output
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
('storage_info', 'should_inhibit', 'duplicates'),
|
||||
[
|
||||
(StorageInfo(fstab=[]), False, False),
|
||||
(StorageInfo(fstab=[VAR_LOG_ENTRY, VAR_ENTRY]), True, False),
|
||||
(StorageInfo(fstab=[VAR_LOG_ENTRY, VAR_ENTRY, VAR_DUPLICATE_ENTRY]), True, True),
|
||||
(StorageInfo(fstab=[VAR_ENTRY, VAR_LOG_ENTRY]), False, False),
|
||||
]
|
||||
)
|
||||
def test_fstab_mount_order_overshadowing_is_detected(monkeypatch, storage_info, should_inhibit, duplicates):
|
||||
|
||||
created_reports = create_report_mocked()
|
||||
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[storage_info]))
|
||||
monkeypatch.setattr(reporting, 'create_report', created_reports)
|
||||
|
||||
check_fstab_mount_order()
|
||||
|
||||
if should_inhibit:
|
||||
assert created_reports.called == 1
|
||||
|
||||
if duplicates:
|
||||
assert 'Detected mount points with duplicates:' in created_reports.reports[-1]['summary']
|
@ -0,0 +1,56 @@
|
||||
from leapp import reporting
|
||||
from leapp.actors import Actor
|
||||
from leapp.exceptions import StopActorExecutionError
|
||||
from leapp.libraries.common.config import architecture
|
||||
from leapp.models import FirmwareFacts, GrubInfo
|
||||
from leapp.reporting import create_report, Report
|
||||
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
|
||||
|
||||
GRUB_SUMMARY = ('On legacy (BIOS) systems, GRUB2 core (located in the gap between the MBR and the '
|
||||
'first partition) cannot be updated during the rpm transaction and Leapp has to initiate '
|
||||
'the update running "grub2-install" after the transaction. No action is needed before the '
|
||||
'upgrade. After the upgrade, it is recommended to check the GRUB configuration.')
|
||||
|
||||
|
||||
class CheckGrubCore(Actor):
|
||||
"""
|
||||
Check whether we are on a legacy (BIOS) system and instruct Leapp to upgrade the GRUB core
|
||||
"""
|
||||
|
||||
name = 'check_grub_core'
|
||||
consumes = (FirmwareFacts, GrubInfo)
|
||||
produces = (Report,)
|
||||
tags = (ChecksPhaseTag, IPUWorkflowTag)
|
||||
|
||||
def process(self):
|
||||
if architecture.matches_architecture(architecture.ARCH_S390X):
|
||||
# s390x archs use ZIPL instead of GRUB
|
||||
return
|
||||
|
||||
ff = next(self.consume(FirmwareFacts), None)
|
||||
if ff and ff.firmware == 'bios':
|
||||
grub_info = next(self.consume(GrubInfo), None)
|
||||
if not grub_info:
|
||||
raise StopActorExecutionError('Actor did not receive any GrubInfo message.')
|
||||
if grub_info.orig_devices:
|
||||
create_report([
|
||||
reporting.Title(
|
||||
'GRUB2 core will be automatically updated during the upgrade'
|
||||
),
|
||||
reporting.Summary(GRUB_SUMMARY),
|
||||
reporting.Severity(reporting.Severity.HIGH),
|
||||
reporting.Groups([reporting.Groups.BOOT]),
|
||||
])
|
||||
else:
|
||||
create_report([
|
||||
reporting.Title('Leapp could not identify where GRUB2 core is located'),
|
||||
reporting.Summary(
|
||||
'We assumed GRUB2 core is located on the same device(s) as /boot, '
|
||||
'however Leapp could not detect GRUB2 on the device(s). '
|
||||
'GRUB2 core needs to be updated manually on legacy (BIOS) systems. '
|
||||
),
|
||||
reporting.Severity(reporting.Severity.HIGH),
|
||||
reporting.Groups([reporting.Groups.BOOT]),
|
||||
reporting.Remediation(
|
||||
hint='Please run "grub2-install <GRUB_DEVICE> command manually after the upgrade'),
|
||||
])
|
@ -0,0 +1,35 @@
|
||||
from leapp.libraries.common.config import mock_configs
|
||||
from leapp.models import FirmwareFacts, GrubInfo
|
||||
from leapp.reporting import Report
|
||||
|
||||
NO_GRUB = 'Leapp could not identify where GRUB2 core is located'
|
||||
GRUB = 'GRUB2 core will be automatically updated during the upgrade'
|
||||
|
||||
|
||||
def test_actor_update_grub(current_actor_context):
|
||||
current_actor_context.feed(FirmwareFacts(firmware='bios'))
|
||||
current_actor_context.feed(GrubInfo(orig_devices=['/dev/vda', '/dev/vdb']))
|
||||
current_actor_context.run(config_model=mock_configs.CONFIG)
|
||||
assert current_actor_context.consume(Report)
|
||||
assert current_actor_context.consume(Report)[0].report['title'].startswith(GRUB)
|
||||
|
||||
|
||||
def test_actor_no_grub_device(current_actor_context):
|
||||
current_actor_context.feed(FirmwareFacts(firmware='bios'))
|
||||
current_actor_context.feed(GrubInfo())
|
||||
current_actor_context.run(config_model=mock_configs.CONFIG)
|
||||
assert current_actor_context.consume(Report)
|
||||
assert current_actor_context.consume(Report)[0].report['title'].startswith(NO_GRUB)
|
||||
|
||||
|
||||
def test_actor_with_efi(current_actor_context):
|
||||
current_actor_context.feed(FirmwareFacts(firmware='efi'))
|
||||
current_actor_context.run(config_model=mock_configs.CONFIG)
|
||||
assert not current_actor_context.consume(Report)
|
||||
|
||||
|
||||
def test_s390x(current_actor_context):
|
||||
current_actor_context.feed(FirmwareFacts(firmware='bios'))
|
||||
current_actor_context.feed(GrubInfo(orig_devices=['/dev/vda', '/dev/vdb']))
|
||||
current_actor_context.run(config_model=mock_configs.CONFIG_S390X)
|
||||
assert not current_actor_context.consume(Report)
|
@ -0,0 +1,29 @@
|
||||
from leapp.actors import Actor
|
||||
from leapp.libraries.actor import checkinsightsautoregister
|
||||
from leapp.models import InstalledRPM, RpmTransactionTasks
|
||||
from leapp.reporting import Report
|
||||
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
|
||||
|
||||
|
||||
class CheckInsightsAutoregister(Actor):
|
||||
"""
|
||||
Checks if the system can be automatically registered into Red Hat Insights
|
||||
|
||||
The registration is skipped if the LEAPP_NO_INSIGHTS_REGISTER=1 environment variable
|
||||
is set, the --no-insights-register command line argument is present, or the
|
||||
system isn't registered with subscription-manager.
|
||||
|
||||
Additionally, the insights-client package is required. If it's missing, an
|
||||
RpmTransactionTasks message is produced to install it during the upgrade.
|
||||
|
||||
A report is produced informing the user about the automatic registration and,
|
||||
when applicable, the installation of the insights-client package.
|
||||
"""
|
||||
|
||||
name = 'check_insights_auto_register'
|
||||
consumes = (InstalledRPM,)
|
||||
produces = (Report, RpmTransactionTasks)
|
||||
tags = (ChecksPhaseTag, IPUWorkflowTag)
|
||||
|
||||
def process(self):
|
||||
checkinsightsautoregister.process()
|
@ -0,0 +1,53 @@
|
||||
from leapp import reporting
|
||||
from leapp.libraries.common import rhsm
|
||||
from leapp.libraries.common.config import get_env
|
||||
from leapp.libraries.common.rpms import has_package
|
||||
from leapp.libraries.stdlib import api
|
||||
from leapp.models import InstalledRPM, RpmTransactionTasks
|
||||
|
||||
INSIGHTS_CLIENT_PKG = "insights-client"
|
||||
|
||||
|
||||
def _ensure_package(package):
|
||||
"""
|
||||
Produce install tasks if the given package is missing
|
||||
|
||||
:return: True if the install task is produced else False
|
||||
"""
|
||||
has_client_package = has_package(InstalledRPM, package)
|
||||
if not has_client_package:
|
||||
api.produce(RpmTransactionTasks(to_install=[package]))
|
||||
|
||||
return not has_client_package
|
||||
|
||||
|
||||
def _report_registration_info(installing_client):
|
||||
pkg_msg = " The '{}' package required for the registration will be installed during the upgrade."
|
||||
|
||||
title = "Automatic registration into Red Hat Insights"
|
||||
summary = (
|
||||
"After the upgrade, this system will be automatically registered into Red Hat Insights."
|
||||
"{}"
|
||||
" To skip the automatic registration, use the '--no-insights-register' command line option or"
|
||||
" set the LEAPP_NO_INSIGHTS_REGISTER environment variable."
|
||||
).format(pkg_msg.format(INSIGHTS_CLIENT_PKG) if installing_client else "")
|
||||
|
||||
reporting.create_report(
|
||||
[
|
||||
reporting.Title(title),
|
||||
reporting.Summary(summary),
|
||||
reporting.Severity(reporting.Severity.INFO),
|
||||
reporting.Groups([reporting.Groups.SERVICES]),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def process():
|
||||
if rhsm.skip_rhsm():
|
||||
return
|
||||
|
||||
if get_env("LEAPP_NO_INSIGHTS_REGISTER", "0") == "1":
|
||||
return
|
||||
|
||||
installing_client = _ensure_package(INSIGHTS_CLIENT_PKG)
|
||||
_report_registration_info(installing_client)
|
@ -0,0 +1,80 @@
import pytest

from leapp import reporting
from leapp.libraries.actor import checkinsightsautoregister
from leapp.libraries.common import rhsm
from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked
from leapp.libraries.stdlib import api


@pytest.mark.parametrize(
    ("skip_rhsm", "no_register", "should_report"),
    [
        (False, False, True),
        (False, True, False),
        (True, False, False),
        (True, True, False),
    ],
)
def test_should_report(monkeypatch, skip_rhsm, no_register, should_report):

    monkeypatch.setattr(rhsm, "skip_rhsm", lambda: skip_rhsm)
    monkeypatch.setattr(
        api,
        "current_actor",
        CurrentActorMocked(
            envars={"LEAPP_NO_INSIGHTS_REGISTER": "1" if no_register else "0"}
        ),
    )

    def ensure_package_mocked(package):
        assert package == checkinsightsautoregister.INSIGHTS_CLIENT_PKG
        return False

    monkeypatch.setattr(
        checkinsightsautoregister, "_ensure_package", ensure_package_mocked
    )

    called = [False]

    def _report_registration_info_mocked(_):
        called[0] = True

    monkeypatch.setattr(
        checkinsightsautoregister,
        "_report_registration_info",
        _report_registration_info_mocked,
    )

    checkinsightsautoregister.process()

    assert called[0] == should_report


@pytest.mark.parametrize(
    "already_installed, should_install", [(True, False), (False, True)]
)
def test_install_task_produced(monkeypatch, already_installed, should_install):

    def has_package_mocked(*args, **kwargs):
        return already_installed

    monkeypatch.setattr(checkinsightsautoregister, "has_package", has_package_mocked)
    monkeypatch.setattr(api, "produce", produce_mocked())

    checkinsightsautoregister._ensure_package(
        checkinsightsautoregister.INSIGHTS_CLIENT_PKG
    )

    assert api.produce.called == should_install


@pytest.mark.parametrize("installing_client", (True, False))
def test_report_created(monkeypatch, installing_client):

    created_reports = create_report_mocked()
    monkeypatch.setattr(reporting, "create_report", created_reports)

    checkinsightsautoregister._report_registration_info(installing_client)

    assert created_reports.called
@ -0,0 +1,27 @@
from leapp.actors import Actor
from leapp.libraries.actor.checkipaserver import ipa_inhibit_upgrade, ipa_warn_pkg_installed
from leapp.models import IpaInfo
from leapp.reporting import Report
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag


class CheckIPAServer(Actor):
    """
    Check for ipa-server and inhibit upgrade
    """

    name = "check_ipa_server"
    consumes = (IpaInfo,)
    produces = (Report,)
    tags = (ChecksPhaseTag, IPUWorkflowTag)

    def process(self):
        for ipainfo in self.consume(IpaInfo):
            if ipainfo.is_server_configured:
                self.log.info(
                    "IdM server instance detected, inhibit upgrade"
                )
                ipa_inhibit_upgrade(ipainfo)
            elif ipainfo.has_server_package:
                self.log.info("Unused ipa-server package detected")
                ipa_warn_pkg_installed(ipainfo)
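The branching in process() is easiest to read as a three-way decision. A hedged sketch with an illustrative helper (not part of this patch):

def ipa_action(is_server_configured, has_server_package):
    # configured server -> inhibitor report, unused server package -> warning,
    # anything else -> no report at all
    if is_server_configured:
        return 'inhibit'
    if has_server_package:
        return 'warn'
    return None


assert ipa_action(True, True) == 'inhibit'
assert ipa_action(False, True) == 'warn'
assert ipa_action(False, False) is None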
@ -0,0 +1,72 @@
from leapp import reporting
from leapp.libraries.common.config.version import get_source_major_version

MIGRATION_GUIDE_7 = "https://red.ht/IdM-upgrading-RHEL-7-to-RHEL-8"

# TBD: update the doc url when migration guide 8->9 becomes available
MIGRATION_GUIDE_8 = "https://red.ht/IdM-upgrading-RHEL-8-to-RHEL-9"
MIGRATION_GUIDES = {
    '7': MIGRATION_GUIDE_7,
    '8': MIGRATION_GUIDE_8
}


def ipa_inhibit_upgrade(ipainfo):
    """
    Create an upgrade inhibitor for a configured ipa-server
    """
    entries = [
        reporting.Title(
            "ipa-server does not support in-place upgrade"
        ),
        reporting.Summary(
            "An IdM server installation was detected on the system. IdM "
            "does not support in-place upgrade."
        ),
        reporting.Remediation(
            hint="Follow the IdM RHEL migration guidelines."
        ),
        reporting.ExternalLink(
            url=MIGRATION_GUIDES[get_source_major_version()],
            title="IdM migration guide",
        ),
        reporting.Severity(reporting.Severity.HIGH),
        reporting.Groups([reporting.Groups.INHIBITOR]),
        reporting.Groups([reporting.Groups.SERVICES]),
        reporting.RelatedResource("package", "ipa-server"),
    ]
    return reporting.create_report(entries)


def ipa_warn_pkg_installed(ipainfo):
    """
    Warn that the unused ipa-server package is installed
    """
    if ipainfo.is_client_configured:
        summary = (
            "The ipa-server package is installed but only the IdM client is "
            "configured on this system."
        )
    else:
        summary = (
            "The ipa-server package is installed but neither the IdM server "
            "nor the client is configured on this system."
        )
    entries = [
        reporting.Title(
            "ipa-server package is installed but no IdM is configured"
        ),
        reporting.Summary(summary),
        reporting.Remediation(
            hint="Remove the unused ipa-server package",
            commands=[["yum", "remove", "-y", "ipa-server"]],
        ),
        reporting.ExternalLink(
            url=MIGRATION_GUIDES[get_source_major_version()],
            title="IdM migration guide",
        ),
        reporting.Severity(reporting.Severity.MEDIUM),
        reporting.Groups([reporting.Groups.SERVICES]),
        reporting.RelatedResource("package", "ipa-server"),
    ]
    return reporting.create_report(entries)
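One detail worth noting in the remediation above: commands takes a list of argv-style lists rather than shell strings. A tiny shape check, not leapp API (the assertion is purely illustrative):

remediation_commands = [["yum", "remove", "-y", "ipa-server"]]
# each inner list is one command vector (argv-style)
assert all(isinstance(cmd, list) for cmd in remediation_commands)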
@ -0,0 +1,58 @@
import pytest

from leapp.libraries.common.config import version
from leapp.models import IpaInfo
from leapp.reporting import Report
from leapp.snactor.fixture import current_actor_context
from leapp.utils.report import is_inhibitor


def mock_ipa_info(client, server_pkg, server_configured):
    return IpaInfo(
        has_client_package=client,
        is_client_configured=client,
        has_server_package=server_pkg,
        is_server_configured=server_configured,
    )


@pytest.mark.parametrize('src_v', ['7', '8'])
def test_inhibit_ipa_configured(monkeypatch, current_actor_context, src_v):
    monkeypatch.setattr(version, "get_source_major_version", lambda: src_v)
    current_actor_context.feed(mock_ipa_info(True, True, True))
    current_actor_context.run()
    reports = current_actor_context.consume(Report)

    assert len(reports) == 1
    fields = reports[0].report
    assert is_inhibitor(fields)
    assert "ipa-server" in fields["title"]


@pytest.mark.parametrize('src_v', ['7', '8'])
def test_warn_server_pkg(monkeypatch, current_actor_context, src_v):
    monkeypatch.setattr(version, "get_source_major_version", lambda: src_v)
    current_actor_context.feed(mock_ipa_info(True, True, False))
    current_actor_context.run()
    reports = current_actor_context.consume(Report)

    assert len(reports) == 1
    fields = reports[0].report
    assert not is_inhibitor(fields)
    assert "ipa-server" in fields["title"]


def test_client_only(current_actor_context):
    current_actor_context.feed(mock_ipa_info(True, False, False))
    current_actor_context.run()
    reports = current_actor_context.consume(Report)

    assert not reports


def test_no_ipa(current_actor_context):
    current_actor_context.feed(mock_ipa_info(False, False, False))
    current_actor_context.run()
    reports = current_actor_context.consume(Report)

    assert not reports
@ -0,0 +1,20 @@
from leapp.actors import Actor
from leapp.libraries.actor import checkmemory
from leapp.models import MemoryInfo, Report
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag


class CheckMemory(Actor):
    """
    The actor checks the size of RAM against the RHEL 8 minimal hardware requirements

    Using the following resource: https://access.redhat.com/articles/rhel-limits
    """

    name = 'checkmemory'
    consumes = (MemoryInfo,)
    produces = (Report,)
    tags = (ChecksPhaseTag, IPUWorkflowTag)

    def process(self):
        checkmemory.process()
@ -0,0 +1,49 @@
from leapp import reporting
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.common.config import architecture, version
from leapp.libraries.stdlib import api
from leapp.models import MemoryInfo

# Minimal required memory per architecture, in KiB (MemoryInfo.mem_total uses KiB)
min_req_memory = {
    architecture.ARCH_X86_64: 1572864,  # 1.5G
    architecture.ARCH_ARM64: 1572864,  # 1.5G
    architecture.ARCH_PPC64LE: 3145728,  # 3G
    architecture.ARCH_S390X: 1572864,  # 1.5G
}


def _check_memory(mem_info):
    msg = {}

    for arch, min_req in iter(min_req_memory.items()):
        if architecture.matches_architecture(arch):
            is_ok = mem_info.mem_total >= min_req
            msg = {} if is_ok else {'detected': mem_info.mem_total,
                                    'minimal_req': min_req}

    return msg


def process():
    memoryinfo = next(api.consume(MemoryInfo), None)
    if memoryinfo is None:
        raise StopActorExecutionError(message="Missing information about Memory.")

    minimum_req_error = _check_memory(memoryinfo)

    if minimum_req_error:
        title = 'Minimum memory requirements for RHEL {} are not met'.format(version.get_target_major_version())
        summary = 'Memory detected: {} MiB, required: {} MiB'.format(
            int(minimum_req_error['detected'] / 1024),  # noqa: W1619; pylint: disable=old-division
            int(minimum_req_error['minimal_req'] / 1024),  # noqa: W1619; pylint: disable=old-division
        )
        reporting.create_report([
            reporting.Title(title),
            reporting.Summary(summary),
            reporting.Severity(reporting.Severity.HIGH),
            reporting.Groups([reporting.Groups.SANITY, reporting.Groups.INHIBITOR]),
            reporting.ExternalLink(
                url='https://access.redhat.com/articles/rhel-limits',
                title='Red Hat Enterprise Linux Technology Capabilities and Limits'
            ),
        ])
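To make the units above concrete: the thresholds and mem_total are KiB values, and the report converts them to MiB by dividing by 1024. A quick arithmetic check:

min_kib = 1572864                            # x86_64/aarch64/s390x threshold above
assert min_kib // 1024 == 1536               # the MiB value shown in the report summary
assert min_kib / 1024.0 / 1024.0 == 1.5      # i.e. the "1.5G" from the comments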
@ -0,0 +1,31 @@
from leapp import reporting
from leapp.libraries.actor import checkmemory
from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
from leapp.libraries.stdlib import api
from leapp.models import MemoryInfo
from leapp.utils.report import is_inhibitor


def test_check_memory_low(monkeypatch):
    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
    minimum_req_error = checkmemory._check_memory(MemoryInfo(mem_total=1024))
    assert minimum_req_error


def test_check_memory_high(monkeypatch):
    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
    minimum_req_error = checkmemory._check_memory(MemoryInfo(mem_total=16273492))
    assert not minimum_req_error


def test_report(monkeypatch):
    title_msg = 'Minimum memory requirements for RHEL 8 are not met'
    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())
    monkeypatch.setattr(api, 'consume', lambda x: iter([MemoryInfo(mem_total=129)]))
    monkeypatch.setattr(reporting, "create_report", create_report_mocked())
    checkmemory.process()
    assert reporting.create_report.called
    assert title_msg == reporting.create_report.report_fields['title']
    assert is_inhibitor(reporting.create_report.report_fields)
@ -0,0 +1,21 @@
from leapp.actors import Actor
from leapp.libraries.actor.checkmountoptions import check_mount_options
from leapp.models import StorageInfo
from leapp.reporting import Report
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag


class CheckMountOptions(Actor):
    """
    Check for mount options preventing the upgrade.

    Checks performed:
        - /var, /var/lib or /var/lib/leapp is mounted with the noexec option
    """
    name = "check_mount_options"
    consumes = (StorageInfo,)
    produces = (Report,)
    tags = (ChecksPhaseTag, IPUWorkflowTag,)

    def process(self):
        check_mount_options()
@ -0,0 +1,75 @@
from leapp import reporting
from leapp.libraries.stdlib import api
from leapp.models import StorageInfo


def inhibit_upgrade_due_var_with_noexec(mountpoint, found_in_fstab=False):
    summary = (
        'Leapp detected that the {0} mountpoint is mounted with the "noexec" option, '
        'which prevents binaries necessary for the upgrade from being executed. '
        'The upgrade process cannot continue with {0} mounted using the "noexec" option.'
    )

    if found_in_fstab:
        hint = (
            'Temporarily remove the "noexec" option from the {0} entry in /etc/fstab until the system is upgraded, '
            'and remount the partition without the "noexec" option.'
        )
        related_resource = [reporting.RelatedResource('file', '/etc/fstab')]
    else:
        hint = (
            'Remount {0} without the "noexec" option and make sure the change is persistent '
            'during the entire in-place upgrade process.'
        )
        related_resource = []

    reporting.create_report([
        reporting.Title(
            'Detected partitions mounted with the "noexec" option, preventing a successful in-place upgrade.'
        ),
        reporting.Summary(summary.format(mountpoint)),
        reporting.Remediation(hint=hint.format(mountpoint)),
        reporting.Severity(reporting.Severity.HIGH),
        reporting.Groups([reporting.Groups.FILESYSTEM]),
        reporting.Groups([reporting.Groups.INHIBITOR]),
    ] + related_resource)


def find_mount_entry_with_mountpoint(mount_entries, mountpoint):
    for mount_entry in mount_entries:
        if mount_entry.mount == mountpoint:
            return mount_entry
    return None


def find_fstab_entry_with_mountpoint(fstab_entries, mountpoint):
    for fstab_entry in fstab_entries:
        if fstab_entry.fs_file == mountpoint:
            return fstab_entry
    return None


def check_noexec_on_var(storage_info):
    """Check for /var, /var/lib or /var/lib/leapp being mounted with the noexec mount option."""

    # Order of checking is important as mount options on /var/lib override those on /var
    mountpoints_to_check = ('/var/lib/leapp', '/var/lib', '/var')
    for mountpoint in mountpoints_to_check:
        fstab_entry = find_fstab_entry_with_mountpoint(storage_info.fstab, mountpoint)
        if fstab_entry and 'noexec' in fstab_entry.fs_mntops.split(','):
            inhibit_upgrade_due_var_with_noexec(fstab_entry.fs_file, found_in_fstab=True)
            return  # Do not check further as present mounts would likely reflect fstab

    # Make sure present mountpoints don't contain noexec as well - the user might have fixed noexec in fstab
    # but did not remount the partition, or, less likely, mounted the partition without creating an fstab entry
    for mountpoint in mountpoints_to_check:
        mount_entry = find_mount_entry_with_mountpoint(storage_info.mount, mountpoint)
        if mount_entry and 'noexec' in mount_entry.options.split(','):
            inhibit_upgrade_due_var_with_noexec(mount_entry.mount, found_in_fstab=False)
            return


def check_mount_options():
    for storage_info in api.consume(StorageInfo):
        check_noexec_on_var(storage_info)
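The split(',') checks above are deliberate: matching whole option names avoids substring false positives. A small sketch of the difference:

opts = 'rw,relatime,noexec'
assert 'noexec' in opts.split(',')    # whole-option match, as in the code above
assert 'exec' in opts                 # a naive substring test would also hit this
assert 'exec' not in opts.split(',')  # but 'exec' is not actually among the options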
@ -0,0 +1,61 @@
import pytest

from leapp import reporting
from leapp.libraries.actor.checkmountoptions import check_mount_options
from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
from leapp.libraries.stdlib import api
from leapp.models import FstabEntry, MountEntry, StorageInfo


@pytest.mark.parametrize(
    ('fstab_entries', 'mounts', 'should_inhibit'),
    [
        (
            (('/var', 'defaults'), ),
            (('/var', 'defaults'), ),
            False
        ),
        (
            (('/var', 'defaults'), ('/var/lib', 'defaults'), ),
            (('/var', 'defaults'), ('/var/lib', 'defaults'), ),
            False
        ),
        (
            (('/var', 'defaults'), ('/var/lib/leapp', 'noexec')),
            (('/var', 'defaults'), ('/var/lib/leapp', 'noexec')),
            True
        ),
        (
            (('/var', 'defaults'), ('/var/lib', 'noexec')),
            (('/var', 'noexec'), ('/var/lib', 'noexec')),
            True
        ),
        (
            (('/var', 'noexec'), ('/var/lib', 'defaults')),
            (('/var', 'noexec'), ('/var/lib', 'noexec')),
            True
        ),
    ]
)
def test_var_mounted_with_noexec_is_detected(monkeypatch, fstab_entries, mounts, should_inhibit):
    mounts = [
        MountEntry(name='/dev/sdaX', tp='ext4', mount=mountpoint, options=options) for mountpoint, options in mounts
    ]

    fstab_entries = [
        FstabEntry(fs_spec='', fs_file=mountpoint, fs_vfstype='',
                   fs_mntops=opts, fs_freq='0', fs_passno='0') for mountpoint, opts in fstab_entries
    ]

    storage_info = StorageInfo(mount=mounts, fstab=fstab_entries)

    created_reports = create_report_mocked()
    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[storage_info]))
    monkeypatch.setattr(reporting, 'create_report', created_reports)

    check_mount_options()

    assert bool(created_reports.called) == should_inhibit
@ -0,0 +1,77 @@
from leapp import reporting
from leapp.actors import Actor
from leapp.libraries.common.config import get_env
from leapp.models import StorageInfo
from leapp.reporting import create_report, Report
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag


class CheckNfs(Actor):
    """
    Check if an NFS filesystem is in use. If yes, inhibit the upgrade process.

    The actor looks for NFS in the following sources: /etc/fstab, mount and systemd-mount.
    If NFS is found in any of these sources, the actor inhibits the upgrade.
    """
    name = "check_nfs"
    consumes = (StorageInfo,)
    produces = (Report,)
    tags = (ChecksPhaseTag, IPUWorkflowTag,)

    def process(self):
        # if network in the initramfs is enabled, NFS inhibitors are redundant
        if get_env('LEAPP_DEVEL_INITRAM_NETWORK', None):
            return
        details = "NFS is currently not supported by the in-place upgrade.\n" \
                  "We have found NFS usage at the following locations:\n"

        def _is_nfs(a_type):
            return a_type.startswith('nfs') and a_type != 'nfsd'

        for storage in self.consume(StorageInfo):
            # Check fstab
            fstab_nfs_mounts = []
            for fstab in storage.fstab:
                if _is_nfs(fstab.fs_vfstype):
                    fstab_nfs_mounts.append(" - {} {}\n".format(fstab.fs_spec, fstab.fs_file))

            # Check mount
            nfs_mounts = []
            for mount in storage.mount:
                if _is_nfs(mount.tp):
                    nfs_mounts.append(" - {} {}\n".format(mount.name, mount.mount))

            # Check systemd-mount
            systemd_nfs_mounts = []
            for systemdmount in storage.systemdmount:
                if _is_nfs(systemdmount.fs_type):
                    # mountpoint is not available in the model
                    systemd_nfs_mounts.append(" - {}\n".format(systemdmount.node))

            if any((fstab_nfs_mounts, nfs_mounts, systemd_nfs_mounts)):
                if fstab_nfs_mounts:
                    details += "- NFS shares found in /etc/fstab:\n"
                    details += ''.join(fstab_nfs_mounts)

                if nfs_mounts:
                    details += "- NFS shares currently mounted:\n"
                    details += ''.join(nfs_mounts)

                if systemd_nfs_mounts:
                    details += "- NFS mounts configured with systemd-mount:\n"
                    details += ''.join(systemd_nfs_mounts)

                fstab_related_resource = [reporting.RelatedResource('file', '/etc/fstab')] if fstab_nfs_mounts else []

                create_report([
                    reporting.Title("Use of NFS detected. Upgrade can't proceed"),
                    reporting.Summary(details),
                    reporting.Severity(reporting.Severity.HIGH),
                    reporting.Groups([
                        reporting.Groups.FILESYSTEM,
                        reporting.Groups.NETWORK
                    ]),
                    reporting.Remediation(hint='Disable NFS temporarily for the upgrade if possible.'),
                    reporting.Groups([reporting.Groups.INHIBITOR]),
                ] + fstab_related_resource
                )
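The _is_nfs() helper above deliberately catches nfs and nfs4 while excluding the kernel's nfsd pseudo-filesystem; restated standalone for clarity:

def _is_nfs(a_type):
    return a_type.startswith('nfs') and a_type != 'nfsd'


assert _is_nfs('nfs') and _is_nfs('nfs4')
assert not _is_nfs('nfsd')   # /proc/fs/nfsd must not trigger the inhibitor
assert not _is_nfs('ext4')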
@ -0,0 +1,123 @@
import pytest

from leapp.libraries.common import config
from leapp.models import FstabEntry, MountEntry, StorageInfo, SystemdMountEntry
from leapp.reporting import Report
from leapp.snactor.fixture import current_actor_context
from leapp.utils.report import is_inhibitor


@pytest.mark.parametrize('nfs_fstype', ('nfs', 'nfs4'))
def test_actor_with_systemdmount_entry(current_actor_context, nfs_fstype, monkeypatch):
    monkeypatch.setattr(config, 'get_env', lambda x, y: y)
    with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
                                                 wwn="n/a", fs_type=nfs_fstype, label="n/a",
                                                 uuid="n/a")]
    current_actor_context.feed(StorageInfo(systemdmount=with_systemdmount_entry))
    current_actor_context.run()
    report_fields = current_actor_context.consume(Report)[0].report
    assert is_inhibitor(report_fields)


def test_actor_without_systemdmount_entry(current_actor_context, monkeypatch):
    monkeypatch.setattr(config, 'get_env', lambda x, y: y)
    without_systemdmount_entry = [SystemdMountEntry(node="/dev/sda1",
                                                    path="pci-0000:00:17.0-ata-2",
                                                    model="TOSHIBA_THNSNJ512GDNU_A",
                                                    wwn="0x500080d9108e8753",
                                                    fs_type="ext4", label="n/a",
                                                    uuid="5675d309-eff7-4eb1-9c27-58bc5880ec72")]
    current_actor_context.feed(StorageInfo(systemdmount=without_systemdmount_entry))
    current_actor_context.run()
    assert not current_actor_context.consume(Report)


@pytest.mark.parametrize('nfs_fstype', ('nfs', 'nfs4'))
def test_actor_with_fstab_entry(current_actor_context, nfs_fstype, monkeypatch):
    monkeypatch.setattr(config, 'get_env', lambda x, y: y)
    with_fstab_entry = [FstabEntry(fs_spec="lithium:/mnt/data", fs_file="/mnt/data",
                                   fs_vfstype=nfs_fstype,
                                   fs_mntops="noauto,noatime,rsize=32768,wsize=32768",
                                   fs_freq="0", fs_passno="0")]
    current_actor_context.feed(StorageInfo(fstab=with_fstab_entry))
    current_actor_context.run()
    report_fields = current_actor_context.consume(Report)[0].report
    assert is_inhibitor(report_fields)


def test_actor_without_fstab_entry(current_actor_context, monkeypatch):
    monkeypatch.setattr(config, 'get_env', lambda x, y: y)
    without_fstab_entry = [FstabEntry(fs_spec="/dev/mapper/fedora-home", fs_file="/home",
                                      fs_vfstype="ext4",
                                      fs_mntops="defaults,x-systemd.device-timeout=0",
                                      fs_freq="1", fs_passno="2")]
    current_actor_context.feed(StorageInfo(fstab=without_fstab_entry))
    current_actor_context.run()
    assert not current_actor_context.consume(Report)


def test_actor_with_nfsd(current_actor_context, monkeypatch):
    monkeypatch.setattr(config, 'get_env', lambda x, y: y)
    with_nfsd = [MountEntry(name="nfsd", mount="/proc/fs/nfsd", tp="nfsd", options="rw,relatime")]
    current_actor_context.feed(StorageInfo(mount=with_nfsd))
    current_actor_context.run()
    assert not current_actor_context.consume(Report)


@pytest.mark.parametrize('nfs_fstype', ('nfs', 'nfs4'))
def test_actor_with_mount_share(current_actor_context, nfs_fstype, monkeypatch):
    monkeypatch.setattr(config, 'get_env', lambda x, y: y)
    with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp=nfs_fstype,
                                   options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
    current_actor_context.feed(StorageInfo(mount=with_mount_share))
    current_actor_context.run()
    report_fields = current_actor_context.consume(Report)[0].report
    assert is_inhibitor(report_fields)


def test_actor_without_mount_share(current_actor_context, monkeypatch):
    monkeypatch.setattr(config, 'get_env', lambda x, y: y)
    without_mount_share = [MountEntry(name="tmpfs", mount="/run/snapd/ns", tp="tmpfs",
                                      options="rw,nosuid,nodev,seclabel,mode=755")]
    current_actor_context.feed(StorageInfo(mount=without_mount_share))
    current_actor_context.run()
    assert not current_actor_context.consume(Report)


def test_actor_skipped_if_initram_network_enabled(current_actor_context, monkeypatch):
    """Check that the NFS inhibitors do not stop the upgrade when the initram network env var is set"""
    monkeypatch.setattr(config, 'get_env', lambda x, y: 'network-manager' if x == 'LEAPP_DEVEL_INITRAM_NETWORK' else y)
    with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp='nfs',
                                   options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
    with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
                                                 wwn="n/a", fs_type='nfs', label="n/a",
                                                 uuid="n/a")]
    with_fstab_entry = [FstabEntry(fs_spec="lithium:/mnt/data", fs_file="/mnt/data",
                                   fs_vfstype='nfs',
                                   fs_mntops="noauto,noatime,rsize=32768,wsize=32768",
                                   fs_freq="0", fs_passno="0")]
    current_actor_context.feed(StorageInfo(mount=with_mount_share,
                                           systemdmount=with_systemdmount_entry,
                                           fstab=with_fstab_entry))
    current_actor_context.run()
    assert not current_actor_context.consume(Report)


def test_actor_not_skipped_if_initram_network_empty(current_actor_context, monkeypatch):
    """Check that the NFS inhibitors still stop the upgrade when the initram network env var is empty"""
    monkeypatch.setattr(config, 'get_env', lambda x, y: '' if x == 'LEAPP_DEVEL_INITRAM_NETWORK' else y)
    with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp='nfs',
                                   options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")]
    with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a",
                                                 wwn="n/a", fs_type='nfs', label="n/a",
                                                 uuid="n/a")]
    with_fstab_entry = [FstabEntry(fs_spec="lithium:/mnt/data", fs_file="/mnt/data",
                                   fs_vfstype='nfs',
                                   fs_mntops="noauto,noatime,rsize=32768,wsize=32768",
                                   fs_freq="0", fs_passno="0")]
    current_actor_context.feed(StorageInfo(mount=with_mount_share,
                                           systemdmount=with_systemdmount_entry,
                                           fstab=with_fstab_entry))
    current_actor_context.run()
    report_fields = current_actor_context.consume(Report)[0].report
    assert is_inhibitor(report_fields)
@ -0,0 +1,21 @@
from leapp.actors import Actor
from leapp.libraries.actor.checkosrelease import check_os_version, skip_check
from leapp.reporting import Report
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag


class CheckOSRelease(Actor):
    """
    Check if the current RHEL minor version is supported. If not, inhibit the upgrade process.

    This check can be skipped by using the LEAPP_DEVEL_SKIP_CHECK_OS_RELEASE environment variable.
    """

    name = 'check_os_release'
    consumes = ()
    produces = (Report,)
    tags = (ChecksPhaseTag, IPUWorkflowTag)

    def process(self):
        if not skip_check():
            check_os_version()
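A hedged sketch of what the LEAPP_DEVEL_SKIP_CHECK_OS_RELEASE gate described in the docstring might look like; the real skip_check() lives in leapp.libraries.actor.checkosrelease and is not shown in this diff, so the exact semantics here are an assumption:

import os


def skip_check_sketch():
    # assumption: the variable acts as a boolean "1" switch,
    # like the other LEAPP_* toggles in this patch set
    return os.environ.get('LEAPP_DEVEL_SKIP_CHECK_OS_RELEASE', '0') == '1'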
Some files were not shown because too many files have changed in this diff.