From f1d425f5598f575a9f132a37acebc9c5644aa936 Mon Sep 17 00:00:00 2001 From: Andrew Price Date: Tue, 5 Jul 2022 13:50:09 +0100 Subject: [PATCH] feat: gfs2 role to manage GFS2 clustered filesystems A basic working gfs2 ansible role. Signed-off-by: Rich Megginson --- .ansible-lint | 24 ++ .commitlintrc.js | 141 ++++++++++ .github/dependabot.yml | 9 + .github/pull_request_template.md | 7 + .github/workflows/ansible-lint.yml | 49 ++++ .../workflows/ansible-managed-var-comment.yml | 38 +++ .github/workflows/ansible-plugin-scan.yml | 38 +++ .github/workflows/ansible-test.yml | 47 ++++ .github/workflows/build_docs.yml | 104 +++++++ .github/workflows/changelog_to_tag.yml | 91 ++++++ .github/workflows/codeql.yml | 48 ++++ .github/workflows/markdownlint.yml | 38 +++ .github/workflows/pr-title-lint.yml | 31 +++ .github/workflows/python-unit-test.yml | 86 ++++++ .github/workflows/test_converting_readme.yml | 46 +++ .github/workflows/weekly_ci.yml | 84 ++++++ .github/workflows/woke.yml | 19 ++ .gitignore | 15 + .markdownlint.yaml | 261 ++++++++++++++++++ .ostree/README.md | 3 + .ostree/get_ostree_data.sh | 132 +++++++++ .ostree/packages-runtime.txt | 3 + .pandoc_template.html5 | 166 +++++++++++ .yamllint.yml | 5 + .yamllint_defaults.yml | 16 ++ LICENSE | 2 +- README-ansible.md | 5 + README-ostree.md | 66 +++++ README.md | 159 +++++++++++ ansible_pytest_extra_requirements.txt | 6 + contributing.md | 52 ++++ defaults/main.yml | 23 ++ meta/argument_specs.yml | 138 +++++++++ meta/collection-requirements.yml | 5 + meta/main.yml | 26 ++ pylint_extra_requirements.txt | 4 + tasks/enable-repositories/CentOS.yml | 4 + tasks/enable-repositories/Fedora.yml | 4 + tasks/enable-repositories/RedHat.yml | 15 + tasks/fs.yml | 192 +++++++++++++ tasks/install-packages.yml | 53 ++++ tasks/main.yml | 22 ++ tasks/setup-cluster.yml | 106 +++++++ tests/.fmf/version | 1 + tests/get_unused_disk.yml | 11 + tests/library/find_unused_disk.py | 225 +++++++++++++++ tests/module_utils/size.py | 166 +++++++++++ tests/provision.fmf | 7 + tests/roles/linux-system-roles.gfs2/defaults | 1 + tests/roles/linux-system-roles.gfs2/meta | 1 + tests/roles/linux-system-roles.gfs2/tasks | 1 + tests/roles/linux-system-roles.gfs2/vars | 1 + tests/tests_basic_gfs2_cluster.yml | 122 ++++++++ tests/tests_default.yml | 11 + tox.ini | 11 + vars/RedHat_8.yml | 5 + vars/RedHat_9.yml | 5 + vars/main.yml | 18 ++ 58 files changed, 2968 insertions(+), 1 deletion(-) create mode 100644 .ansible-lint create mode 100644 .commitlintrc.js create mode 100644 .github/dependabot.yml create mode 100644 .github/pull_request_template.md create mode 100644 .github/workflows/ansible-lint.yml create mode 100644 .github/workflows/ansible-managed-var-comment.yml create mode 100644 .github/workflows/ansible-plugin-scan.yml create mode 100644 .github/workflows/ansible-test.yml create mode 100644 .github/workflows/build_docs.yml create mode 100644 .github/workflows/changelog_to_tag.yml create mode 100644 .github/workflows/codeql.yml create mode 100644 .github/workflows/markdownlint.yml create mode 100644 .github/workflows/pr-title-lint.yml create mode 100644 .github/workflows/python-unit-test.yml create mode 100644 .github/workflows/test_converting_readme.yml create mode 100644 .github/workflows/weekly_ci.yml create mode 100644 .github/workflows/woke.yml create mode 100644 .gitignore create mode 100644 .markdownlint.yaml create mode 100644 .ostree/README.md create mode 100755 .ostree/get_ostree_data.sh create mode 100644 .ostree/packages-runtime.txt create mode 100644 
.pandoc_template.html5 create mode 100644 .yamllint.yml create mode 100644 .yamllint_defaults.yml create mode 100644 README-ansible.md create mode 100644 README-ostree.md create mode 100644 README.md create mode 100644 ansible_pytest_extra_requirements.txt create mode 100644 contributing.md create mode 100644 defaults/main.yml create mode 100644 meta/argument_specs.yml create mode 100644 meta/collection-requirements.yml create mode 100644 meta/main.yml create mode 100644 pylint_extra_requirements.txt create mode 100644 tasks/enable-repositories/CentOS.yml create mode 100644 tasks/enable-repositories/Fedora.yml create mode 100644 tasks/enable-repositories/RedHat.yml create mode 100644 tasks/fs.yml create mode 100644 tasks/install-packages.yml create mode 100644 tasks/main.yml create mode 100644 tasks/setup-cluster.yml create mode 100644 tests/.fmf/version create mode 100644 tests/get_unused_disk.yml create mode 100644 tests/library/find_unused_disk.py create mode 100644 tests/module_utils/size.py create mode 100644 tests/provision.fmf create mode 120000 tests/roles/linux-system-roles.gfs2/defaults create mode 120000 tests/roles/linux-system-roles.gfs2/meta create mode 120000 tests/roles/linux-system-roles.gfs2/tasks create mode 120000 tests/roles/linux-system-roles.gfs2/vars create mode 100644 tests/tests_basic_gfs2_cluster.yml create mode 100644 tests/tests_default.yml create mode 100644 tox.ini create mode 100644 vars/RedHat_8.yml create mode 100644 vars/RedHat_9.yml create mode 100644 vars/main.yml diff --git a/.ansible-lint b/.ansible-lint new file mode 100644 index 0000000..b862d9b --- /dev/null +++ b/.ansible-lint @@ -0,0 +1,24 @@ +--- +profile: production +kinds: + - yaml: "**/meta/collection-requirements.yml" + - playbook: "**/tests/get_coverage.yml" + - yaml: "**/tests/collection-requirements.yml" + - playbook: "**/tests/tests_*.yml" + - playbook: "**/tests/setup-snapshot.yml" + - tasks: "**/tests/*.yml" + - playbook: "**/tests/playbooks/*.yml" + - tasks: "**/tests/tasks/*.yml" + - tasks: "**/tests/tasks/*/*.yml" + - vars: "**/tests/vars/*.yml" + - playbook: "**/examples/*.yml" +skip_list: + - fqcn-builtins + - var-naming[no-role-prefix] +exclude_paths: + - tests/roles/ + - .github/ + - .markdownlint.yaml + - examples/roles/ +mock_roles: + - linux-system-roles.gfs2 diff --git a/.commitlintrc.js b/.commitlintrc.js new file mode 100644 index 0000000..f8a39ba --- /dev/null +++ b/.commitlintrc.js @@ -0,0 +1,141 @@ +module.exports = { + parserPreset: 'conventional-changelog-conventionalcommits', + rules: { + 'body-leading-blank': [1, 'always'], + 'body-max-line-length': [2, 'always', 100], + 'footer-leading-blank': [1, 'always'], + 'footer-max-line-length': [2, 'always', 100], + 'header-max-length': [2, 'always', 100], + 'subject-case': [ + 2, + 'never', + ['start-case', 'pascal-case', 'upper-case'], + ], + 'subject-empty': [2, 'never'], + 'subject-full-stop': [2, 'never', '.'], + 'type-case': [2, 'always', 'lower-case'], + 'type-empty': [2, 'never'], + 'type-enum': [ + 2, + 'always', + [ + 'build', + 'chore', + 'ci', + 'docs', + 'feat', + 'fix', + 'perf', + 'refactor', + 'revert', + 'style', + 'test', + 'tests', + ], + ], + }, + prompt: { + questions: { + type: { + description: "Select the type of change that you're committing", + enum: { + feat: { + description: 'A new feature', + title: 'Features', + emoji: '✨', + }, + fix: { + description: 'A bug fix', + title: 'Bug Fixes', + emoji: '🐛', + }, + docs: { + description: 'Documentation only changes', + title: 'Documentation', + emoji: 
'📚', + }, + style: { + description: + 'Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc)', + title: 'Styles', + emoji: '💎', + }, + refactor: { + description: + 'A code change that neither fixes a bug nor adds a feature', + title: 'Code Refactoring', + emoji: '📦', + }, + perf: { + description: 'A code change that improves performance', + title: 'Performance Improvements', + emoji: '🚀', + }, + test: { + description: 'Adding missing tests or correcting existing tests', + title: 'Tests', + emoji: '🚨', + }, + tests: { + description: 'Adding missing tests or correcting existing tests', + title: 'Tests', + emoji: '🚨', + }, + build: { + description: + 'Changes that affect the build system or external dependencies (example scopes: gulp, broccoli, npm)', + title: 'Builds', + emoji: '🛠', + }, + ci: { + description: + 'Changes to our CI configuration files and scripts (example scopes: Travis, Circle, BrowserStack, SauceLabs)', + title: 'Continuous Integrations', + emoji: '⚙️', + }, + chore: { + description: "Other changes that don't modify src or test files", + title: 'Chores', + emoji: '♻️', + }, + revert: { + description: 'Reverts a previous commit', + title: 'Reverts', + emoji: '🗑', + }, + }, + }, + scope: { + description: + 'What is the scope of this change (e.g. component or file name)', + }, + subject: { + description: + 'Write a short, imperative tense description of the change', + }, + body: { + description: 'Provide a longer description of the change', + }, + isBreaking: { + description: 'Are there any breaking changes?', + }, + breakingBody: { + description: + 'A BREAKING CHANGE commit requires a body. Please enter a longer description of the commit itself', + }, + breaking: { + description: 'Describe the breaking changes', + }, + isIssueAffected: { + description: 'Does this change affect any open issues?', + }, + issuesBody: { + description: + 'If issues are closed, the commit requires a body. Please enter a longer description of the commit itself', + }, + issues: { + description: 'Add issue references (e.g. 
"fix #123", "re #123".)', + }, + }, + }, +}; diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..7212528 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,9 @@ +--- +version: 2 +updates: + - package-ecosystem: github-actions + directory: / + schedule: + interval: monthly + commit-message: + prefix: ci diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..dabc3e7 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,7 @@ +Enhancement: + +Reason: + +Result: + +Issue Tracker Tickets (Jira or BZ if any): diff --git a/.github/workflows/ansible-lint.yml b/.github/workflows/ansible-lint.yml new file mode 100644 index 0000000..53ab0b0 --- /dev/null +++ b/.github/workflows/ansible-lint.yml @@ -0,0 +1,49 @@ +--- +name: Ansible Lint +on: # yamllint disable-line rule:truthy + pull_request: + merge_group: + branches: + - main + types: + - checks_requested + push: + branches: + - main + workflow_dispatch: +env: + LSR_ROLE2COLL_NAMESPACE: fedora + LSR_ROLE2COLL_NAME: linux_system_roles +permissions: + contents: read +jobs: + ansible_lint: + runs-on: ubuntu-latest + steps: + - name: Update pip, git + run: | + set -euxo pipefail + sudo apt update + sudo apt install -y git + + - name: Checkout repo + uses: actions/checkout@v4 + + - name: Install tox, tox-lsr + run: | + set -euxo pipefail + pip3 install "git+https://github.com/linux-system-roles/tox-lsr@3.2.2" + + - name: Convert role to collection format + run: | + set -euxo pipefail + TOXENV=collection lsr_ci_runtox + coll_dir=".tox/ansible_collections/$LSR_ROLE2COLL_NAMESPACE/$LSR_ROLE2COLL_NAME" + # ansible-lint action requires a .git directory??? + # https://github.com/ansible/ansible-lint/blob/main/action.yml#L45 + mkdir -p "$coll_dir/.git" + + - name: Run ansible-lint + uses: ansible/ansible-lint@v24 + with: + working_directory: .tox/ansible_collections/${{ env.LSR_ROLE2COLL_NAMESPACE }}/${{ env.LSR_ROLE2COLL_NAME }} diff --git a/.github/workflows/ansible-managed-var-comment.yml b/.github/workflows/ansible-managed-var-comment.yml new file mode 100644 index 0000000..b2b8460 --- /dev/null +++ b/.github/workflows/ansible-managed-var-comment.yml @@ -0,0 +1,38 @@ +--- +name: Check for ansible_managed variable use in comments +on: # yamllint disable-line rule:truthy + pull_request: + merge_group: + branches: + - main + types: + - checks_requested + push: + branches: + - main + workflow_dispatch: +permissions: + contents: read +jobs: + ansible_managed_var_comment: + runs-on: ubuntu-latest + steps: + - name: Update pip, git + run: | + set -euxo pipefail + python3 -m pip install --upgrade pip + sudo apt update + sudo apt install -y git + + - name: Checkout repo + uses: actions/checkout@v4 + + - name: Install tox, tox-lsr + run: | + set -euxo pipefail + pip3 install "git+https://github.com/linux-system-roles/tox-lsr@3.2.2" + + - name: Run ansible-plugin-scan + run: | + set -euxo pipefail + TOXENV=ansible-managed-var-comment lsr_ci_runtox diff --git a/.github/workflows/ansible-plugin-scan.yml b/.github/workflows/ansible-plugin-scan.yml new file mode 100644 index 0000000..8085384 --- /dev/null +++ b/.github/workflows/ansible-plugin-scan.yml @@ -0,0 +1,38 @@ +--- +name: Ansible Plugin Scan +on: # yamllint disable-line rule:truthy + pull_request: + merge_group: + branches: + - main + types: + - checks_requested + push: + branches: + - main + workflow_dispatch: +permissions: + contents: read +jobs: + ansible_plugin_scan: + runs-on: ubuntu-latest 
+ steps: + - name: Update pip, git + run: | + set -euxo pipefail + python3 -m pip install --upgrade pip + sudo apt update + sudo apt install -y git + + - name: Checkout repo + uses: actions/checkout@v4 + + - name: Install tox, tox-lsr + run: | + set -euxo pipefail + pip3 install "git+https://github.com/linux-system-roles/tox-lsr@3.2.2" + + - name: Run ansible-plugin-scan + run: | + set -euxo pipefail + TOXENV=ansible-plugin-scan lsr_ci_runtox diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml new file mode 100644 index 0000000..26fe33c --- /dev/null +++ b/.github/workflows/ansible-test.yml @@ -0,0 +1,47 @@ +--- +name: Ansible Test +on: # yamllint disable-line rule:truthy + pull_request: + merge_group: + branches: + - main + types: + - checks_requested + push: + branches: + - main + workflow_dispatch: +env: + LSR_ROLE2COLL_NAMESPACE: fedora + LSR_ROLE2COLL_NAME: linux_system_roles +permissions: + contents: read +jobs: + ansible_test: + runs-on: ubuntu-latest + steps: + - name: Update pip, git + run: | + set -euxo pipefail + python3 -m pip install --upgrade pip + sudo apt update + sudo apt install -y git + + - name: Checkout repo + uses: actions/checkout@v4 + + - name: Install tox, tox-lsr + run: | + set -euxo pipefail + pip3 install "git+https://github.com/linux-system-roles/tox-lsr@3.2.2" + + - name: Convert role to collection format + run: | + set -euxo pipefail + TOXENV=collection lsr_ci_runtox + + - name: Run ansible-test + uses: ansible-community/ansible-test-gh-action@release/v1 + with: + testing-type: sanity # wokeignore:rule=sanity + collection-src-directory: .tox/ansible_collections/${{ env.LSR_ROLE2COLL_NAMESPACE }}/${{ env.LSR_ROLE2COLL_NAME }} diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml new file mode 100644 index 0000000..e08cb4b --- /dev/null +++ b/.github/workflows/build_docs.yml @@ -0,0 +1,104 @@ +--- +# yamllint disable rule:line-length +name: Convert README.md to HTML and push to docs branch +on: # yamllint disable-line rule:truthy + push: + branches: + - main + paths: + - README.md + release: + types: + - published +permissions: + contents: read +jobs: + build_docs: + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - name: Update pip, git + run: | + set -euxo pipefail + sudo apt update + sudo apt install -y git + + - name: Check out code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Ensure the docs branch + run: | + set -euxo pipefail + branch=docs + existed_in_remote=$(git ls-remote --heads origin $branch) + + if [ -z "${existed_in_remote}" ]; then + echo "Creating $branch branch" + git config --global user.name "${{ github.actor }}" + git config --global user.email "${{ github.actor }}@users.noreply.github.com" + git checkout --orphan $branch + git reset --hard + git commit --allow-empty -m "Initializing $branch branch" + git push origin $branch + echo "Created $branch branch" + else + echo "Branch $branch already exists" + fi + + - name: Checkout the docs branch + uses: actions/checkout@v4 + with: + ref: docs + + - name: Fetch README.md and .pandoc_template.html5 template from the workflow branch + uses: actions/checkout@v4 + with: + sparse-checkout: | + README.md + .pandoc_template.html5 + sparse-checkout-cone-mode: false + path: ref_branch + - name: Set RELEASE_VERSION based on whether run on release or on push + run: | + set -euxo pipefail + if [ ${{ github.event_name }} = release ]; then + echo "RELEASE_VERSION=${{ github.event.release.tag_name }}" >> 
$GITHUB_ENV + elif [ ${{ github.event_name }} = push ]; then + echo "RELEASE_VERSION=latest" >> $GITHUB_ENV + else + echo Unsupported event + exit 1 + fi + + - name: Ensure that version and docs directories exist + run: mkdir -p ${{ env.RELEASE_VERSION }} docs + + - name: Remove badges from README.md prior to converting to HTML + run: sed -i '1,8 {/^\[\!.*actions\/workflows/d}' ref_branch/README.md + + - name: Convert README.md to HTML and save to the version directory + uses: docker://pandoc/core:latest + with: + args: >- + --from gfm --to html5 --toc --shift-heading-level-by=-1 + --template ref_branch/.pandoc_template.html5 + --output ${{ env.RELEASE_VERSION }}/README.html ref_branch/README.md + + - name: Copy latest README.html to docs/index.html for GitHub pages + if: env.RELEASE_VERSION == 'latest' + run: cp ${{ env.RELEASE_VERSION }}/README.html docs/index.html + + - name: Commit changes + run: | + git config --global user.name "${{ github.actor }}" + git config --global user.email "${{ github.actor }}@users.noreply.github.com" + git add ${{ env.RELEASE_VERSION }}/README.html docs/index.html + git commit -m "Update README.html for ${{ env.RELEASE_VERSION }}" + + - name: Push changes + uses: ad-m/github-push-action@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + branch: docs diff --git a/.github/workflows/changelog_to_tag.yml b/.github/workflows/changelog_to_tag.yml new file mode 100644 index 0000000..c2fe3c0 --- /dev/null +++ b/.github/workflows/changelog_to_tag.yml @@ -0,0 +1,91 @@ +--- +# yamllint disable rule:line-length +name: Tag, release, and publish role based on CHANGELOG.md push +on: # yamllint disable-line rule:truthy + push: + branches: + - main + paths: + - CHANGELOG.md +permissions: + contents: read +jobs: + tag_release_publish: + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - name: Update pip, git + run: | + set -euxo pipefail + sudo apt update + sudo apt install -y git + + - name: checkout PR + uses: actions/checkout@v4 + + - name: Get tag and message from the latest CHANGELOG.md commit + id: tag + run: | + set -euxo pipefail + print=false + while read -r line; do + if [[ "$line" =~ ^\[([0-9]+\.[0-9]+\.[0-9]+)\]\ -\ [0-9-]+ ]]; then + if [ "$print" = false ]; then + _tagname="${BASH_REMATCH[1]}" + echo "$line" + print=true + else + break + fi + elif [ "$print" = true ]; then + echo "$line" + fi + done < CHANGELOG.md > ./.tagmsg.txt + git fetch --all --tags + for t in $( git tag -l ); do + if [ "$t" = "$_tagname" ]; then + echo INFO: tag "$t" already exists + exit 1 + fi + done + # Get name of the branch that the change was pushed to + _branch="${GITHUB_REF_NAME:-}" + if [ "$_branch" = master ] || [ "$_branch" = main ]; then + echo Using branch name ["$_branch"] as push branch + else + echo WARNING: GITHUB_REF_NAME ["$_branch"] is not main or master + _branch=$( git branch -r | grep -o 'origin/HEAD -> origin/.*$' | \ + awk -F'/' '{print $3}' || : ) + fi + if [ -z "$_branch" ]; then + _branch=$( git branch --points-at HEAD --no-color --format='%(refname:short)' ) + fi + if [ -z "$_branch" ]; then + echo ERROR: unable to determine push branch + git branch -a + exit 1 + fi + echo "tagname=$_tagname" >> "$GITHUB_OUTPUT" + echo "branch=$_branch" >> "$GITHUB_OUTPUT" + - name: Create tag + uses: mathieudutour/github-tag-action@v6.1 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + custom_tag: ${{ steps.tag.outputs.tagname }} + tag_prefix: '' + + - name: Create Release + id: create_release + uses: ncipollo/release-action@v1 + with: + tag: 
${{ steps.tag.outputs.tagname }} + name: Version ${{ steps.tag.outputs.tagname }} + bodyFile: ./.tagmsg.txt + makeLatest: true + + - name: Publish role to Galaxy + uses: robertdebock/galaxy-action@1.2.1 + with: + galaxy_api_key: ${{ secrets.galaxy_api_key }} + git_branch: ${{ steps.tag.outputs.branch }} diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000..f8ea08c --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,48 @@ +--- +name: CodeQL +on: # yamllint disable-line rule:truthy + push: + branches: ["main"] + pull_request: + branches: ["main"] + merge_group: + branches: + - main + types: + - checks_requested + schedule: + - cron: 8 10 * * 3 +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + strategy: + fail-fast: false + matrix: + language: [python] + steps: + - name: Update pip, git + run: | + set -euxo pipefail + sudo apt update + sudo apt install -y git + - name: Checkout + uses: actions/checkout@v4 + + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + queries: +security-and-quality + + - name: Autobuild + uses: github/codeql-action/autobuild@v3 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{ matrix.language }}" diff --git a/.github/workflows/markdownlint.yml b/.github/workflows/markdownlint.yml new file mode 100644 index 0000000..3094acd --- /dev/null +++ b/.github/workflows/markdownlint.yml @@ -0,0 +1,38 @@ +--- +# yamllint disable rule:line-length +name: Markdown Lint +on: # yamllint disable-line rule:truthy + pull_request: + merge_group: + branches: + - main + types: + - checks_requested + push: + branches: + - main + workflow_dispatch: +permissions: + contents: read +jobs: + markdownlint: + runs-on: ubuntu-latest + steps: + - name: Update pip, git + run: | + set -euxo pipefail + sudo apt update + sudo apt install -y git + + - name: Check out code + uses: actions/checkout@v4 + + # CHANGELOG.md is generated automatically from PR titles and descriptions + # It might have issues but they are not critical + - name: Lint all markdown files except for CHANGELOG.md + uses: docker://avtodev/markdown-lint:master + with: + args: >- + --ignore=CHANGELOG.md + **/*.md + config: .markdownlint.yaml diff --git a/.github/workflows/pr-title-lint.yml b/.github/workflows/pr-title-lint.yml new file mode 100644 index 0000000..8342fb5 --- /dev/null +++ b/.github/workflows/pr-title-lint.yml @@ -0,0 +1,31 @@ +name: PR Title Lint +on: # yamllint disable-line rule:truthy + pull_request: + types: + - opened + - synchronize + - reopened + - edited + merge_group: + branches: + - main + types: + - checks_requested +permissions: + contents: read +jobs: + commit-checks: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Install conventional-commit linter + run: npm install @commitlint/config-conventional @commitlint/cli + + - name: Run commitlint on PR title + env: + PR_TITLE: ${{ github.event.pull_request.title }} + # Echo from env variable to avoid bash errors with extra characters + run: echo "$PR_TITLE" | npx commitlint --verbose diff --git a/.github/workflows/python-unit-test.yml b/.github/workflows/python-unit-test.yml new file mode 100644 index 0000000..bcb21e6 --- /dev/null +++ b/.github/workflows/python-unit-test.yml @@ -0,0 +1,86 @@ +--- +# yamllint disable rule:line-length +name: Python Unit 
Tests +on: # yamllint disable-line rule:truthy + pull_request: + merge_group: + branches: + - main + types: + - checks_requested + push: + branches: + - main + workflow_dispatch: +permissions: + contents: read +jobs: + python: + strategy: + matrix: + pyver_os: + - ver: "2.7" + os: ubuntu-20.04 + - ver: "3.6" + os: ubuntu-20.04 + - ver: "3.8" + os: ubuntu-latest + - ver: "3.9" + os: ubuntu-latest + - ver: "3.10" + os: ubuntu-latest + - ver: "3.11" + os: ubuntu-latest + runs-on: ${{ matrix.pyver_os.os }} + steps: + - name: Update git + run: | + set -euxo pipefail + sudo apt update + sudo apt install -y git + + - name: checkout PR + uses: actions/checkout@v4 + + - name: Set up Python 2.7 + if: ${{ matrix.pyver_os.ver == '2.7' }} + run: | + set -euxo pipefail + sudo apt install -y python2.7 + + - name: Set up Python 3 + if: ${{ matrix.pyver_os.ver != '2.7' }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.pyver_os.ver }} + + - name: Install platform dependencies, python, tox, tox-lsr + run: | + set -euxo pipefail + python -m pip install --upgrade pip + pip install "git+https://github.com/linux-system-roles/tox-lsr@3.2.2" + # If you have additional OS dependency packages e.g. libcairo2-dev + # then put them in .github/config/ubuntu-requirements.txt, one + # package per line. + if [ -f .github/config/ubuntu-requirements.txt ]; then + sudo apt-get install -y $(cat .github/config/ubuntu-requirements.txt) + fi + + - name: Run unit tests + run: | + set -euxo pipefail + toxpyver=$(echo "${{ matrix.pyver_os.ver }}" | tr -d .) + toxenvs="py${toxpyver}" + # NOTE: The use of flake8, pylint, black with specific + # python envs is arbitrary and must be changed in tox-lsr + # We really should either do those checks using the latest + # version of python, or in every version of python + case "$toxpyver" in + 27) toxenvs="${toxenvs},coveralls,flake8,pylint" ;; + 36) toxenvs="${toxenvs},coveralls,black" ;; + *) toxenvs="${toxenvs},coveralls" ;; + esac + TOXENV="$toxenvs" lsr_ci_runtox + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v4 diff --git a/.github/workflows/test_converting_readme.yml b/.github/workflows/test_converting_readme.yml new file mode 100644 index 0000000..ee96211 --- /dev/null +++ b/.github/workflows/test_converting_readme.yml @@ -0,0 +1,46 @@ +--- +# yamllint disable rule:line-length +name: Test converting README.md to README.html +on: # yamllint disable-line rule:truthy + pull_request: + merge_group: + branches: + - main + types: + - checks_requested + push: + branches: + - main +permissions: + contents: read +jobs: + test_converting_readme: + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - name: Update pip, git + run: | + set -euxo pipefail + sudo apt update + sudo apt install -y git + + - name: Check out code + uses: actions/checkout@v4 + + - name: Remove badges from README.md prior to converting to HTML + run: sed -i '1,8 {/^\[\!.*actions\/workflows/d}' README.md + + - name: Convert README.md to HTML + uses: docker://pandoc/core:latest + with: + args: >- + --from gfm --to html5 --toc --shift-heading-level-by=-1 + --template .pandoc_template.html5 + --output README.html README.md + + - name: Upload README.html as an artifact + uses: actions/upload-artifact@master + with: + name: README.html + path: README.html diff --git a/.github/workflows/weekly_ci.yml b/.github/workflows/weekly_ci.yml new file mode 100644 index 0000000..225dbdd --- /dev/null +++ b/.github/workflows/weekly_ci.yml @@ -0,0 +1,84 @@ +--- +# yamllint 
disable rule:line-length +name: Weekly CI trigger +on: # yamllint disable-line rule:truthy + workflow_dispatch: + schedule: + - cron: 0 22 * * 6 +env: + BRANCH_NAME: weekly-ci + COMMIT_MESSAGE: "ci: This PR is to trigger periodic CI testing" + BODY_MESSAGE: >- + This PR is for the purpose of triggering periodic CI testing. + We don't currently have a way to trigger CI without a PR, + so this PR serves that purpose. + COMMENT: "[citest]" +permissions: + contents: read +jobs: + weekly_ci: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + contents: write + steps: + - name: Update pip, git + run: | + set -euxo pipefail + sudo apt update + sudo apt install -y git + + - name: Checkout latest code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Create or rebase commit, add dump_packages callback + run: | + set -euxo pipefail + + git config --global user.name "github-actions[bot]" + git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com" + git checkout ${{ env.BRANCH_NAME }} || git checkout -b ${{ env.BRANCH_NAME }} + git rebase main + if [ ! -d tests/callback_plugins ]; then + mkdir -p tests/callback_plugins + fi + curl -L -s -o tests/callback_plugins/dump_packages.py https://raw.githubusercontent.com/linux-system-roles/auto-maintenance/main/callback_plugins/dump_packages.py + git add tests/callback_plugins + git commit --allow-empty -m "${{ env.COMMIT_MESSAGE }}" + git push -f --set-upstream origin ${{ env.BRANCH_NAME }} + + - name: Create and comment pull request + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GH_PUSH_TOKEN }} + script: | + const head = [context.repo.owner, ":", "${{ env.BRANCH_NAME }}"].join(""); + const response = await github.rest.pulls.list({ + owner: context.repo.owner, + repo: context.repo.repo, + head: head, + base: context.ref, + state: "open" + }); + let pr_number = ''; + if (response.data.length === 0) { + pr_number = (await github.rest.pulls.create({ + owner: context.repo.owner, + repo: context.repo.repo, + title: "${{ env.COMMIT_MESSAGE }}", + body: "${{ env.BODY_MESSAGE }}", + head: "${{ env.BRANCH_NAME }}", + base: context.ref, + draft: true + })).data.number; + } else { + pr_number = response.data[0].number; + } + github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: pr_number, + body: "${{ env.COMMENT }}", + }); diff --git a/.github/workflows/woke.yml b/.github/workflows/woke.yml new file mode 100644 index 0000000..02045ec --- /dev/null +++ b/.github/workflows/woke.yml @@ -0,0 +1,19 @@ +# yamllint disable rule:line-length +name: Woke +on: # yamllint disable-line rule:truthy + - pull_request +jobs: + woke: + name: Detect non-inclusive language + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Run lsr-woke-action + # Originally, uses: get-woke/woke-action@v0 + uses: linux-system-roles/lsr-woke-action@main + with: + woke-args: "-c https://raw.githubusercontent.com/linux-system-roles/tox-lsr/main/src/tox_lsr/config_files/woke.yml --count-only-error-for-failure" + # Cause the check to fail on any broke rules + fail-on-error: true diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..09db6d0 --- /dev/null +++ b/.gitignore @@ -0,0 +1,15 @@ +*.swp +*~ +passes.yml +vault.yml +*.pyc +*.retry +/tests/.coverage +/tests/htmlcov* +/.tox +/venv*/ +/.venv/ +.vscode/ +artifacts/ +__pycache__/ +.pytest_cache/ diff --git a/.markdownlint.yaml b/.markdownlint.yaml new file 
mode 100644 index 0000000..6bf4ccd --- /dev/null +++ b/.markdownlint.yaml @@ -0,0 +1,261 @@ +--- +# Default state for all rules +default: true + +# Path to configuration file to extend +extends: null + +# MD001/heading-increment/header-increment - Heading levels should only increment by one level at a time +MD001: true + +# MD002/first-heading-h1/first-header-h1 - First heading should be a top-level heading +MD002: + # Heading level + level: 1 + +# MD003/heading-style/header-style - Heading style +MD003: + # Heading style + style: "consistent" + +# MD004/ul-style - Unordered list style +MD004: + # List style + style: "consistent" + +# MD005/list-indent - Inconsistent indentation for list items at the same level +MD005: true + +# MD006/ul-start-left - Consider starting bulleted lists at the beginning of the line +MD006: true + +# MD007/ul-indent - Unordered list indentation +MD007: + # Spaces for indent + indent: 2 + # Whether to indent the first level of the list + start_indented: false + # Spaces for first level indent (when start_indented is set) + start_indent: 2 + +# MD009/no-trailing-spaces - Trailing spaces +MD009: + # Spaces for line break + br_spaces: 2 + # Allow spaces for empty lines in list items + list_item_empty_lines: false + # Include unnecessary breaks + strict: false + +# MD010/no-hard-tabs - Hard tabs +MD010: + # Include code blocks + code_blocks: true + # Fenced code languages to ignore + ignore_code_languages: [] + # Number of spaces for each hard tab + spaces_per_tab: 1 + +# MD011/no-reversed-links - Reversed link syntax +MD011: true + +# MD012/no-multiple-blanks - Multiple consecutive blank lines +MD012: + # Consecutive blank lines + maximum: 1 + +# Modified for LSR +# GFM does not limit line length +# MD013/line-length - Line length +MD013: false + # # Number of characters + # # line_length: 80 + # line_length: 999 + # # Number of characters for headings + # heading_line_length: 80 + # # Number of characters for code blocks + # code_block_line_length: 80 + # # Include code blocks + # code_blocks: true + # # Include tables + # tables: true + # # Include headings + # headings: true + # # Include headings + # headers: true + # # Strict length checking + # strict: false + # # Stern length checking + # stern: false + +# MD014/commands-show-output - Dollar signs used before commands without showing output +MD014: true + +# MD018/no-missing-space-atx - No space after hash on atx style heading +MD018: true + +# MD019/no-multiple-space-atx - Multiple spaces after hash on atx style heading +MD019: true + +# MD020/no-missing-space-closed-atx - No space inside hashes on closed atx style heading +MD020: true + +# MD021/no-multiple-space-closed-atx - Multiple spaces inside hashes on closed atx style heading +MD021: true + +# MD022/blanks-around-headings/blanks-around-headers - Headings should be surrounded by blank lines +MD022: + # Blank lines above heading + lines_above: 1 + # Blank lines below heading + lines_below: 1 + +# MD023/heading-start-left/header-start-left - Headings must start at the beginning of the line +MD023: true + +# MD024/no-duplicate-heading/no-duplicate-header - Multiple headings with the same content +MD024: true + +# MD025/single-title/single-h1 - Multiple top-level headings in the same document +MD025: + # Heading level + level: 1 + # RegExp for matching title in front matter + front_matter_title: "^\\s*title\\s*[:=]" + +# MD026/no-trailing-punctuation - Trailing punctuation in heading +MD026: + # Punctuation characters not allowed at end of headings + 
punctuation: ".,;:!。,;:!" + +# MD027/no-multiple-space-blockquote - Multiple spaces after blockquote symbol +MD027: true + +# MD028/no-blanks-blockquote - Blank line inside blockquote +MD028: true + +# MD029/ol-prefix - Ordered list item prefix +MD029: + # List style + style: "one_or_ordered" + +# MD030/list-marker-space - Spaces after list markers +MD030: + # Spaces for single-line unordered list items + ul_single: 1 + # Spaces for single-line ordered list items + ol_single: 1 + # Spaces for multi-line unordered list items + ul_multi: 1 + # Spaces for multi-line ordered list items + ol_multi: 1 + +# MD031/blanks-around-fences - Fenced code blocks should be surrounded by blank lines +MD031: + # Include list items + list_items: true + +# MD032/blanks-around-lists - Lists should be surrounded by blank lines +MD032: true + +# MD033/no-inline-html - Inline HTML +MD033: + # Allowed elements + allowed_elements: [] + +# MD034/no-bare-urls - Bare URL used +MD034: true + +# MD035/hr-style - Horizontal rule style +MD035: + # Horizontal rule style + style: "consistent" + +# MD036/no-emphasis-as-heading/no-emphasis-as-header - Emphasis used instead of a heading +MD036: + # Punctuation characters + punctuation: ".,;:!?。,;:!?" + +# MD037/no-space-in-emphasis - Spaces inside emphasis markers +MD037: true + +# MD038/no-space-in-code - Spaces inside code span elements +MD038: true + +# MD039/no-space-in-links - Spaces inside link text +MD039: true + +# MD040/fenced-code-language - Fenced code blocks should have a language specified +MD040: + # List of languages + allowed_languages: [] + # Require language only + language_only: false + +# MD041/first-line-heading/first-line-h1 - First line in a file should be a top-level heading +MD041: + # Heading level + level: 1 + # RegExp for matching title in front matter + front_matter_title: "^\\s*title\\s*[:=]" + +# MD042/no-empty-links - No empty links +MD042: true + +# Modified for LSR +# Disabling, we do not need this +# MD043/required-headings/required-headers - Required heading structure +MD043: false + # # List of headings + # headings: [] + # # List of headings + # headers: [] + # # Match case of headings + # match_case: false + +# MD044/proper-names - Proper names should have the correct capitalization +MD044: + # List of proper names + names: [] + # Include code blocks + code_blocks: true + # Include HTML elements + html_elements: true + +# MD045/no-alt-text - Images should have alternate text (alt text) +MD045: true + +# MD046/code-block-style - Code block style +MD046: + # Block style + style: "consistent" + +# MD047/single-trailing-newline - Files should end with a single newline character +MD047: true + +# MD048/code-fence-style - Code fence style +MD048: + # Code fence style + style: "consistent" + +# MD049/emphasis-style - Emphasis style should be consistent +MD049: + # Emphasis style should be consistent + style: "consistent" + +# MD050/strong-style - Strong style should be consistent +MD050: + # Strong style should be consistent + style: "consistent" + +# MD051/link-fragments - Link fragments should be valid +MD051: true + +# MD052/reference-links-images - Reference links and images should use a label that is defined +MD052: true + +# MD053/link-image-reference-definitions - Link and image reference definitions should be needed +MD053: + # Ignored definitions + ignored_definitions: + - "//" diff --git a/.ostree/README.md b/.ostree/README.md new file mode 100644 index 0000000..f5e6931 --- /dev/null +++ b/.ostree/README.md @@ -0,0 +1,3 @@ +*NOTE*: The 
`*.txt` files are used by `get_ostree_data.sh` to create the lists +of packages, and to find other system roles used by this role. DO NOT use them +directly. diff --git a/.ostree/get_ostree_data.sh b/.ostree/get_ostree_data.sh new file mode 100755 index 0000000..65830d6 --- /dev/null +++ b/.ostree/get_ostree_data.sh @@ -0,0 +1,132 @@ +#!/usr/bin/env bash + +set -euo pipefail + +ostree_dir="${OSTREE_DIR:-"$(dirname "$(realpath "$0")")"}" + +if [ -z "${4:-}" ] || [ "${1:-}" = help ] || [ "${1:-}" = -h ]; then + cat <&2 echo ERROR - could not find role "$role" - please use ANSIBLE_COLLECTIONS_PATH + exit 2 +} + +get_packages() { + local ostree_dir pkgtype pkgfile rolefile + ostree_dir="$1" + for pkgtype in "${pkgtypes[@]}"; do + for suff in "" "-$distro" "-${distro}-${major_ver}" "-${distro}-${ver}"; do + pkgfile="$ostree_dir/packages-${pkgtype}${suff}.txt" + if [ -f "$pkgfile" ]; then + cat "$pkgfile" + fi + done + rolefile="$ostree_dir/roles-${pkgtype}.txt" + if [ -f "$rolefile" ]; then + local roles role rolepath + roles="$(cat "$rolefile")" + for role in $roles; do + rolepath="$(get_rolepath "$ostree_dir" "$role")" + if [ -z "$rolepath" ]; then + 1>&2 echo ERROR - could not find role "$role" - please use ANSIBLE_COLLECTIONS_PATH + exit 2 + fi + get_packages "$rolepath" + done + fi + done | sort -u +} + +format_packages_json() { + local comma pkgs pkg + comma="" + pkgs="[" + while read -r pkg; do + pkgs="${pkgs}${comma}\"${pkg}\"" + comma=, + done + pkgs="${pkgs}]" + echo "$pkgs" +} + +format_packages_raw() { + cat +} + +format_packages_yaml() { + while read -r pkg; do + echo "- $pkg" + done +} + +format_packages_toml() { + while read -r pkg; do + echo "[[packages]]" + echo "name = \"$pkg\"" + echo "version = \"*\"" + done +} + +distro="${distro_ver%%-*}" +ver="${distro_ver##*-}" +if [[ "$ver" =~ ^([0-9]*) ]]; then + major_ver="${BASH_REMATCH[1]}" +else + echo ERROR: cannot parse major version number from version "$ver" + exit 1 +fi + +"get_$category" "$ostree_dir" | "format_${category}_$format" diff --git a/.ostree/packages-runtime.txt b/.ostree/packages-runtime.txt new file mode 100644 index 0000000..79c62c5 --- /dev/null +++ b/.ostree/packages-runtime.txt @@ -0,0 +1,3 @@ +dlm +gfs2-utils +lvm2-lockd diff --git a/.pandoc_template.html5 b/.pandoc_template.html5 new file mode 100644 index 0000000..f214661 --- /dev/null +++ b/.pandoc_template.html5 @@ -0,0 +1,166 @@ +$--| GitHub HTML5 Pandoc Template" v2.2 | 2020/08/12 | pandoc v2.1.1 + + +$-------------------------------------------------------------------------> lang + + +$--============================================================================= +$-- METADATA +$--============================================================================= + + + +$-----------------------------------------------------------------------> author +$for(author-meta)$ + +$endfor$ +$-------------------------------------------------------------------------> date +$if(date-meta)$ + +$endif$ +$---------------------------------------------------------------------> keywords +$if(keywords)$ + +$endif$ +$------------------------------------------------------------------> description +$if(description)$ + +$endif$ +$------------------------------------------------------------------------> title + $if(title-prefix)$$title-prefix$ – $endif$$pagetitle$ +$--=========================================================================== +$-- CSS STYLESHEETS +$--=========================================================================== +$-- Here comes the placeholder 
(within double braces) that will be replaced +$-- by the CSS file in the finalized template: + +$------------------------------------------------------------------------------- + +$------------------------------------------------------------------------------- +$if(quotes)$ + +$endif$ +$-------------------------------------------------------------> highlighting-css +$if(highlighting-css)$ + +$endif$ +$--------------------------------------------------------------------------> css +$for(css)$ + +$endfor$ +$-------------------------------------------------------------------------> math +$if(math)$ + $math$ +$endif$ +$------------------------------------------------------------------------------- + +$--------------------------------------------------------------> header-includes +$for(header-includes)$ + $header-includes$ +$endfor$ +$------------------------------------------------------------------------------- + + +
+$---------------------------------------------------------------> include-before +$for(include-before)$ +$include-before$ +$endfor$ +$-->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> IF: title +$if(title)$ +
+

$title$

+$---------------------------------------------------------------------> subtitle +$if(subtitle)$ +

$subtitle$

+$endif$ +$-----------------------------------------------------------------------> author +$for(author)$ +

$author$

+$endfor$ +$-------------------------------------------------------------------------> date +$if(date)$ +

$date$

+$endif$ +$----------------------------------------------------------------------> summary +$if(summary)$ +
+$summary$ +
+$endif$ +$------------------------------------------------------------------------------- +
+$endif$ +$--<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< END IF: title +$--------------------------------------------------------------------------> toc +$if(toc)$ +
+ +
+$endif$ +$-------------------------------------------------------------------------> body +$body$ +$----------------------------------------------------------------> include-after +$for(include-after)$ +$include-after$ +$endfor$ +$------------------------------------------------------------------------------- +
+ + diff --git a/.yamllint.yml b/.yamllint.yml new file mode 100644 index 0000000..9607233 --- /dev/null +++ b/.yamllint.yml @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: MIT +--- +extends: default +ignore: | + /.tox/ diff --git a/.yamllint_defaults.yml b/.yamllint_defaults.yml new file mode 100644 index 0000000..be0b697 --- /dev/null +++ b/.yamllint_defaults.yml @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: MIT +--- +ignore: | + /.tox/ +extends: default +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + truthy: + allowed-values: ["yes", "no", "true", "false"] + level: error + document-start: disable diff --git a/LICENSE b/LICENSE index a50144b..1ad2057 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2024 linux-system-roles +Copyright (C) 2024 Red Hat, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README-ansible.md b/README-ansible.md new file mode 100644 index 0000000..01a54eb --- /dev/null +++ b/README-ansible.md @@ -0,0 +1,5 @@ +# Introduction to Ansible for Linux System Roles + +If you are not familiar with Ansible, please see +[Introduction to Ansible for Linux System Roles](https://linux-system-roles.github.io/documentation/intro-to-ansible-for-system-roles.html), +where many useful links are presented. diff --git a/README-ostree.md b/README-ostree.md new file mode 100644 index 0000000..a9f0185 --- /dev/null +++ b/README-ostree.md @@ -0,0 +1,66 @@ +# rpm-ostree + +The role supports running on [rpm-ostree](https://coreos.github.io/rpm-ostree/) +systems. The primary issue is that the `/usr` filesystem is read-only, and the +role cannot install packages. Instead, it will just verify that the necessary +packages and any other `/usr` files are pre-installed. The role will change the +package manager to one that is compatible with `rpm-ostree` systems. + +## Building + +To build an ostree image for a particular operating system distribution and +version, use the script `.ostree/get_ostree_data.sh` to get the list of +packages. If the role uses other system roles, then the script will include the +packages for the other roles in the list it outputs. The list of packages will +be sorted in alphanumeric order. + +Usage: + +```bash +.ostree/get_ostree_data.sh packages runtime DISTRO-VERSION FORMAT +``` + +`DISTRO-VERSION` is in the format that Ansible uses for `ansible_distribution` +and `ansible_distribution_version` - for example, `Fedora-38`, `CentOS-8`, +`RedHat-9.4` + +`FORMAT` is one of `toml`, `json`, `yaml`, `raw` + +* `toml` - each package in a TOML `[[packages]]` element + +```toml +[[packages]] +name = "package-a" +version = "*" +[[packages]] +name = "package-b" +version = "*" +... +``` + +* `yaml` - a YAML list of packages + +```yaml +- package-a +- package-b +... +``` + +* `json` - a JSON list of packages + +```json +["package-a","package-b",...] +``` + +* `raw` - a plain text list of packages, one per line + +```bash +package-a +package-b +... +``` + +What format you choose depends on which image builder you are using. 
For +example, if you are using something based on +[osbuild-composer](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/composing_installing_and_managing_rhel_for_edge_images/index#creating-an-image-builder-blueprint-for-a-rhel-for-edge-image-using-the-command-line-interface_composing-a-rhel-for-edge-image-using-image-builder-command-line), +you will probably want to use the `toml` output format. diff --git a/README.md b/README.md new file mode 100644 index 0000000..abe1167 --- /dev/null +++ b/README.md @@ -0,0 +1,159 @@ +# gfs2 + +[![ansible-lint.yml](https://github.com/linux-system-roles/gfs2/actions/workflows/ansible-lint.yml/badge.svg)](https://github.com/linux-system-roles/gfs2/actions/workflows/ansible-lint.yml) [![ansible-test.yml](https://github.com/linux-system-roles/gfs2/actions/workflows/ansible-test.yml/badge.svg)](https://github.com/linux-system-roles/gfs2/actions/workflows/ansible-test.yml) [![codeql.yml](https://github.com/linux-system-roles/gfs2/actions/workflows/codeql.yml/badge.svg)](https://github.com/linux-system-roles/gfs2/actions/workflows/codeql.yml) [![markdownlint.yml](https://github.com/linux-system-roles/gfs2/actions/workflows/markdownlint.yml/badge.svg)](https://github.com/linux-system-roles/gfs2/actions/workflows/markdownlint.yml) [![python-unit-test.yml](https://github.com/linux-system-roles/gfs2/actions/workflows/python-unit-test.yml/badge.svg)](https://github.com/linux-system-roles/gfs2/actions/workflows/python-unit-test.yml) [![shellcheck.yml](https://github.com/linux-system-roles/gfs2/actions/workflows/shellcheck.yml/badge.svg)](https://github.com/linux-system-roles/gfs2/actions/workflows/shellcheck.yml) [![woke.yml](https://github.com/linux-system-roles/gfs2/actions/workflows/woke.yml/badge.svg)](https://github.com/linux-system-roles/gfs2/actions/workflows/woke.yml) + +Configure and manage gfs2 file systems in a pacemaker cluster. + +## Supported Distributions + +* RHEL-8+, CentOS-8+ +* Fedora + +## Requirements + +You must have a working HA cluster. + +### Collection requirements + +This role uses the `storage` system role. Please run the following command line +to install it: + +```bash +ansible-galaxy collection install -vv -r meta/collection-requirements.yml +``` + +## Role Variables + +See [meta/argument_specs.yml](meta/argument_specs.yml) + +## Examples + +### Minimal example + +```yaml +- name: Configure a GFS2 filesystem on the cluster + hosts: myapp-servers + roles: + - role: linux-system-roles.gfs2 + vars: + gfs2_cluster_name: myapp-cluster + gfs2_file_systems: + - name: myapp-shared-fs + pvs: + - /dev/disk/by-path/pci-0000:42:00.0-fc-0xf000-lun-1 + vg: vg_myapp_shared + lv: lv_myapp_shared + lv_size: 100G + mount_point: /mnt/myapp-shared +``` + +### Minimal example with cluster setup + +```yaml +- name: Configure a cluster and a GFS2 filesystem + hosts: cluster-virts + vars: + common_cluster_name: MyCluster + pre_tasks: + - name: Check whether cluster has been set up yet + # We do this because the ha_cluster role removes any resources not specified + # by its variables. It is safer to run ha_cluster in a separate one-off + # playbook which will not be used again after the gfs2 resources are created. 
+ ansible.builtin.command: pcs stonith status + register: cluster_exists + changed_when: false + failed_when: false + + - name: Create cluster if it doesn't exist + ansible.builtin.include_role: + name: linux-system-roles.ha_cluster + vars: + ha_cluster_cluster_name: "{{ common_cluster_name }}" + ha_cluster_enable_repos: false + # Users should vault-encrypt the password + ha_cluster_hacluster_password: hunter2 + ha_cluster_fence_virt_key_src: fence_xvm.key + ha_cluster_cluster_properties: + - attrs: + - name: stonith-enabled + value: "true" + ha_cluster_resource_primitives: + - id: xvm-fencing + agent: "stonith:fence_xvm" + when: + - cluster_exists.rc != 0 + or "Started" not in cluster_exists.stdout + roles: + - role: linux-system-roles.gfs2 + vars: + gfs2_cluster_name: "{{ common_cluster_name }}" + # Specify 2 gfs2 file systems + gfs2_file_systems: + - name: fs1 + pvs: + - /dev/disk/by-path/virtio-pci-0000:00:08.0 + vg: vg_gfs2_1 + lv: lv_gfs2_1 + lv_size: 100G + mount_point: /mnt/test1 + - name: fs2 + pvs: + - /dev/disk/by-path/virtio-pci-0000:01:00.0 + vg: vg_gfs2_2 + lv: lv_gfs2_2 + lv_size: 100G + mount_point: /mnt/test2 +``` + +### Example with optional role variables + +```yaml +- name: Configure a GFS2 filesystem with optional variables + hosts: cluster-virts + roles: + - role: linux-system-roles.gfs2 + vars: + gfs2_cluster_name: MyCluster + gfs2_resource_name_lvmlockd: lvm_locking + gfs2_resource_name_dlm: dlm_control + gfs2_group_name_locking: locking + gfs2_file_systems: + - name: fs1 + resource_name_fs: gfs2-1 + pvs: + - /dev/disk/by-path/virtio-pci-0000:00:08.0 + vg: vg_gfs2_1 + lv: lv_gfs2_1 + lv_size: 100G + resource_name_lv: shared-lv-1 + journals: 3 + mount_point: /mnt/test1 + mount_options: + - noatime + state: enabled + - name: fs2 + resource_name_fs: gfs2-2 + pvs: + - /dev/disk/by-path/virtio-pci-0000:01:00.0 + vg: vg_gfs2_2 + lv: lv_gfs2_2 + lv_size: 100G + resource_name_lv: shared-lv-2 + journals: 3 + mount_point: /mnt/test2 + mount_options: + - rgrplvb + state: disabled +``` + +## rpm-ostree + +See README-ostree.md + +## License + +MIT + +## Author Information + +Andrew Price diff --git a/ansible_pytest_extra_requirements.txt b/ansible_pytest_extra_requirements.txt new file mode 100644 index 0000000..6bafb6f --- /dev/null +++ b/ansible_pytest_extra_requirements.txt @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: MIT + +# ansible and dependencies for all supported platforms +ansible ; python_version > "2.6" +idna<2.8 ; python_version < "2.7" +PyYAML<5.1 ; python_version < "2.7" diff --git a/contributing.md b/contributing.md new file mode 100644 index 0000000..1f4804e --- /dev/null +++ b/contributing.md @@ -0,0 +1,52 @@ +# Contributing to the gfs2 Linux System Role + +## Where to start + +The first place to go is [Contribute](https://linux-system-roles.github.io/contribute.html). +This has all of the common information that all role developers need: + +* Role structure and layout +* Development tools - How to run tests and checks +* Ansible recommended practices +* Basic git and github information +* How to create git commits and submit pull requests + +**Bugs and needed implementations** are listed on +[Github Issues](https://github.com/linux-system-roles/gfs2/issues). +Issues labeled with +[**help wanted**](https://github.com/linux-system-roles/gfs2/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) +are likely to be suitable for new contributors! 
+ +**Code** is managed on [Github](https://github.com/linux-system-roles/gfs2), using +[Pull Requests](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests). + +## Python Code + +The Python code needs to be **compatible with the Python versions supported by +the role platform**. + +For example, see [meta](https://github.com/linux-system-roles/gfs2/blob/main/meta/main.yml) +for the platforms supported by the role. + +If the role provides Ansible modules (code in `library/` or `module_utils/`) - +these run on the *managed* node, and typically[1] use the default system python: + +* EL6 - python 2.6 +* EL7 - python 2.7 or python 3.6 in some cases +* EL8 - python 3.6 +* EL9 - python 3.9 + +If the role provides some other sort of Ansible plugin such as a filter, test, +etc. - these run on the *control* node and typically use whatever version of +python that Ansible uses, which in many cases is *not* the system python, and +may be a modularity release such as python311. + +In general, it is a good idea to ensure the role python code works on all +versions of python supported by `tox-lsr` from py36 on, and on py27 if the role +supports EL7, and on py26 if the role supports EL6.[1] + +[1] Advanced users may set +[ansible_python_interpreter](https://docs.ansible.com/ansible/latest/reference_appendices/special_variables.html#term-ansible_python_interpreter) +to use a non-system python on the managed node, so it is a good idea to ensure +your code has broad python version compatibility, and do not assume your code +will only ever be run with the default system python. diff --git a/defaults/main.yml b/defaults/main.yml new file mode 100644 index 0000000..32b1393 --- /dev/null +++ b/defaults/main.yml @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: MIT +--- +# Whether to enable the repositories required to install the required +# packages. Defaults to `true`. +gfs2_enable_repos: true + +# Default lvmlockd resource name +gfs2_resource_name_lvmlockd: lvm_locking + +# Default dlm resource name +gfs2_resource_name_dlm: dlm + +# Default locking group name +gfs2_group_name_locking: locking + +# File system defaults +gfs2_fs_defaults: + # Default number of journals is the number of hosts + journals: "{{ ansible_play_hosts_all | length }}" + state: enabled + +# For testing only +_gfs2_test_allow_stonith_disabled: false diff --git a/meta/argument_specs.yml b/meta/argument_specs.yml new file mode 100644 index 0000000..4443c64 --- /dev/null +++ b/meta/argument_specs.yml @@ -0,0 +1,138 @@ +# SPDX-License-Identifier: MIT +--- +argument_specs: + main: + short_description: The gfs2 role. + description: > + The gfs2 role allows you to create gfs2 file systems in a pacemaker + cluster managed using `pcs`. Before this role is invoked, the cluster + must be set up (perhaps using the `ha_cluster` role) and fencing must be + enabled. + + This role will install necessary packages, set up the dlm and lvmlockd + cluster resources, create the LVM volume groups and logical volumes, and + create the gfs2 file system and cluster resources with the necessary + resource constraints. + + If any of the cluster resources already exist, make sure that the + `gfs2_resource_name_*` variables are set to the names of the resources so + that it can detect them. + + The role errs on the side of caution when creating the file system. 
If + the file system does not yet exist, the first time this role runs it will + only create the gfs2 file system on the shared logical volume if an + existing file system signature is not detected. You may need to inspect + the device's contents and use `wipefs` if appropriate before continuing. + options: + gfs2_enable_repos: + type: bool + required: false + description: > + Whether to enable the repositories required to install the required + packages. Defaults to `true`. + gfs2_cluster_name: + type: str + required: true + description: > + The name of the cluster. See the mkfs.gfs2(8) man page for details. + gfs2_resource_name_lvmlockd: + type: str + required: false + description: > + The name of the lvmlockd resource in the cluster. This defaults to + 'lvm_locking'. + gfs2_resource_name_dlm: + type: str + required: true + description: > + The name of the dlm_controld resource in the cluster. This defaults + to 'dlm'. + gfs2_group_name_locking: + type: str + required: true + description: > + The name of the DLM/LVM locking resource group in the cluster. This + defaults to 'locking'. + gfs2_file_systems: + type: list + elements: dict + required: true + description: > + A list of gfs2 file system specifications. + options: + name: + type: str + required: true + description: > + The file system name. See the mkfs.gfs2(8) man page for details. + state: + type: str + required: false + choices: + - enabled + - disabled + description: > + The intended state of the gfs2 file system. Defaults to + 'enabled'. 'enabled' and 'disabled' map to the state of the + Filesystem resource in the cluster, effectively declaring whether + the gfs2 file system is mounted or unmounted. In both cases, the + file system resources will be created if they do not already exist. + journals: + type: int + required: false + description: > + The number of gfs2 journals (max. number of mounters). This + defaults to the number of hosts targeted by the play. + resource_name_fs: + type: str + required: false + description: > + The name of the gfs2 Filesystem resource in the cluster. Defaults + to the value of `name`. + group_name_fs: + type: str + required: false + description: > + The name of the gfs2/LVM resource group in the cluster. Defaults + to `-group`. + pvs: + type: list + elements: path + required: true + description: > + A list of shared block devices to use as lvm physical volumes. + vg: + type: str + required: true + description: > + The name of the shared volume group. + lv: + type: str + required: true + description: > + The name of the shared logical volume. + resource_name_lv: + type: str + required: false + description: > + The name of the shared LV activation resource in the cluster. + Defaults to `vg-lv`. + mount_point: + type: path + required: true + description: > + The path to the mount point (directory). It will be created if it + does not exist. + mount_options: + type: list + elements: str + required: false + description: > + gfs2 mount options. See the gfs2(5) man page. + lv_size: + type: str + required: true + description: > + The size of the shared logical volume as required by `lvcreate + --size`. Note that using dynamic values such as 100%FREE is not + idempotent and it is better to use exact values such as 100G. 
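
Taken together with `defaults/main.yml` earlier in this patch, the argument spec above implies that a file system entry only needs its per-filesystem required keys; `journals` and `state` fall back to `gfs2_fs_defaults`. As a minimal sketch (the inventory group and device path are hypothetical, chosen only for illustration), an invocation relying on those defaults might look like:

```yaml
# Hypothetical playbook sketch -- host group and device path are illustrative only.
- name: Create one GFS2 file system, using defaults for the optional settings
  hosts: gfs2_nodes                  # assumed inventory group
  roles:
    - role: linux-system-roles.gfs2
      vars:
        gfs2_cluster_name: demo-cluster
        gfs2_file_systems:
          - name: demo_fs
            pvs:
              - /dev/disk/by-id/example-shared-disk   # hypothetical shared PV
            vg: vg_demo
            lv: lv_demo
            lv_size: 20G             # exact size, per the lv_size note above
            mount_point: /mnt/demo
        # journals defaults to the number of play hosts and state defaults to
        # "enabled" via gfs2_fs_defaults in defaults/main.yml
```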
diff --git a/meta/collection-requirements.yml b/meta/collection-requirements.yml
new file mode 100644
index 0000000..0e6f8ea
--- /dev/null
+++ b/meta/collection-requirements.yml
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: MIT
+---
+collections:
+  - ansible.posix
+  - fedora.linux_system_roles
diff --git a/meta/main.yml b/meta/main.yml
new file mode 100644
index 0000000..b1ad195
--- /dev/null
+++ b/meta/main.yml
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: MIT
+---
+galaxy_info:
+  author: Andrew Price
+  description: Configure and manage gfs2 file systems in a pacemaker cluster.
+  company: Red Hat, Inc.
+  license: MIT
+  min_ansible_version: "2.12"
+  platforms:
+    - name: Fedora
+      versions:
+        - all
+    - name: EL
+      versions:
+        - "8"
+        - "9"
+  galaxy_tags:
+    - hacluster
+    - pacemaker
+    - corosync
+    - dlm
+    - lvm
+    - gfs2
+    - sharedstorage
+    - filesystem
+dependencies: []
diff --git a/pylint_extra_requirements.txt b/pylint_extra_requirements.txt
new file mode 100644
index 0000000..ab91ce4
--- /dev/null
+++ b/pylint_extra_requirements.txt
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: MIT
+
+# Write extra requirements for running pylint here:
+pytest
diff --git a/tasks/enable-repositories/CentOS.yml b/tasks/enable-repositories/CentOS.yml
new file mode 100644
index 0000000..77daf74
--- /dev/null
+++ b/tasks/enable-repositories/CentOS.yml
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: MIT
+# No enable-repositories tasks are needed for CentOS. This file serves as a
+# placeholder and prevents RedHat.yml from being executed instead.
+---
diff --git a/tasks/enable-repositories/Fedora.yml b/tasks/enable-repositories/Fedora.yml
new file mode 100644
index 0000000..77daf74
--- /dev/null
+++ b/tasks/enable-repositories/Fedora.yml
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: MIT
+# No enable-repositories tasks are needed for Fedora. This file serves as a
+# placeholder and prevents RedHat.yml from being executed instead.
+--- diff --git a/tasks/enable-repositories/RedHat.yml b/tasks/enable-repositories/RedHat.yml new file mode 100644 index 0000000..bb6f8ba --- /dev/null +++ b/tasks/enable-repositories/RedHat.yml @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: MIT +--- +- name: List active RHEL repositories + ansible.builtin.command: + cmd: dnf repolist + register: __gfs2_repolist + changed_when: false + check_mode: false + +- name: Enable RHEL repositories + ansible.builtin.command: >- + subscription-manager repos --enable {{ item.id | quote }} + loop: "{{ __gfs2_repos }}" + when: item.name not in __gfs2_repolist.stdout + changed_when: item.name not in __gfs2_repolist.stdout diff --git a/tasks/fs.yml b/tasks/fs.yml new file mode 100644 index 0000000..ed8b8d5 --- /dev/null +++ b/tasks/fs.yml @@ -0,0 +1,192 @@ +# SPDX-License-Identifier: MIT +--- +- name: Merge file system specification with default values + vars: + fs: "{{ gfs2_fs_defaults | combine(fs_spec) }}" + default_resource_name_lv: "{{ fs.vg }}-{{ fs.lv }}" + resource_name_lv: "{{ fs.resource_name_lv | d(default_resource_name_lv) }}" + default_group_name_fs: "{{ fs.name }}-group" + group_name_fs: "{{ fs.group_name_fs | d(default_group_name_fs) }}" + default_resource_name_fs: gfs2-{{ fs.name }} + resource_name_fs: "{{ fs.resource_name_fs | d(default_resource_name_fs) }}" + block: + - name: Create shared volume group + ansible.builtin.include_role: + name: fedora.linux_system_roles.storage + vars: + storage_pools: + - name: "{{ fs.vg }}" + disks: "{{ fs.pvs }}" + type: lvm + shared: true + state: present + volumes: + - name: "{{ fs.lv }}" + size: "{{ fs.lv_size }}" + fs_type: gfs2 + fs_create_options: >- + -D -t "{{ gfs2_cluster_name }}:{{ fs.name }}" + -j "{{ fs.journals | d(gfs2_default_journals) }}" + run_once: true + + - name: Check whether the logical volume activation resource exists + ansible.builtin.command: + argv: + - pcs + - resource + - status + - "{{ resource_name_lv }}" + register: lv_resource_exists + failed_when: false + changed_when: false + + - name: Create logical volume activation resource + ansible.builtin.command: + argv: + - pcs + - resource + - create + - "{{ resource_name_lv }}" + - --group + - "{{ group_name_fs }}" + - ocf:heartbeat:LVM-activate + - lvname={{ fs.lv }} + - vgname={{ fs.vg }} + - activation_mode=shared + - vg_access_mode=lvmlockd + - --wait + when: lv_resource_exists.rc != 0 + changed_when: true + run_once: true + + - name: Clone file system-specific resource group + ansible.builtin.command: + argv: + - pcs + - resource + - clone + - "{{ group_name_fs }}" + - interleave=true + - --wait + register: fs_clone_result + failed_when: + - fs_clone_result.rc != 0 + - "'already been cloned' not in fs_clone_result.stderr" + changed_when: fs_clone_result.rc == 0 + run_once: true + + - name: Set ordering constraint between locking and file system resource groups + ansible.builtin.command: + argv: + - pcs + - constraint + - order + - start + - "{{ gfs2_group_name_locking }}-clone" + - then + - "{{ group_name_fs }}-clone" + register: ordering_result + failed_when: + - ordering_result.rc != 0 + - "'duplicate constraint already exists' not in ordering_result.stderr" + changed_when: ordering_result.rc == 0 + run_once: true + + - name: Set colocation constraint between locking and file system resource groups + ansible.builtin.command: + argv: + - pcs + - constraint + - colocation + - add + - "{{ group_name_fs }}-clone" + - with + - "{{ gfs2_group_name_locking }}-clone" + register: colo_result + failed_when: + - colo_result.rc != 0 
+ - "'duplicate constraint already exists' not in colo_result.stderr" + changed_when: colo_result.rc == 0 + run_once: true + + - name: Check logical volume path exists + ansible.builtin.stat: + path: /dev/mapper/{{ fs.vg }}-{{ fs.lv }} + follow: true + get_checksum: false + register: lv_path + changed_when: false + failed_when: not lv_path.stat.exists + any_errors_fatal: true + become: true + + - name: Create mount point + ansible.builtin.file: + path: "{{ fs.mount_point }}" + state: directory + mode: "0755" + any_errors_fatal: true + become: true + + - name: Check whether the gfs2 file system resource exists + ansible.builtin.command: + argv: + - pcs + - resource + - status + - "{{ resource_name_fs }}" + register: fs_resource_exists + failed_when: false + changed_when: false + + - name: Create gfs2 file system resource + ansible.builtin.command: + argv: + - pcs + - resource + - create + - --disabled + - "{{ resource_name_fs }}" + - --group + - "{{ group_name_fs }}" + - ocf:heartbeat:Filesystem + - device={{ lv_path.stat.path }} + - directory={{ fs.mount_point }} + - fstype=gfs2 + - op + - monitor + - interval=10s + - on-fail=fence + - --wait + run_once: true + when: fs_resource_exists.rc != 0 + changed_when: true + + - name: Set gfs2 resource mount options, if any + ansible.builtin.command: + argv: + - pcs + - resource + - update + - "{{ resource_name_fs }}" + - options={{ fs.mount_options | join(',') }} + - --wait + run_once: true + when: + - fs_resource_exists.rc != 0 + - fs.mount_options | d([]) | length > 0 + changed_when: true + + - name: Enable or disable the gfs2 resource + ansible.builtin.command: + argv: + - pcs + - resource + - "{{ {'disabled': 'disable', 'enabled': 'enable'}[fs.state] }}" + - "{{ resource_name_fs }}" + - --wait + run_once: true + when: + - fs.state == "enabled" and "Started:" not in fs_resource_exists.stdout + or fs.state == "disabled" and "Started:" in fs_resource_exists.stdout + changed_when: true diff --git a/tasks/install-packages.yml b/tasks/install-packages.yml new file mode 100644 index 0000000..7b0c7bd --- /dev/null +++ b/tasks/install-packages.yml @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: MIT +--- +- name: Ensure ansible_facts used by role + setup: + gather_subset: "{{ __gfs2_required_facts_subsets }}" + when: __gfs2_required_facts | + difference(ansible_facts.keys() | list) | length > 0 + +- name: Determine if system is ostree and set flag + when: not __gfs2_is_ostree is defined + block: + - name: Check if system is ostree + stat: + path: /run/ostree-booted + register: __ostree_booted_stat + + - name: Set flag to indicate system is ostree + set_fact: + __gfs2_is_ostree: "{{ __ostree_booted_stat.stat.exists }}" + +- name: Find environment-specific tasks to enable repositories + ansible.builtin.set_fact: + __gfs2_enable_repo_tasks_file: \ + "{{ __gfs2_enable_repo_tasks_file_candidate }}" + loop: + - "{{ ansible_facts['os_family'] }}.yml" + - "{{ ansible_facts['distribution'] }}.yml" + - >- + {{ ansible_facts['distribution'] ~ '_' ~ + ansible_facts['distribution_major_version'] }}.yml + - >- + {{ ansible_facts['distribution'] ~ '_' ~ + ansible_facts['distribution_version'] }}.yml + vars: + __gfs2_enable_repo_tasks_file_candidate: \ + "{{ role_path }}/tasks/enable-repositories/{{ item }}" + when: + - gfs2_enable_repos | bool + - __gfs2_enable_repo_tasks_file_candidate is file + +- name: Run environment-specific tasks to enable repositories + ansible.builtin.include_tasks: "{{ __gfs2_enable_repo_tasks_file }}" + when: + - gfs2_enable_repos | bool + - 
__gfs2_enable_repo_tasks_file is defined + +- name: Install packages + ansible.builtin.package: + name: "{{ __gfs2_packages }}" + state: present + use: "{{ (__gfs2_is_ostree | d(false)) | + ternary('ansible.posix.rhel_rpm_ostree', omit) }}" + any_errors_fatal: true diff --git a/tasks/main.yml b/tasks/main.yml new file mode 100644 index 0000000..d2ca895 --- /dev/null +++ b/tasks/main.yml @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: MIT +--- +- name: Check for an active stonith resource + ansible.builtin.command: + argv: + - pcs + - stonith + - status + register: stonith_status + run_once: true + changed_when: false + failed_when: + - stonith_status.rc != 0 + or "Started" not in stonith_status.stdout + when: not _gfs2_test_allow_stonith_disabled + +- name: Install required packages + ansible.builtin.include_tasks: install-packages.yml + +- name: Setup cluster + ansible.builtin.include_tasks: setup-cluster.yml + when: gfs2_file_systems | length > 0 diff --git a/tasks/setup-cluster.yml b/tasks/setup-cluster.yml new file mode 100644 index 0000000..16e887f --- /dev/null +++ b/tasks/setup-cluster.yml @@ -0,0 +1,106 @@ +# SPDX-License-Identifier: MIT +--- +- name: Check the cluster quorum policy + ansible.builtin.command: + argv: + - pcs + - property + - config + - no-quorum-policy + register: no_quorum_policy + changed_when: false + any_errors_fatal: true + +- name: Set the cluster quorum policy to 'freeze' + ansible.builtin.command: + argv: + - pcs + - property + - set + - no-quorum-policy=freeze + when: "'no-quorum-policy: freeze' not in no_quorum_policy.stdout" + run_once: true + changed_when: true + +- name: Check whether the dlm resource exists + ansible.builtin.command: + argv: + - pcs + - resource + - status + - "{{ gfs2_resource_name_dlm }}" + register: dlm_resource_exists + failed_when: false + changed_when: false + +- name: Create dlm resource + ansible.builtin.command: + argv: + - pcs + - resource + - create + - "{{ gfs2_resource_name_dlm }}" + - --group + - "{{ gfs2_group_name_locking }}" + - ocf:pacemaker:controld + - "allow_stonith_disabled={{ _gfs2_test_allow_stonith_disabled }}" + - op + - monitor + - interval=30s + - on-fail=fence + - --wait + when: dlm_resource_exists.rc != 0 + run_once: true + changed_when: true + +- name: Clone locking resource group + ansible.builtin.command: + argv: + - pcs + - resource + - clone + - "{{ gfs2_group_name_locking }}" + - interleave=true + - --wait + register: locking_clone_result + failed_when: + - locking_clone_result.rc != 0 + - "'already been cloned' not in locking_clone_result.stderr" + changed_when: locking_clone_result.rc == 0 + run_once: true + +- name: Check whether the lvmlockd resource exists + ansible.builtin.command: + argv: + - pcs + - resource + - status + - "{{ gfs2_resource_name_lvmlockd }}" + register: lockd_resource_exists + failed_when: false + changed_when: false + +- name: Create lvmlockd resource + ansible.builtin.command: + argv: + - pcs + - resource + - create + - "{{ gfs2_resource_name_lvmlockd }}" + - --group + - "{{ gfs2_group_name_locking }}" + - ocf:heartbeat:lvmlockd + - op + - monitor + - interval=30s + - on-fail=fence + - --wait + when: lockd_resource_exists.rc != 0 + run_once: true + changed_when: true + +- name: Configure file system-specific resources + ansible.builtin.include_tasks: fs.yml + loop: "{{ gfs2_file_systems }}" + loop_control: + loop_var: fs_spec diff --git a/tests/.fmf/version b/tests/.fmf/version new file mode 100644 index 0000000..d00491f --- /dev/null +++ b/tests/.fmf/version @@ -0,0 +1 @@ +1 
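For orientation, the locking infrastructure that tasks/setup-cluster.yml assembles with `pcs` (a cloned `locking` group containing an `ocf:pacemaker:controld` and an `ocf:heartbeat:lvmlockd` resource) can also be created ahead of time with the `fedora.linux_system_roles.ha_cluster` role, in which case the existence checks above find the resources and skip creation. A rough sketch under that assumption; the inventory group, cluster name, and password are placeholders, and the resource and group ids mirror this role's defaults:

```yaml
- name: Pre-create the dlm/lvmlockd locking resources (illustrative sketch)
  hosts: cluster_nodes                        # placeholder inventory group
  vars:
    ha_cluster_cluster_name: mycluster        # placeholder
    ha_cluster_hacluster_password: changeme   # placeholder
    ha_cluster_extra_packages:                # the agents below need these daemons
      - dlm
      - lvm2-lockd
    ha_cluster_resource_primitives:
      - id: dlm              # keep in sync with gfs2_resource_name_dlm
        agent: ocf:pacemaker:controld
      - id: lvm_locking      # keep in sync with gfs2_resource_name_lvmlockd
        agent: ocf:heartbeat:lvmlockd
    ha_cluster_resource_groups:
      - id: locking          # keep in sync with gfs2_group_name_locking
        resource_ids:
          - dlm
          - lvm_locking
  roles:
    - fedora.linux_system_roles.ha_cluster
```

The basic cluster test later in this patch uses the same mechanism to bring up its test cluster.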
diff --git a/tests/get_unused_disk.yml b/tests/get_unused_disk.yml new file mode 100644 index 0000000..aa5d04d --- /dev/null +++ b/tests/get_unused_disk.yml @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: MIT +--- +- name: Find unused disks in the system + find_unused_disk: + min_size: "{{ min_size | d(omit) }}" + max_return: "{{ max_return | d(omit) }}" + register: unused + changed_when: false + failed_when: + - ("Unable to find unused disk" in unused.disks) or + (unused.disks | length < disks_needed | d(1)) diff --git a/tests/library/find_unused_disk.py b/tests/library/find_unused_disk.py new file mode 100644 index 0000000..7583367 --- /dev/null +++ b/tests/library/find_unused_disk.py @@ -0,0 +1,225 @@ +#!/usr/bin/python + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: find_unused_disk +short_description: Gets unused disks +description: + - "WARNING: Do not use this module directly! It is only for role internal use." + - Disks are considered in ascending alphanumeric sorted order. + - Disks that meet all conditions are considered 'empty' and returned (using kernel device name) in a list. + - 1. No known signatures exist on the disk, with the exception of partition tables. + - 2. If there is a partition table on the disk, it contains no partitions. + - 3. The disk has no holders to eliminate the possibility of it being a multipath or dmraid member device. + - 4. Device can be opened with exclusive access to make sure no other software is using it. + - If no disks meet all criteria, "Unable to find unused disk" will be returned. + - Number of returned disks defaults to first 10, but can be specified with 'max_return' argument. +author: Eda Zhou (@edamamez) +options: + max_return: + description: Sets the maximum number of unused disks to return. + default: 10 + type: int + + min_size: + description: Specifies the minimum disk size to return an unused disk. + default: '0' + type: str + + with_interface: + description: Specifies which disk interface will be accepted (scsi, virtio, nvme). 
+    default: null
+    type: str
+"""
+
+EXAMPLES = """
+- name: test finding first unused device module
+  hosts: localhost
+  tasks:
+    - name: run module
+      find_unused_disk:
+        min_size: '10g'
+      register: testout
+
+    - name: dump test output
+      debug:
+        msg: '{{ testout }}'
+"""
+
+RETURN = """
+disk_name:
+    description: Information about unused disks
+    returned: On success
+    type: complex
+    contains:
+        disks:
+            description: Unused disk(s) that have been found
+            returned: On success
+            type: list
+            samples: |
+                ["sda1", "dm-0", "dm-3"]
+        none:
+            description: No unused disks were found
+            returned: On success
+            type: string
+            sample: "Unable to find unused disk"
+"""
+
+
+import os
+import re
+
+# pylint: disable=E0401, E0611
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.size import Size
+
+
+SYS_CLASS_BLOCK = "/sys/class/block/"
+IGNORED_DEVICES = [re.compile(r"^/dev/nullb\d+$")]
+
+
+def is_ignored(disk_path):
+    sys_path = os.path.realpath(disk_path)
+    return any(ignore.match(sys_path) is not None for ignore in IGNORED_DEVICES)
+
+
+def is_device_interface(module, path, interface):
+    device = path.split("dev/")[-1]
+    # command checks if the device uses given interface (virtio, scsi or nvme)
+    result = module.run_command(
+        [
+            "readlink",
+            "/sys/block/%s/device/device/driver" % device,
+            "/sys/block/%s/device/driver" % device,
+        ]
+    )
+    return interface in result[1]
+
+
+def no_signature(run_command, disk_path):
+    """Return true if no known signatures exist on the disk."""
+    signatures = run_command(["blkid", "-p", disk_path])
+    return "UUID" not in signatures[1]
+
+
+def no_holders(disk_path):
+    """Return true if the disk has no holders."""
+    holders = os.listdir(SYS_CLASS_BLOCK + get_sys_name(disk_path) + "/holders/")
+    return len(holders) == 0
+
+
+def can_open(disk_path):
+    """Return true if the device can be opened with exclusive access."""
+    try:
+        os.open(disk_path, os.O_EXCL)
+        return True
+    except OSError:
+        return False
+
+
+def get_sys_name(disk_path):
+    if not os.path.islink(disk_path):
+        return os.path.basename(disk_path)
+
+    node_dir = "/".join(disk_path.split("/")[:-1])
+    return os.path.normpath(node_dir + "/" + os.readlink(disk_path))
+
+
+def get_partitions(disk_path):
+    sys_name = get_sys_name(disk_path)
+    partitions = list()
+    for filename in os.listdir(SYS_CLASS_BLOCK + sys_name):
+        if re.match(sys_name + r"p?\d+$", filename):
+            partitions.append(filename)
+
+    return partitions
+
+
+def get_disks(module):
+    buf = module.run_command(
+        ["lsblk", "-p", "--pairs", "--bytes", "-o", "NAME,TYPE,SIZE,FSTYPE"]
+    )[1]
+    disks = dict()
+    for line in buf.splitlines():
+        if not line:
+            continue
+
+        m = re.search(
+            r'NAME="(?P<path>[^"]*)" TYPE="(?P<type>[^"]*)" SIZE="(?P<size>\d+)" FSTYPE="(?P<fstype>[^"]*)"',
+            line,
+        )
+        if m is None:
+            module.log(line)
+            continue
+
+        if m.group("type") != "disk":
+            continue
+
+        disks[m.group("path")] = {
+            "type": m.group("type"),
+            "size": m.group("size"),
+            "fstype": m.group("fstype"),
+        }
+
+    return disks
+
+
+def run_module():
+    """Create the module"""
+    module_args = dict(
+        max_return=dict(type="int", required=False, default=10),
+        min_size=dict(type="str", required=False, default="0"),
+        with_interface=dict(type="str", required=False, default=None),
+    )
+
+    result = dict(changed=False, disks=[])
+
+    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
+
+    for path, attrs in get_disks(module).items():
+        if is_ignored(path):
+            continue
+
+        interface = module.params["with_interface"]
+
+        if interface is not None and
not is_device_interface(module, path, interface): + continue + + if attrs["fstype"]: + continue + + if Size(attrs["size"]).bytes < Size(module.params["min_size"]).bytes: + continue + + if get_partitions(path): + continue + + if not no_holders(get_sys_name(path)): + continue + + if not can_open(path): + continue + + result["disks"].append(os.path.basename(path)) + if len(result["disks"]) >= module.params["max_return"]: + break + + if not result["disks"]: + result["disks"] = "Unable to find unused disk" + else: + result["disks"].sort() + + module.exit_json(**result) + + +def main(): + """Execute the module""" + run_module() + + +if __name__ == "__main__": + main() diff --git a/tests/module_utils/size.py b/tests/module_utils/size.py new file mode 100644 index 0000000..6b7f379 --- /dev/null +++ b/tests/module_utils/size.py @@ -0,0 +1,166 @@ +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import re + +DECIMAL_FACTOR = 10**3 +BINARY_FACTOR = 2**10 + +# index of the item in the list determines the exponent for size computation +# e.g. size_in_bytes = value * (DECIMAL_FACTOR ** (index(mega)+1)) = value * (1000 ** (1+1)) +# pylint: disable=bad-whitespace +PREFIXES_DECIMAL = [ + ["k", "M", "G", "T", "P", "E", "Z", "Y"], # noqa: E241 + ["kilo", "mega", "giga", "tera", "peta", "exa", "zetta", "yotta"], +] # noqa: E241 +PREFIXES_BINARY = [ + ["Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"], # noqa: E221,E241 + ["kibi", "mebi", "gibi", "tebi", "pebi", "exbi", "zebi", "yobi"], +] # noqa: E241 +SUFFIXES = ["bytes", "byte", "B"] + + +class Size(object): + """Class for basic manipulation of the sizes in *bytes""" + + def __init__(self, value): + raw_number, raw_units = self._parse_input(str(value)) + self.factor, self.exponent = self._parse_units(raw_units) + self.number = self._parse_number(raw_number) + + self.units = raw_units + + def _parse_input(self, value): + """splits input string into number and unit parts + returns number part, unit part + """ + m = re.search("^(.*?)([^0-9]*)$", value) + + raw_number = m.group(1).strip() + if raw_number == "": + raise ValueError("The string '%s' does not contain size" % value) + + raw_units = m.group(2).strip() + + return raw_number, raw_units + + def _parse_units(self, raw_units): + """ + gets string containing size units and + returns *_FACTOR (BINARY or DECIMAL) and the prefix position (not index!) 
+ in the PREFIXES_* list + If no unit is specified defaults to BINARY and Bytes + """ + + prefix = raw_units + no_suffix_flag = True + valid_suffix = False + + # get rid of possible units suffix ('bytes', 'b' or 'B') + for suffix in SUFFIXES: + if raw_units.lower().endswith(suffix.lower()): + no_suffix_flag = False + prefix = raw_units[: -len(suffix)] + break + + if prefix == "": + # no unit was specified, use default + return BINARY_FACTOR, 0 + + # check the list for units + idx = -1 + + for lst in PREFIXES_DECIMAL: + lower_lst = [x.lower() for x in lst] + if prefix.lower() in lower_lst: + valid_suffix = True + idx = lower_lst.index(prefix.lower()) + used_factor = DECIMAL_FACTOR + break + + if idx < 0 or no_suffix_flag: + if no_suffix_flag: + used_factor = BINARY_FACTOR + + for lst in PREFIXES_BINARY: + lower_lst = [x.lower() for x in lst] + if prefix.lower() in lower_lst: + valid_suffix = True + idx = lower_lst.index(prefix.lower()) + used_factor = BINARY_FACTOR + break + + if idx < 0 or not valid_suffix: + raise ValueError("Unable to identify unit '%s'" % raw_units) + + return used_factor, idx + 1 + + def _parse_number(self, raw_number): + """parse input string containing number + return float + """ + return float(raw_number) + + def _get_unit(self, factor, exponent, unit_type=0): + """based on decimal or binary factor and exponent + obtain and return correct unit + """ + + if unit_type == 0: + suffix = "B" + else: + suffix = "bytes" + + if exponent == 0: + return suffix + + if factor == DECIMAL_FACTOR: + prefix_lst = PREFIXES_DECIMAL[unit_type] + else: + prefix_lst = PREFIXES_BINARY[unit_type] + return prefix_lst[exponent - 1] + suffix + + @property + def bytes(self): + """returns size value in bytes as int""" + return int((self.factor**self.exponent) * self.number) + + def _format(self, format_str, factor, exponent): + + result = format_str + result = result.replace(r"%sb", self._get_unit(factor, exponent, 0)) + result = result.replace(r"%lb", self._get_unit(factor, exponent, 1)) + + return result + + def get(self, units="autobin", fmt="%0.1f %sb"): + """returns size value as a string with given units and format + + "units" parameter allows to select preferred unit: + for example "KiB" or "megabytes" + accepted values are also: + "autobin" (default) - uses the highest human readable unit (binary) + "autodec" - uses the highest human readable unit (decimal) + + "fmt" parameter allows to specify the output format: + %sb - will be replaced with the short byte size unit (e.g. MiB) + %lb - will be replaced with the long byte size unit (e.g. kilobytes) + value can be formatted using standard string replacements (e.g. 
%d, %f) + + """ + + ftr = BINARY_FACTOR + if units == "autodec": + ftr = DECIMAL_FACTOR + if units in ("autobin", "autodec"): + exp = 0 + value = float(self.bytes) + while value + 0.01 > ftr: # + 0.01 to balance the float comparison + value /= ftr + exp += 1 + else: + ftr, exp = self._parse_units(units.strip()) + value = (float(self.factor**self.exponent) / float(ftr**exp)) * self.number + + return self._format(fmt, ftr, exp) % value diff --git a/tests/provision.fmf b/tests/provision.fmf new file mode 100644 index 0000000..0c865f2 --- /dev/null +++ b/tests/provision.fmf @@ -0,0 +1,7 @@ +standard-inventory-qcow2: + qemu: + m: 2048 + drive: + - size: 5368709120 + - size: 5368709120 + - size: 5368709120 diff --git a/tests/roles/linux-system-roles.gfs2/defaults b/tests/roles/linux-system-roles.gfs2/defaults new file mode 120000 index 0000000..30459c3 --- /dev/null +++ b/tests/roles/linux-system-roles.gfs2/defaults @@ -0,0 +1 @@ +../../../defaults \ No newline at end of file diff --git a/tests/roles/linux-system-roles.gfs2/meta b/tests/roles/linux-system-roles.gfs2/meta new file mode 120000 index 0000000..8df72eb --- /dev/null +++ b/tests/roles/linux-system-roles.gfs2/meta @@ -0,0 +1 @@ +../../../meta \ No newline at end of file diff --git a/tests/roles/linux-system-roles.gfs2/tasks b/tests/roles/linux-system-roles.gfs2/tasks new file mode 120000 index 0000000..d97297b --- /dev/null +++ b/tests/roles/linux-system-roles.gfs2/tasks @@ -0,0 +1 @@ +../../../tasks \ No newline at end of file diff --git a/tests/roles/linux-system-roles.gfs2/vars b/tests/roles/linux-system-roles.gfs2/vars new file mode 120000 index 0000000..663079d --- /dev/null +++ b/tests/roles/linux-system-roles.gfs2/vars @@ -0,0 +1 @@ +../../../vars \ No newline at end of file diff --git a/tests/tests_basic_gfs2_cluster.yml b/tests/tests_basic_gfs2_cluster.yml new file mode 100644 index 0000000..dc90fe9 --- /dev/null +++ b/tests/tests_basic_gfs2_cluster.yml @@ -0,0 +1,122 @@ +# SPDX-License-Identifier: MIT +--- +- name: Basic usability test + hosts: all + become: true + vars: + common_cluster_name: testcluster + mount_point: /mnt/test + tasks: + - name: Skip this test if test system does not support + meta: end_host + when: lookup("env", "SYSTEM_ROLES_GFS2_CLUSTER_TEST") != "true" + + - name: Run the test + block: + # Make the host alias we specified in LSR_QEMU_IMAGE_ALIAS (tox.ini) + # resolve + - name: Update /etc/hosts + ansible.builtin.lineinfile: + path: /etc/hosts + line: 127.0.0.1 testnode + + - name: Get unused disks + ansible.builtin.include_tasks: get_unused_disk.yml + vars: + min_size: "5g" + max_return: 1 + disks_needed: 1 + + - name: Create cluster + ansible.builtin.include_role: + name: fedora.linux_system_roles.ha_cluster + vars: + ha_cluster_enable_repos: false + ha_cluster_cluster_name: "{{ common_cluster_name }}" + ha_cluster_hacluster_password: insecure + ha_cluster_extra_packages: + - dlm + - lvm2-lockd + ha_cluster_cluster_properties: + - attrs: + # Don't do this in production + - name: stonith-enabled + value: "false" + ha_cluster_resource_primitives: + - id: dlm + agent: "ocf:pacemaker:controld" + instance_attrs: + - attrs: + # Don't do this in production + - name: allow_stonith_disabled + value: "true" + - id: lvmlockd + agent: "ocf:heartbeat:lvmlockd" + ha_cluster_resource_groups: + - id: locking + resource_ids: + - dlm + - lvmlockd + + - name: Create gfs2 filesystem and cluster resources + ansible.builtin.include_role: + name: linux-system-roles.gfs2 + vars: + # noqa var-naming[no-role-prefix] + 
_gfs2_test_allow_stonith_disabled: true + gfs2_cluster_name: "{{ common_cluster_name }}" + gfs2_file_systems: + - name: fs1 + pvs: + - "{{ unused.disks[0] }}" + vg: vg_gfs2_1 + lv: lv_gfs2_1 + lv_size: 4G + mount_point: "{{ mount_point }}" + + - name: Create a file on the gfs2 filesystem + ansible.builtin.file: + path: "{{ mount_point }}/boop" + state: touch + mode: "0644" + + - name: Run stat + ansible.builtin.command: + argv: + - stat + - -f + - "{{ mount_point }}/boop" + register: stat_result + failed_when: "'gfs/gfs2' not in stat_result.stdout" + changed_when: true + + - name: Re-run the role to test idempotency + ansible.builtin.include_role: + name: linux-system-roles.gfs2 + vars: + # noqa var-naming[no-role-prefix] + _gfs2_test_allow_stonith_disabled: true + gfs2_cluster_name: "{{ common_cluster_name }}" + gfs2_file_systems: + - name: fs1 + pvs: + - "{{ unused.disks[0] }}" + vg: vg_gfs2_1 + lv: lv_gfs2_1 + lv_size: 4G + mount_point: "{{ mount_point }}" + + always: + - name: Remove cluster + ansible.builtin.include_role: + name: fedora.linux_system_roles.ha_cluster + vars: + ha_cluster_enable_repos: false + ha_cluster_cluster_name: "{{ common_cluster_name }}" + ha_cluster_cluster_present: false + + - name: Update /etc/hosts + ansible.builtin.lineinfile: + path: /etc/hosts + line: 127.0.0.1 testnode + state: absent diff --git a/tests/tests_default.yml b/tests/tests_default.yml new file mode 100644 index 0000000..5f59958 --- /dev/null +++ b/tests/tests_default.yml @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: MIT +--- +- name: Basic usability test + hosts: all + gather_facts: false + vars: + gfs2_cluster_name: gfs2_test_cluster + gfs2_file_systems: [] + _gfs2_test_allow_stonith_disabled: true + roles: + - linux-system-roles.gfs2 diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..69609ae --- /dev/null +++ b/tox.ini @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: MIT +[lsr_config] +lsr_enable = true + +[testenv] +setenv = + RUN_PYTEST_SETUP_MODULE_UTILS = true + RUN_PYLINT_SETUP_MODULE_UTILS = true + # see tests_basic_gfs2_cluster.yml + LSR_QEMU_IMAGE_ALIAS = testnode + SYSTEM_ROLES_GFS2_CLUSTER_TEST = true diff --git a/vars/RedHat_8.yml b/vars/RedHat_8.yml new file mode 100644 index 0000000..8816624 --- /dev/null +++ b/vars/RedHat_8.yml @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: MIT +--- +__gfs2_repos: + - id: rhel-8-for-{{ ansible_architecture }}-resilientstorage-rpms + name: Resilient Storage diff --git a/vars/RedHat_9.yml b/vars/RedHat_9.yml new file mode 100644 index 0000000..c638b11 --- /dev/null +++ b/vars/RedHat_9.yml @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: MIT +--- +__gfs2_repos: + - id: rhel-9-for-{{ ansible_architecture }}-resilientstorage-rpms + name: Resilient Storage diff --git a/vars/main.yml b/vars/main.yml new file mode 100644 index 0000000..3801fc9 --- /dev/null +++ b/vars/main.yml @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: MIT +--- +__gfs2_repos: [] +__gfs2_packages: + - dlm + - lvm2-lockd + - gfs2-utils +# ansible_facts required by the role +__gfs2_required_facts: + - distribution + - distribution_major_version + - distribution_version + - os_family +# the subsets of ansible_facts that need to be gathered in case any of the +# facts in required_facts is missing; see the documentation of +# the 'gather_subset' parameter of the 'setup' module +__gfs2_required_facts_subsets: "{{ ['!all', '!min'] + + __gfs2_required_facts }}"
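To make the fact-gathering comment above concrete: with `__gfs2_required_facts` defined as in vars/main.yml, the conditional `setup` call in tasks/install-packages.yml effectively expands to roughly the following, and only runs when one of the required facts is missing:

```yaml
- name: Ensure ansible_facts used by role
  ansible.builtin.setup:
    gather_subset:
      - '!all'
      - '!min'
      - distribution
      - distribution_major_version
      - distribution_version
      - os_family
```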