feat: PV resize support
There is a use case where the physical device size can change (e.g. on a
VM). We need to be able to change the size of the LVM PV to accommodate
that. This adds a new pool parameter, 'grow_to_fill'. When set, pool PVs
will try to take all available space on their respective devices.
Defaults to False.

Requires a blivet version that supports this feature. In tests, this is
checked using the 'does_library_support' script.
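The probe script itself is not part of this diff. Judging by the verify tasks below, which compare its stdout against 'True', it resolves a dotted attribute path against the installed library and prints the result. A minimal sketch of such a probe, under that assumption:

```python
#!/usr/bin/python3
"""Sketch of a capability probe like scripts/does_library_support.py.

Assumption (not shown in this diff): the real script takes a dotted path
such as 'blivet.formats.lvmpv.LVMPhysicalVolume.grow_to_fill' and prints
True/False depending on whether the attribute exists.
"""
import importlib
import sys


def supports(dotted_path):
    parts = dotted_path.split(".")
    # Import the longest importable module prefix...
    for i in range(len(parts), 0, -1):
        try:
            obj = importlib.import_module(".".join(parts[:i]))
        except ImportError:
            continue
        # ...then resolve the remaining components as attributes.
        try:
            for attr in parts[i:]:
                obj = getattr(obj, attr)
        except AttributeError:
            return False
        return True
    return False


if __name__ == "__main__":
    print(supports(sys.argv[1]))
```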
japokorn committed Jun 3, 2024
1 parent a9c84e5 commit 419170a
Showing 6 changed files with 173 additions and 2 deletions.
5 changes: 5 additions & 0 deletions README.md
@@ -48,6 +48,11 @@ keys:
  This specifies the type of pool to manage.
  Valid values for `type`: `lvm`.

- `grow_to_fill`

  When set to `true`, the pool Physical Volumes are resized to match their
  respective device sizes (e.g. after a virtual machine disk size increase).
  Defaults to `false`.

- `shared`

  If set to `true`, the role creates or manages a shared volume group. Requires lvmlockd and
1 change: 1 addition & 0 deletions defaults/main.yml
@@ -13,6 +13,7 @@ storage_pool_defaults:
  type: lvm
  disks: []
  volumes: []
  grow_to_fill: false

  encryption: false
  encryption_password: null
26 changes: 24 additions & 2 deletions library/blivet.py
@@ -51,6 +51,9 @@
encryption_password:
    description: encryption_password
    type: str
grow_to_fill:
    description: grow_to_fill
    type: bool
name:
    description: name
    type: str
@@ -370,7 +373,7 @@
from blivet3.callbacks import callbacks
from blivet3 import devicelibs
from blivet3 import devices
from blivet3.deviceaction import ActionConfigureFormat, ActionAddMember, ActionRemoveMember
from blivet3.deviceaction import ActionConfigureFormat, ActionResizeFormat, ActionAddMember, ActionRemoveMember
from blivet3.devicefactory import DEFAULT_THPOOL_RESERVE
from blivet3.flags import flags as blivet_flags
from blivet3.formats import fslib, get_format
@@ -386,7 +389,7 @@
from blivet.callbacks import callbacks
from blivet import devicelibs
from blivet import devices
from blivet.deviceaction import ActionConfigureFormat, ActionAddMember, ActionRemoveMember
from blivet.deviceaction import ActionConfigureFormat, ActionResizeFormat, ActionAddMember, ActionRemoveMember
from blivet.devicefactory import DEFAULT_THPOOL_RESERVE
from blivet.flags import flags as blivet_flags
from blivet.formats import fslib, get_format
@@ -412,6 +415,7 @@ def __getattr__(self, val):
blivet_flags.allow_online_fs_resize = True
blivet_flags.gfs2 = True
set_up_logging()

log = logging.getLogger(BLIVET_PACKAGE + ".ansible")

# XXX add support for LVM RAID raid0 level
@@ -1752,6 +1756,22 @@ def _manage_members(self):
        add_disks = [d for d in self._disks if d not in self._device.ancestors]
        remove_disks = [pv for pv in self._device.pvs if not any(d in pv.ancestors for d in self._disks)]

        if self._pool['grow_to_fill']:
            grow_pv_candidates = [pv for pv in self._device.pvs if pv not in remove_disks and pv not in add_disks]

            for pv in grow_pv_candidates:
                # skip the resize when the size difference is below two physical extents
                if abs(self._device.size - self._device.current_size) < 2 * self._device.pe_size:
                    continue

                pv.format.update_size_info()  # set pv to be resizable

                if pv.format.resizable:
                    pv.grow_to_fill = True
                    ac = ActionResizeFormat(pv, self._device.size)
                    self._blivet.devicetree.actions.add(ac)
                else:
                    log.warning("cannot grow/resize PV '%s', format is not resizable", pv.name)

        if not (add_disks or remove_disks):
            return

@@ -2140,6 +2160,7 @@ def run_module():
        encryption_key_size=dict(type='int'),
        encryption_luks_version=dict(type='str'),
        encryption_password=dict(type='str', no_log=True),
        grow_to_fill=dict(type='bool'),
        name=dict(type='str'),
        raid_level=dict(type='str'),
        raid_device_count=dict(type='int'),
@@ -2284,6 +2305,7 @@ def action_dict(action):
    # execute the scheduled actions, committing changes to disk
    callbacks.action_executed.add(record_action)
    callbacks.action_executed.add(ensure_udev_update)

    try:
        b.devicetree.actions.process(devices=b.devicetree.devices, dry_run=module.check_mode)
    except Exception as e:
19 changes: 19 additions & 0 deletions tests/test-verify-pool-members.yml
@@ -70,6 +70,25 @@
    loop_var: pv
  when: storage_test_pool.type == 'lvm'

- name: Check that blivet supports PV grow to fill
  ansible.builtin.script: >-
    scripts/does_library_support.py
    blivet.formats.lvmpv.LVMPhysicalVolume.grow_to_fill
  args:
    executable: "{{ ansible_python.executable }}"
  register: grow_supported
  changed_when: false

- name: Verify that PVs fill the whole devices when they should
  include_tasks: verify-pool-member-pvsize.yml
  loop: "{{ _storage_test_pool_pvs | default([]) }}"
  loop_control:
    loop_var: st_pool_pv
  when:
    - grow_supported.stdout | trim == 'True'
    - storage_test_pool.type == "lvm"
    - storage_test_pool.grow_to_fill | bool

- name: Check MD RAID
  include_tasks: verify-pool-md.yml

100 changes: 100 additions & 0 deletions tests/tests_lvm_pool_pv_grow.yml
@@ -0,0 +1,100 @@
---
- name: Test create disk and remove
  hosts: all
  become: true
  vars:
    storage_safe_mode: false
    mount_location1: '/opt/test1'
    mount_location2: '/opt/test2'
    pv_size: '8g'
    volume1_size: '2g'
    volume2_size: '3g'
  tags:
    - tests::lvm

  tasks:
    - name: Run the role
      include_role:
        name: linux-system-roles.storage

    - name: Mark tasks to be skipped
      set_fact:
        storage_skip_checks:
          - blivet_available
          - service_facts
          - "{{ (lookup('env',
                 'SYSTEM_ROLES_REMOVE_CLOUD_INIT') in ['', 'false']) |
                ternary('packages_installed', '') }}"

    - name: Get unused disks
      include_tasks: get_unused_disk.yml
      vars:
        max_return: 1
        min_size: "10g"

    - name: Create PV with a space to grow
      command: "pvcreate --setphysicalvolumesize {{ pv_size }} /dev/{{ unused_disks[0] }}"
      register: pvcreate_output
      changed_when: pvcreate_output.rc != 0

    # VG has to be present; otherwise the role automatically reformats the
    # empty PV, taking all available space
    - name: Create VG
      command: "vgcreate foo /dev/{{ unused_disks[0] }}"
      register: vgcreate_output
      changed_when: vgcreate_output.rc != 0

    - name: Create LVM
      include_role:
        name: linux-system-roles.storage
      vars:
        storage_pools:
          - name: foo
            disks: "{{ unused_disks }}"
            grow_to_fill: true
            state: present
            volumes:
              - name: test1
                size: "{{ volume1_size }}"
                mount_point: "{{ mount_location1 }}"
              - name: test2
                size: "{{ volume2_size }}"
                mount_point: "{{ mount_location2 }}"

    - name: Verify role results
      include_tasks: verify-role-results.yml

    - name: Rerun the task to verify idempotence
      include_role:
        name: linux-system-roles.storage
      vars:
        storage_pools:
          - name: foo
            disks: "{{ unused_disks }}"
            grow_to_fill: true
            state: present
            volumes:
              - name: test1
                size: "{{ volume1_size }}"
                mount_point: "{{ mount_location1 }}"
              - name: test2
                size: "{{ volume2_size }}"
                mount_point: "{{ mount_location2 }}"

    - name: Verify role results
      include_tasks: verify-role-results.yml

    - name: Remove 'foo' pool created above
      include_role:
        name: linux-system-roles.storage
      vars:
        storage_pools:
          - name: foo
            disks: "{{ unused_disks }}"
            state: "absent"
            volumes:
              - name: test1
              - name: test2

    - name: Verify role results
      include_tasks: verify-role-results.yml
24 changes: 24 additions & 0 deletions tests/verify-pool-member-pvsize.yml
@@ -0,0 +1,24 @@
---
- name: Get actual PV size
  command: "pvs --noheadings --nosuffix --units b -o SIZE {{ st_pool_pv }}"
  register: actual_pv_size
  changed_when: false

- name: Convert blkinfo size to bytes
  bsize:
    size: "{{ storage_test_blkinfo.info[st_pool_pv]['size'] }}"
  register: dev_size

- name: Verify each PV size
  assert:
    that: (dev_size.bytes - actual_pv_size.stdout | int) |
          abs / actual_pv_size.stdout | int < 0.04
    msg: >-
      PV resize failure; size difference too big
      (device size: {{ dev_size.bytes }})
      (actual PV size: {{ actual_pv_size.stdout }})

- name: Clean up test variables
  set_fact:
    actual_pv_size: null
    dev_size: null
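
The 4% tolerance in the assertion above allows for LVM metadata overhead and physical-extent rounding, which keep the PV slightly smaller than its backing device. A plain-Python sketch of the same check, with hypothetical sizes:

```python
# Same comparison as the assert above, with made-up numbers.
dev_size_bytes = 10 * 1024**3       # block device size from blkinfo (10 GiB, hypothetical)
actual_pv_bytes = 10_733_223_936    # PV size reported by `pvs` (hypothetical, ~4 MiB smaller)

# LVM reserves space for metadata and rounds the usable area down to whole
# physical extents, so a small difference is expected even after the resize.
relative_diff = abs(dev_size_bytes - actual_pv_bytes) / actual_pv_bytes
assert relative_diff < 0.04, "PV resize failure; size difference too big"
```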
