
Fix issue with PITR (#584)
If the PITR was performed by the maybe_pg_upgrade.py script and was not followed by a major upgrade, archive_mode was left set to off.
CyberDem0n authored May 19, 2021
1 parent 0feeb7d commit 4454a37
Showing 3 changed files with 18 additions and 8 deletions.
postgres-appliance/bootstrap/maybe_pg_upgrade.py (11 changes: 8 additions & 3 deletions)
@@ -41,10 +41,15 @@ def wait_end_of_recovery(postgresql):
         logger.info('waiting for end of recovery of the old cluster')


-def perform_pitr(postgresql, cluster_version, config):
+def perform_pitr(postgresql, cluster_version, bin_version, config):
     logger.info('Trying to perform point-in-time recovery')
+
+    config[config['method']]['command'] = 'true'
     try:
-        if not postgresql.start_old_cluster(config, cluster_version):
+        if bin_version == cluster_version:
+            if not postgresql.bootstrap.bootstrap(config):
+                raise Exception('Point-in-time recovery failed')
+        elif not postgresql.start_old_cluster(config, cluster_version):
             raise Exception('Failed to start the cluster with old postgres')
         return wait_end_of_recovery(postgresql)
     except Exception:
@@ -79,7 +84,7 @@ def main():
     logger.info('Cluster version: %s, bin version: %s', cluster_version, bin_version)
     assert float(cluster_version) <= float(bin_version)

-    perform_pitr(upgrade, cluster_version, config['bootstrap'])
+    perform_pitr(upgrade, cluster_version, bin_version, config['bootstrap'])

     if cluster_version == bin_version:
         return 0
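Reading the two hunks together: when the data directory and the installed binaries are already the same major version, no upgrade will follow, so the PITR now goes through a plain bootstrap and the archive_mode override inside start_old_cluster() is never applied. A condensed sketch of the new branching, with comments added for illustration (the surrounding objects follow the names in the diff; the try/except and wait_end_of_recovery() are omitted):

    def perform_pitr(postgresql, cluster_version, bin_version, config):
        # neutralize the post-bootstrap command; only the restore itself should run
        config[config['method']]['command'] = 'true'

        if bin_version == cluster_version:
            # same major version: plain bootstrap, archive_mode is left untouched
            if not postgresql.bootstrap.bootstrap(config):
                raise Exception('Point-in-time recovery failed')
        elif not postgresql.start_old_cluster(config, cluster_version):
            # older data directory: start_old_cluster() temporarily forces
            # archive_mode=off, and the major upgrade that follows restores it
            raise Exception('Failed to start the cluster with old postgres')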
postgres-appliance/major_upgrade/pg_upgrade.py (6 changes: 1 addition & 5 deletions)
@@ -37,16 +37,12 @@ def restore_shared_preload_libraries(self):
     def start_old_cluster(self, config, version):
         self.set_bin_dir(version)

-        version = float(version)
-
-        config[config['method']]['command'] = 'true'
-
         # make sure we don't archive wals from the old version
         self._old_config_values = {'archive_mode': self.config.get('parameters').get('archive_mode')}
         self.config.get('parameters')['archive_mode'] = 'off'

         # and don't load shared_preload_libraries which don't exist in the old version
-        self.adjust_shared_preload_libraries(version)
+        self.adjust_shared_preload_libraries(float(version))

         return self.bootstrap.bootstrap(config)

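The dictionary saved in self._old_config_values is what allows the upgrade path to switch archiving back on once the major upgrade has run; the bug was that the PITR-only path never reached that restore step. A minimal, self-contained sketch of the save/override/restore pattern (illustration only: the class and the restore hook below are stand-ins, not Spilo's actual API):

    class UpgradeSketch:
        """Stand-in for the upgrade object; shows only the archive_mode handling."""

        def __init__(self, parameters):
            self.parameters = parameters        # e.g. {'archive_mode': 'on', ...}
            self._old_config_values = {}

        def disable_archiving_for_old_cluster(self):
            # remember the operator-configured value before overriding it
            self._old_config_values['archive_mode'] = self.parameters.get('archive_mode')
            self.parameters['archive_mode'] = 'off'

        def restore_old_config_values(self):
            # must run after the major upgrade; if it is never reached
            # (the PITR-only case before this commit), archive_mode stays off
            for name, value in self._old_config_values.items():
                if value is None:
                    self.parameters.pop(name, None)
                else:
                    self.parameters[name] = value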
postgres-appliance/tests/test_spilo.sh (9 changes: 9 additions & 0 deletions)
@@ -263,6 +263,11 @@ function verify_clone_with_wale_upgrade_to_13() {
     wait_query "$1" "SELECT current_setting('server_version_num')::int/10000" 13 2> /dev/null
 }

+function verify_archive_mode_is_on() {
+    archive_mode=$(docker_exec "$1" "psql -U postgres -tAc \"SHOW archive_mode\"")
+    [ "$archive_mode" = "on" ]
+}
+
 function run_test() {
     "$@" || log_error "Test case $1 FAILED"
     echo -e "Test case $1 ${GREEN}PASSED${RESET}"
@@ -283,6 +288,7 @@ function test_spilo() {
     # run_test test_failed_inplace_upgrade_big_replication_lag "$container"

     wait_zero_lag "$container"
+    run_test verify_archive_mode_is_on "$container"
     wait_backup "$container"

     local upgrade_container
@@ -302,6 +308,7 @@

     run_test verify_clone_with_wale_upgrade_to_13 "$upgrade_container"

+    run_test verify_archive_mode_is_on "$upgrade_container"
     wait_backup "$upgrade_container"
     docker rm -f "$upgrade_container"

@@ -324,6 +331,7 @@
     run_test test_envdir_updated_to_x 12

     find_leader "$clone13_container"
+    run_test verify_archive_mode_is_on "$clone13_container"

     wait_backup "$container"

@@ -355,6 +363,7 @@
     run_test verify_clone_with_wale_upgrade "$upgrade_replica_container"

     run_test verify_clone_with_basebackup_upgrade "$basebackup_container"
+    run_test verify_archive_mode_is_on "$basebackup_container"
 }

 function main() {
