Skip to content

Commit

Permalink
Ansible deployment state (#17653)
Browse files Browse the repository at this point in the history
New status flag in the base class to record whether Ansible has been executed or
not. Use the flag to call Ansible deregister only when needed.
Change the mr_test schedule to use qesap_cleanup in place of peering_destroy.
Both peering_destroy and qesap_cleanup do exactly the same thing from
the point of view of the network peering to the IBSm;
qesap_cleanup additionally destroys the cluster afterwards.
  • Loading branch information
mpagot authored Sep 25, 2023
1 parent fc19dde commit 7c02f5f
Show file tree
Hide file tree
Showing 20 changed files with 191 additions and 128 deletions.
6 changes: 4 additions & 2 deletions lib/mr_test_lib.pm
Original file line number Diff line number Diff line change
Expand Up @@ -65,8 +65,10 @@ sub load_mr_tests {
# [debug] ||| finished ssh_interactive_end publiccloud
# [debug] ||| starting 1_saptune_notes .../mr_test_run.pm
# [debug] ||| finished 1_saptune_notes lib
loadtest_mr_test('tests/publiccloud/ssh_interactive_end', run_args => $args) if get_var('PUBLIC_CLOUD_SLES4SAP');
loadtest_mr_test('tests/sles4sap/publiccloud/peering_destroy', run_args => $args) if get_var('IS_MAINTENANCE');
if (get_var('PUBLIC_CLOUD_SLES4SAP')) {
loadtest_mr_test('tests/publiccloud/ssh_interactive_end', run_args => $args);
loadtest_mr_test('tests/sles4sap/publiccloud/qesap_cleanup', run_args => $args);
}
}

1;
56 changes: 56 additions & 0 deletions lib/sles4sap_publiccloud.pm
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,8 @@ our @EXPORT = qw(
create_instance_data
deployment_name
delete_network_peering
create_playbook_section_list
create_hana_vars_section
);

=head2 run_cmd
Expand Down Expand Up @@ -608,4 +610,58 @@ sub delete_network_peering {
}
}
=head2 create_playbook_section_list
Detects HANA/HA scenario from openQA variables and returns a list of ansible playbooks to include
in the "ansible: create:" section of config.yaml file.
=cut
sub create_playbook_section_list {
    my ($ha_enabled) = @_;
    my @playbooks = ();

    # Registration playbook always goes first;
    # "QESAP_SCC_NO_REGISTER" skips scc registration via ansible
    unless (get_var('QESAP_SCC_NO_REGISTER')) {
        my $reg_playbook = 'registration.yaml -e reg_code=' . get_required_var('SCC_REGCODE_SLES4SAP') . " -e email_address=''";
        push @playbooks, $reg_playbook;
    }

    # SLES4SAP/HA related playbooks
    if ($ha_enabled) {
        push @playbooks, 'pre-cluster.yaml';
        push @playbooks, 'sap-hana-preconfigure.yaml -e use_sapconf=' . get_required_var('USE_SAPCONF');
        # SBD fencing needs an additional preparation playbook
        push @playbooks, 'cluster_sbd_prep.yaml' if (check_var('FENCING_MECHANISM', 'sbd'));
        push @playbooks, qw(
          sap-hana-storage.yaml
          sap-hana-download-media.yaml
          sap-hana-install.yaml
          sap-hana-system-replication.yaml
          sap-hana-system-replication-hooks.yaml
          sap-hana-cluster.yaml
        );
    }
    return (\@playbooks);
}
=head2 create_hana_vars_section
Detects HANA/HA scenario from openQA variables and creates "terraform: variables:" section in config.yaml file.
=cut
sub create_hana_vars_section {
    my ($ha_enabled) = @_;

    # Cluster related setup: only populated for HA scenarios,
    # otherwise an empty hash ref is returned.
    my %hana_vars;
    # Truthy test instead of '== 1': avoids an uninitialized-value warning
    # when $ha_enabled is undef and matches create_playbook_section_list
    if ($ha_enabled) {
        my $sid = get_required_var('INSTANCE_SID');
        $hana_vars{sap_hana_install_software_directory} = get_required_var('HANA_MEDIA');
        $hana_vars{sap_hana_install_master_password} = get_required_var('_HANA_MASTER_PW');
        $hana_vars{sap_hana_install_sid} = $sid;
        $hana_vars{sap_hana_install_instance_number} = get_required_var('INSTANCE_ID');
        $hana_vars{sap_domain} = get_var('SAP_DOMAIN', 'qesap.example.com');
        $hana_vars{primary_site} = get_var('HANA_PRIMARY_SITE', 'site_a');
        $hana_vars{secondary_site} = get_var('HANA_SECONDARY_SITE', 'site_b');
        # Reuse the already validated SID instead of re-reading the
        # variable with an unchecked get_var() call
        set_var('SAP_SIDADM', lc($sid . 'adm'));
    }
    return (\%hana_vars);
}
1;
28 changes: 23 additions & 5 deletions lib/sles4sap_publiccloud_basetest.pm
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ use qesapdeployment;
use sles4sap_publiccloud;
use publiccloud::utils;

our @EXPORT = qw(cleanup);
our @EXPORT = qw(cleanup import_context);


sub cleanup {
Expand All @@ -26,17 +26,21 @@ sub cleanup {
record_info('Cleanup',
join(' ',
'cleanup_called:', $self->{cleanup_called} // 'undefined',
'network_peering_present:', $self->{network_peering_present} // 'undefined'));
'network_peering_present:', $self->{network_peering_present} // 'undefined',
'ansible_present:', $self->{ansible_present} // 'undefined'));
# Do not run destroy if already executed
return if ($self->{cleanup_called});
$self->{cleanup_called} = 1;

qesap_upload_logs();
delete_network_peering() if ($self->{network_peering_present});
if ($self->{network_peering_present}) {
delete_network_peering();
$self->{network_peering_present} = 0;
}

my @cmd_list;
# Only run the Ansible deregister if the inventory is present
push(@cmd_list, 'ansible') if (!script_run 'test -f ' . qesap_get_inventory(provider => get_required_var('PUBLIC_CLOUD_PROVIDER')));
# Only run the Ansible deregister if Ansible has been executed
push(@cmd_list, 'ansible') if ($self->{ansible_present});

# Terraform destroy can be executed in any case
push(@cmd_list, 'terraform');
Expand All @@ -49,6 +53,7 @@ sub cleanup {
if ($cleanup_cmd_rc[0] == 0) {
diag(ucfirst($command) . " cleanup attempt # $_ PASSED.");
record_info("Clean $command", ucfirst($command) . ' cleanup PASSED.');
$self->{ansible_present} = 0 if ($command eq 'ansible');
last;
}
else {
Expand All @@ -65,6 +70,19 @@ sub cleanup {
record_info('Cleanup finished');
}

sub import_context {
    my ($self, $run_args) = @_;

    # Take over the deployment context handed on by the previous test module
    $self->{instances} = $run_args->{instances};

    # State flags are only ever raised here, never cleared
    for my $flag (qw(network_peering_present ansible_present)) {
        $self->{$flag} = 1 if ($run_args->{$flag});
    }

    my @context = (
        'cleanup_called:', $self->{cleanup_called} // 'undefined',
        'instances:', $self->{instances} // 'undefined',
        'network_peering_present:', $self->{network_peering_present} // 'undefined',
        'ansible_present:', $self->{ansible_present} // 'undefined',
    );
    record_info('CONTEXT LOG', join(' ', @context));
}

sub post_fail_hook {
my ($self) = @_;
if (get_var('QESAP_NO_CLEANUP_ON_FAILURE')) {
Expand Down
6 changes: 2 additions & 4 deletions tests/sles4sap/publiccloud/add_server_to_hosts.pm
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,9 @@ sub test_flags {

sub run {
my ($self, $run_args) = @_;
my $instances = $run_args->{instances};
$self->{network_peering_present} = 1 if ($run_args->{network_peering_present});
record_info('CONTEXT LOG', "instances:$instances network_peering_present:$self->{network_peering_present}");
$self->import_context($run_args);

foreach my $instance (@{$instances}) {
foreach my $instance (@{$self->{instances}}) {
next if ($instance->{'instance_id'} !~ m/vmhana/);
record_info("$instance");

Expand Down
3 changes: 1 addition & 2 deletions tests/sles4sap/publiccloud/check_ibsm_embargoed.pm
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,8 @@ sub test_flags {

sub run() {
my ($self, $run_args) = @_;
$self->import_context($run_args);
my $instance = $run_args->{my_instance};
$self->{network_peering_present} = 1 if ($run_args->{network_peering_present});
record_info('CONTEXT LOG', "instance:$instance network_peering_present:$self->{network_peering_present}");

my @repos = split(/,/, get_var('INCIDENT_REPO'));
my $count = 0;
Expand Down
11 changes: 4 additions & 7 deletions tests/sles4sap/publiccloud/cluster_add_repos.pm
Original file line number Diff line number Diff line change
Expand Up @@ -13,27 +13,24 @@ sub test_flags {
return {fatal => 1, publiccloud_multi_module => 1};
}

sub run() {
sub run {
my ($self, $run_args) = @_;
my $instance = $run_args->{my_instance};
$self->{network_peering_present} = 1 if ($run_args->{network_peering_present});
record_info('CONTEXT LOG', "instance:$instance network_peering_present:$self->{network_peering_present}");
$self->import_context($run_args);

set_var('MAINT_TEST_REPO', get_var('INCIDENT_REPO')) if get_var('INCIDENT_REPO');
my $prov = get_required_var('PUBLIC_CLOUD_PROVIDER');
my @repos = split(/,/, get_var('MAINT_TEST_REPO'));
my $count = 0;

while (defined(my $maintrepo = shift @repos)) {
next if $maintrepo =~ /^\s*$/;
foreach my $instance (@{$run_args->{instances}}) {
foreach my $instance (@{$self->{instances}}) {
next if ($instance->{'instance_id'} !~ m/vmhana/);
$instance->run_ssh_command(cmd => "sudo zypper --no-gpg-checks ar -f -n TEST_$count $maintrepo TEST_$count",
username => 'cloudadmin');
}
$count++;
}
foreach my $instance (@{$run_args->{instances}}) {
foreach my $instance (@{$self->{instances}}) {
next if ($instance->{'instance_id'} !~ m/vmhana/);
$instance->run_ssh_command(cmd => 'sudo zypper -n ref', username => 'cloudadmin', timeout => 1500);
}
Expand Down
8 changes: 4 additions & 4 deletions tests/sles4sap/publiccloud/general_patch_and_reboot.pm
Original file line number Diff line number Diff line change
Expand Up @@ -15,25 +15,25 @@ use testapi;
use registration;
use utils;
use publiccloud::ssh_interactive qw(select_host_console);
use publiccloud::utils qw(kill_packagekit);
use publiccloud::utils qw(kill_packagekit is_azure);

sub test_flags {
return {fatal => 1, publiccloud_multi_module => 1};
}

sub run {
my ($self, $run_args) = @_;
$self->{network_peering_present} = 1 if ($run_args->{network_peering_present});
$self->import_context($run_args);
select_host_console(); # select console on the host, not the PC instance

foreach my $instance (@{$run_args->{instances}}) {
foreach my $instance (@{$self->{instances}}) {
next if ($instance->{'instance_id'} !~ m/vmhana/);
record_info("$instance");

my $remote = '-o ControlMaster=no -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' . $instance->username . '@' . $instance->public_ip;

my $cmd_time = time();
my $ref_timeout = check_var('PUBLIC_CLOUD_PROVIDER', 'AZURE') ? 3600 : 240;
my $ref_timeout = is_azure ? 3600 : 240;
kill_packagekit($instance);
$instance->ssh_script_retry("sudo zypper -n --gpg-auto-import-keys ref", timeout => $ref_timeout, retry => 6, delay => 60);
record_info('zypper ref time', 'The command zypper -n ref took ' . (time() - $cmd_time) . ' seconds.');
Expand Down
11 changes: 9 additions & 2 deletions tests/sles4sap/publiccloud/hana_sr_schedule_cleanup.pm
Original file line number Diff line number Diff line change
Expand Up @@ -10,18 +10,25 @@ package hana_sr_schedule_cleanup;
use strict;
use warnings FATAL => 'all';
use base 'sles4sap_publiccloud_basetest';
use main_common 'loadtest';
use testapi;
use main_common 'loadtest';

sub test_flags {
return {fatal => 1, publiccloud_multi_module => 1};
}

sub run {
my ($self, $run_args) = @_;
$self->{network_peering_present} = 1 if ($run_args->{network_peering_present});

# Needed to have peering and ansible state propagated in post_fail_hook
$self->import_context($run_args);

record_info("Schedule", "Schedule cleanup job");
# A test module only to schedule another test module
# It is needed to be sure that qesap_cleanup is executed as last test step
# Direct schedule of qesap_cleanup is not possible
# due to the fact that hana_sr_schedule_cleanup is scheduled
# with other test modules that are also using loadtest
loadtest('sles4sap/publiccloud/qesap_cleanup', name => "Cleanup resources", run_args => $run_args, @_);
}

Expand Down
6 changes: 4 additions & 2 deletions tests/sles4sap/publiccloud/hana_sr_schedule_deployment.pm
Original file line number Diff line number Diff line change
Expand Up @@ -10,16 +10,18 @@ package hana_sr_schedule_deployment;
use strict;
use warnings FATAL => 'all';
use base 'sles4sap_publiccloud_basetest';
use main_common 'loadtest';
use testapi;
use main_common 'loadtest';

sub test_flags {
return {fatal => 1, publiccloud_multi_module => 1};
}

sub run {
my ($self, $run_args) = @_;
$self->{network_peering_present} = 1 if ($run_args->{network_peering_present});

# Needed to have peering and ansible state propagated in post_fail_hook
$self->import_context($run_args);

if (get_var('QESAP_DEPLOYMENT_IMPORT')) {
loadtest('sles4sap/publiccloud/qesap_reuse_infra', name => 'prepare_existing_infrastructure', run_args => $run_args, @_);
Expand Down
6 changes: 4 additions & 2 deletions tests/sles4sap/publiccloud/hana_sr_schedule_primary_tests.pm
Original file line number Diff line number Diff line change
Expand Up @@ -13,16 +13,18 @@ package hana_sr_schedule_primary_tests;
use strict;
use warnings FATAL => 'all';
use base 'sles4sap_publiccloud_basetest';
use main_common 'loadtest';
use testapi;
use main_common 'loadtest';

sub test_flags {
return {fatal => 1, publiccloud_multi_module => 1};
}

sub run {
my ($self, $run_args) = @_;
$self->{network_peering_present} = 1 if ($run_args->{network_peering_present});

# Needed to have peering and ansible state propagated in post_fail_hook
$self->import_context($run_args);

record_info("Schedule", "Executing tests on master Hana DB");
# 'HANASR_PRIMARY_ACTIONS' - define to override test flow
Expand Down
6 changes: 4 additions & 2 deletions tests/sles4sap/publiccloud/hana_sr_schedule_replica_tests.pm
Original file line number Diff line number Diff line change
Expand Up @@ -13,16 +13,18 @@ package hana_sr_schedule_replica_tests;
use strict;
use warnings FATAL => 'all';
use base 'sles4sap_publiccloud_basetest';
use main_common 'loadtest';
use testapi;
use main_common 'loadtest';

sub test_flags {
return {fatal => 1, publiccloud_multi_module => 1};
}

sub run {
my ($self, $run_args) = @_;
$self->{network_peering_present} = 1 if ($run_args->{network_peering_present});

# Needed to have peering and ansible state propagated in post_fail_hook
$self->import_context($run_args);

record_info("Schedule", "Executing tests on secondary site (replica)");
# 'HANASR_SECONDARY_ACTIONS' - define to override test flow
Expand Down
7 changes: 4 additions & 3 deletions tests/sles4sap/publiccloud/hana_sr_takeover.pm
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@ use strict;
use warnings FATAL => 'all';
use base 'sles4sap_publiccloud_basetest';
use testapi;
use publiccloud::utils;
use sles4sap_publiccloud;
use publiccloud::utils;
use serial_terminal 'select_serial_terminal';

sub test_flags {
Expand All @@ -19,8 +19,9 @@ sub test_flags {

sub run {
my ($self, $run_args) = @_;
$self->{network_peering_present} = 1 if ($run_args->{network_peering_present});
$self->{instances} = $run_args->{instances};

# Needed to have peering and ansible state propagated in post_fail_hook
$self->import_context($run_args);

select_serial_terminal;
my $test_name = $self->{name};
Expand Down
7 changes: 4 additions & 3 deletions tests/sles4sap/publiccloud/hana_sr_test_secondary.pm
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,8 @@
use strict;
use warnings FATAL => 'all';
use base 'sles4sap_publiccloud_basetest';
use sles4sap_publiccloud;
use testapi;
use sles4sap_publiccloud;
use serial_terminal 'select_serial_terminal';
use Time::HiRes 'sleep';

Expand All @@ -23,8 +23,9 @@ sub test_flags {

sub run {
my ($self, $run_args) = @_;
$self->{network_peering_present} = 1 if ($run_args->{network_peering_present});
$self->{instances} = $run_args->{instances};

# Needed to have peering and ansible state propagated in post_fail_hook
$self->import_context($run_args);
croak('site_b is missing or undefined in run_args') if (!$run_args->{site_b});

my $hana_start_timeout = bmwqemu::scale_timeout(600);
Expand Down
Loading

0 comments on commit 7c02f5f

Please sign in to comment.