diff --git a/spotty/__init__.py b/spotty/__init__.py
index 58d478a..3f262a6 100644
--- a/spotty/__init__.py
+++ b/spotty/__init__.py
@@ -1 +1 @@
-__version__ = '1.2.0'
+__version__ = '1.2.1'
diff --git a/spotty/commands/run.py b/spotty/commands/run.py
index 8ac8c21..0df34dc 100644
--- a/spotty/commands/run.py
+++ b/spotty/commands/run.py
@@ -46,7 +46,7 @@ def _run(self, instance_manager: AbstractInstanceManager, args: Namespace, outpu
         session_name = args.session_name if args.session_name else 'spotty-script-%s' % script_name

         # run the script on the instance
-        run_script(host=instance_manager.ip_address,
+        run_script(host=instance_manager.get_ip_address(),
                    port=instance_manager.ssh_port,
                    user=instance_manager.ssh_user,
                    key_path=instance_manager.ssh_key_path,
diff --git a/spotty/commands/ssh.py b/spotty/commands/ssh.py
index a01372c..de58e9f 100644
--- a/spotty/commands/ssh.py
+++ b/spotty/commands/ssh.py
@@ -39,6 +39,6 @@ def _run(self, instance_manager: AbstractInstanceManager, args: Namespace, outpu
         remote_cmd = subprocess.list2cmdline(remote_cmd)

         # connect to the instance
-        ssh_command = get_ssh_command(instance_manager.ip_address, instance_manager.ssh_port,
+        ssh_command = get_ssh_command(instance_manager.get_ip_address(), instance_manager.ssh_port,
                                       instance_manager.ssh_user, instance_manager.ssh_key_path, remote_cmd)
         subprocess.call(ssh_command)
diff --git a/spotty/commands/start.py b/spotty/commands/start.py
index c9f1a83..f7542cf 100644
--- a/spotty/commands/start.py
+++ b/spotty/commands/start.py
@@ -29,4 +29,4 @@ def _run(self, instance_manager: AbstractInstanceManager, args: Namespace, outpu
         output.write('\nThe instance was successfully started.\n'
                      '\n%s\n'
                      '\nUse the "spotty ssh%s" command to connect to the Docker container.\n'
-                     % (instance_manager.status_text, instance_name))
+                     % (instance_manager.get_status_text(), instance_name))
diff --git a/spotty/commands/status.py b/spotty/commands/status.py
index 6e1b76e..bce2867 100644
--- a/spotty/commands/status.py
+++ b/spotty/commands/status.py
@@ -10,4 +10,4 @@ class StatusCommand(AbstractConfigCommand):
     description = 'Print information about the instance'

     def _run(self, instance_manager: AbstractInstanceManager, args: Namespace, output: AbstractOutputWriter):
-        output.write(instance_manager.status_text)
+        output.write(instance_manager.get_status_text())
diff --git a/spotty/config/validation.py b/spotty/config/validation.py
index 454eb07..807a3a8 100644
--- a/spotty/config/validation.py
+++ b/spotty/config/validation.py
@@ -160,6 +160,7 @@ def validate_old_config(data, project_dir):
             Optional('availabilityZone', default=''): str,
             Optional('subnetId', default=''): str,
             'instanceType': str,
+            Optional('onDemandInstance', default=False): bool,
             Optional('amiName', default='SpottyAMI'): str,
             Optional('keyName', default=''): str,
             Optional('rootVolumeSize', default=0): And(Or(int, str), Use(str),
@@ -259,6 +260,7 @@ def convert_old_config(config):
             'availabilityZone': config['instance']['availabilityZone'],
             'subnetId': config['instance']['subnetId'],
             'instanceType': config['instance']['instanceType'],
+            'onDemandInstance': config['instance']['onDemandInstance'],
             'amiName': config['instance']['amiName'],
             'rootVolumeSize': config['instance']['rootVolumeSize'],
             'dockerDataRoot': config['instance']['docker']['dataRoot'],
diff --git a/spotty/providers/abstract_instance_manager.py b/spotty/providers/abstract_instance_manager.py
index 9a775af..b3ac369 100644
--- a/spotty/providers/abstract_instance_manager.py
+++ b/spotty/providers/abstract_instance_manager.py
@@ -45,21 +45,31 @@ def download(self, download_filters: list, output: AbstractOutputWriter, dry_run
         """Downloads files from the instance."""
         raise NotImplementedError

-    @property
     @abstractmethod
-    def status_text(self) -> str:
-        """Information about the running instance that will be
-        shown to the user once the instance is started or the "status"
-        command is called.
+    def get_status_text(self) -> str:
+        """Returns information about the started instance.
+
+        It will be shown to the user once the instance is started and by using the "status" command.
         """
         raise NotImplementedError

-    @property
     @abstractmethod
-    def ip_address(self) -> str:
-        """Returns an IP address of the running instance."""
+    def get_public_ip_address(self) -> str:
+        """Returns a public IP address of the running instance."""
         raise NotImplementedError

+    def get_ip_address(self):
+        """Returns an IP address that will be used for SSH connections."""
+        if self._instance_config.local_ssh_port:
+            return '127.0.0.1'
+
+        public_ip_address = self.get_public_ip_address()
+        if not public_ip_address:
+            raise ValueError('The running instance doesn\'t have a public IP address.\n'
+                             'Use the "localSshPort" parameter if you want to create a tunnel to the instance.')
+
+        return public_ip_address
+
     @property
     def ssh_port(self) -> int:
         if self._instance_config.local_ssh_port:
diff --git a/spotty/providers/aws/aws_resources/instance.py b/spotty/providers/aws/aws_resources/instance.py
index d82a7f6..cdc8108 100644
--- a/spotty/providers/aws/aws_resources/instance.py
+++ b/spotty/providers/aws/aws_resources/instance.py
@@ -59,7 +59,7 @@ def launch_time(self) -> datetime:

     @property
     def lifecycle(self) -> str:
-        return self._instance_info['InstanceLifecycle']
+        return self._instance_info.get('InstanceLifecycle')

     def get_spot_price(self):
         """Get current Spot Instance price for this instance."""
diff --git a/spotty/providers/aws/aws_resources/volume.py b/spotty/providers/aws/aws_resources/volume.py
index 3d702fc..e510924 100644
--- a/spotty/providers/aws/aws_resources/volume.py
+++ b/spotty/providers/aws/aws_resources/volume.py
@@ -42,8 +42,12 @@ def size(self) -> int:
     def availability_zone(self) -> str:
         return self._volume_info['AvailabilityZone']

+    @property
+    def state(self) -> str:
+        return self._volume_info['State']
+
     def is_available(self):
-        return self._volume_info['State'] == 'available'
+        return self.state == 'available'

     def create_snapshot(self) -> Snapshot:
         snapshot_info = self._ec2.create_snapshot(
diff --git a/spotty/providers/aws/deployment/cf_templates/ami_template.py b/spotty/providers/aws/deployment/cf_templates/ami_template.py
index f2bd88c..9defa01 100644
--- a/spotty/providers/aws/deployment/cf_templates/ami_template.py
+++ b/spotty/providers/aws/deployment/cf_templates/ami_template.py
@@ -27,7 +27,10 @@ def prepare_ami_template(availability_zone: str, subnet_id: str, debug_mode: boo
             {
                 'SubnetId': subnet_id,
                 'DeviceIndex': 0,
+                'Groups': template['Resources']['InstanceLaunchTemplate']['Properties']['LaunchTemplateData'][
+                    'SecurityGroupIds'],
             }]
+        del template['Resources']['InstanceLaunchTemplate']['Properties']['LaunchTemplateData']['SecurityGroupIds']

     # run on-demand instance
     if on_demand:
diff --git a/spotty/providers/aws/deployment/cf_templates/instance_template.py b/spotty/providers/aws/deployment/cf_templates/instance_template.py
index 56d0c74..e517069 100644
--- a/spotty/providers/aws/deployment/cf_templates/instance_template.py
+++ b/spotty/providers/aws/deployment/cf_templates/instance_template.py
@@ -157,7 +157,8 @@ def _get_volume_resources(volumes: List[AbstractInstanceVolume], output: Abstrac
             if ec2_volume:
                 # check if the volume is available
                 if not ec2_volume.is_available():
-                    raise ValueError('EBS volume "%s" is not available.' % volume.ec2_volume_name)
+                    raise ValueError('EBS volume "%s" is not available (state: %s).'
+                                     % (volume.ec2_volume_name, ec2_volume.state))

                 # check size of the volume
                 if volume.size and (volume.size != ec2_volume.size):
diff --git a/spotty/providers/aws/deployment/instance_deployment.py b/spotty/providers/aws/deployment/instance_deployment.py
index 1a2c99d..7e4f3a6 100644
--- a/spotty/providers/aws/deployment/instance_deployment.py
+++ b/spotty/providers/aws/deployment/instance_deployment.py
@@ -4,6 +4,8 @@
 from spotty.config.project_config import ProjectConfig
 from spotty.config.validation import is_subdir
 from spotty.deployment.abstract_instance_volume import AbstractInstanceVolume
+from spotty.providers.aws.aws_resources.snapshot import Snapshot
+from spotty.providers.aws.aws_resources.volume import Volume
 from spotty.providers.aws.config.instance_config import VOLUME_TYPE_EBS
 from spotty.deployment.container_deployment import ContainerDeployment, VolumeMount
 from spotty.providers.aws.deployment.abstract_aws_deployment import AbstractAwsDeployment
@@ -192,9 +194,6 @@ def _get_template_parameters(self, instance_profile_arn: str, instance_name: str

     def apply_deletion_policies(self, output: AbstractOutputWriter):
         """Applies deletion policies to the EBS volumes."""
-        wait_snapshots = []
-        delete_snapshots = []
-        delete_volumes = []

         # get volumes
         volumes = self._get_volumes()
@@ -206,6 +205,7 @@ def apply_deletion_policies(self, output: AbstractOutputWriter):
             return

         # apply deletion policies
+        wait_snapshots = []
         for volume in ebs_volumes:
             # get EC2 volume
             try:
@@ -219,7 +219,8 @@ def apply_deletion_policies(self, output: AbstractOutputWriter):
                 continue

             if not ec2_volume.is_available():
-                output.write('- volume "%s" is not available' % volume.ec2_volume_name)
+                output.write('- volume "%s" is not available (state: %s)'
+                             % (volume.ec2_volume_name, ec2_volume.state))
                 continue

             # apply deletion policies
@@ -228,31 +229,29 @@ def apply_deletion_policies(self, output: AbstractOutputWriter):
                 output.write('- volume "%s" is retained' % ec2_volume.name)

             elif volume.deletion_policy == EbsVolume.DP_DELETE:
-                # volume will be deleted later
-                delete_volumes.append(ec2_volume)
+                # delete EBS volume
+                self._delete_ec2_volume(ec2_volume, output)

             elif volume.deletion_policy == EbsVolume.DP_CREATE_SNAPSHOT \
                     or volume.deletion_policy == EbsVolume.DP_UPDATE_SNAPSHOT:
                 try:
-                    # rename or delete previous snapshot
+                    # rename a previous snapshot
                     prev_snapshot = volume.get_snapshot()
                     if prev_snapshot:
-                        # rename previous snapshot
-                        if volume.deletion_policy == EbsVolume.DP_CREATE_SNAPSHOT:
-                            prev_snapshot.rename('%s-%d' % (prev_snapshot.name, prev_snapshot.creation_time))
-
-                        # once new snapshot will be created, the old one should be deleted
-                        if volume.deletion_policy == EbsVolume.DP_UPDATE_SNAPSHOT:
-                            delete_snapshots.append(prev_snapshot)
+                        prev_snapshot.rename('%s-%d' % (prev_snapshot.name, prev_snapshot.creation_time))

-                    output.write('- creating snapshot for the volume "%s"...' % ec2_volume.name)
+                    output.write('- creating a snapshot for the volume "%s"...' % ec2_volume.name)

-                    # create new snapshot
+                    # create a new snapshot
                     new_snapshot = ec2_volume.create_snapshot()
-                    wait_snapshots.append((new_snapshot, ec2_volume))

-                    # once the snapshot will be created, the volume should be deleted
-                    delete_volumes.append(ec2_volume)
+                    # delete the EBS volume and a previous snapshot only after a new snapshot will be created
+                    wait_snapshots.append({
+                        'new_snapshot': new_snapshot,
+                        'prev_snapshot': prev_snapshot,
+                        'ec2_volume': ec2_volume,
+                        'deletion_policy': volume.deletion_policy,
+                    })
                 except Exception as e:
                     output.write('- snapshot for the volume "%s" was not created. Error: %s'
                                  % (volume.ec2_volume_name, str(e)))
@@ -261,28 +260,36 @@ def apply_deletion_policies(self, output: AbstractOutputWriter):
                 raise ValueError('Unsupported deletion policy: "%s".' % volume.deletion_policy)

         # wait until all snapshots will be created
-        for snapshot, ec2_volume in wait_snapshots:
+        for resources in wait_snapshots:
             try:
-                snapshot.wait_snapshot_completed()
-                output.write('- snapshot for the volume "%s" was created' % snapshot.name)
+                resources['new_snapshot'].wait_snapshot_completed()
+                output.write('- snapshot for the volume "%s" was created' % resources['new_snapshot'].name)
             except Exception as e:
-                output.write('- snapshot "%s" was not created. Error: %s' % (snapshot.name, str(e)))
+                output.write('- snapshot "%s" was not created. Error: %s' % (resources['new_snapshot'].name, str(e)))
+                continue

-        # delete old snapshots
-        for snapshot in delete_snapshots:
-            try:
-                snapshot.delete()
-                output.write('- previous snapshot "%s" was deleted' % snapshot.name)
-            except Exception as e:
-                output.write('- previous snapshot "%s" was not deleted. Error: %s' % (snapshot.name, str(e)))
+            # delete a previous snapshot if it's the "update_snapshot" deletion policy
+            if (resources['deletion_policy'] == EbsVolume.DP_UPDATE_SNAPSHOT) and resources['prev_snapshot']:
+                self._delete_snapshot(resources['prev_snapshot'], output)

-        # delete volumes
-        for ec2_volume in delete_volumes:
-            try:
-                ec2_volume.delete()
-                output.write('- volume "%s" was deleted' % ec2_volume.name)
-            except Exception as e:
-                output.write('- volume "%s" was not deleted. Error: %s' % (ec2_volume.name, str(e)))
+            # delete the EBS volume
+            self._delete_ec2_volume(resources['ec2_volume'], output)
+
+    @staticmethod
+    def _delete_ec2_volume(ec2_volume: Volume, output: AbstractOutputWriter):
+        try:
+            ec2_volume.delete()
+            output.write('- volume "%s" was deleted' % ec2_volume.name)
+        except Exception as e:
+            output.write('- volume "%s" was not deleted. Error: %s' % (ec2_volume.name, str(e)))
+
+    @staticmethod
+    def _delete_snapshot(snapshot: Snapshot, output: AbstractOutputWriter):
+        try:
+            snapshot.delete()
+            output.write('- previous snapshot "%s" was deleted' % snapshot.name)
+        except Exception as e:
+            output.write('- previous snapshot "%s" was not deleted. Error: %s' % (snapshot.name, str(e)))

     @staticmethod
     def _render_volumes_info_table(volume_mounts: List[VolumeMount], volumes: List[AbstractInstanceVolume]):
diff --git a/spotty/providers/aws/instance_manager.py b/spotty/providers/aws/instance_manager.py
index 929707b..af44ec6 100644
--- a/spotty/providers/aws/instance_manager.py
+++ b/spotty/providers/aws/instance_manager.py
@@ -101,7 +101,7 @@ def sync(self, output: AbstractOutputWriter, dry_run=False):
         if not dry_run:
             # sync S3 with the instance
             output.write('Syncing S3 bucket with the instance...')
-            sync_instance_with_s3(self.project_config.sync_filters, self.ip_address, self.ssh_port, self.ssh_user,
+            sync_instance_with_s3(self.project_config.sync_filters, self.get_ip_address(), self.ssh_port, self.ssh_user,
                                   self.ssh_key_path)

     def download(self, download_filters: list, output: AbstractOutputWriter, dry_run=False):
@@ -110,7 +110,7 @@ def download(self, download_filters: list, output: AbstractOutputWriter, dry_run

         # sync files from the instance to a temporary S3 directory
         output.write('Uploading files from the instance to S3 bucket...')
-        upload_from_instance_to_s3(download_filters, self.ip_address, self.ssh_port, self.ssh_user, self.ssh_key_path,
+        upload_from_instance_to_s3(download_filters, self.get_ip_address(), self.ssh_port, self.ssh_user, self.ssh_key_path,
                                    dry_run=dry_run)

         # sync the project with the S3 bucket
@@ -118,8 +118,7 @@ def download(self, download_filters: list, output: AbstractOutputWriter, dry_run
         download_from_s3_to_local(bucket_name, self.instance_config.name, self.project_config.project_dir,
                                   self.instance_config.region, download_filters, dry_run=dry_run)

-    @property
-    def status_text(self):
+    def get_status_text(self):
         instance = self.instance_deployment.get_instance()
         if not instance:
             raise InstanceNotRunningError(self.instance_config.name)
@@ -144,12 +143,8 @@ def status_text(self):

         return render_table(table)

-    @property
-    def ip_address(self):
-        """Returns public IP address of the running instance."""
-        if self._instance_config.local_ssh_port:
-            return '127.0.0.1'
-
+    def get_public_ip_address(self):
+        """Returns a public IP address of the running instance."""
         instance = self.instance_deployment.get_instance()
         if not instance:
             raise InstanceNotRunningError(self.instance_config.name)