@@ -86,13 +86,8 @@ def self.condition_response(condition, log, use_slack_notification: true)
8686
8787 iterate_and_log_notify_errors ( backup_files , 'in backup_files loop, uploading_file_to_s3' , log ) do |backup_file |
8888 upload_file_to_s3 ( aws_s3 , aws_s3_backup_bucket , aws_backup_bucket_full_prefix , backup_file )
89- # When we first upload our file to s3, the default storage class is STANDARD
90- # After 1 month, we want to transition the object to STANDARD IA,
91- # then GLACIER after 3 months. This can be changed to meet our needs.
92- # This will help us save on costs.
93- # This however has effects on retrieval time for objects which you can see in the link below
94- # https://aws.amazon.com/s3/storage-classes/#Performance_across_the_S3_Storage_Classes
95- set_s3_lifecycle_rules ( bucket_name : aws_s3_backup_bucket , bucket_full_prefix : aws_backup_bucket_full_prefix , status : 'enabled' , storage_rules : [ { days : 30 , storage_class : 'STANDARD_IA' } , { days : 90 , storage_class : 'GLACIER' } ] )
89+ # When we first upload our file to s3, the default storage class is STANDARD_IA
90+ set_s3_lifecycle_rules ( bucket_name : aws_s3_backup_bucket , bucket_full_prefix : aws_backup_bucket_full_prefix , status : 'enabled' , storage_rules : [ { days : 90 , storage_class : 'GLACIER' } , { days : 450 , storage_class : 'DEEP_ARCHIVE' } ] )
9691 end
9792
9893 log . record ( 'info' , 'Pruning older backups on local storage' )
@@ -149,7 +144,7 @@ def self.s3_backup_bucket_full_prefix(today = Date.current)
149144 # @see https://aws.amazon.com/blogs/developer/uploading-files-to-amazon-s3/
150145 def self . upload_file_to_s3 ( s3 , bucket , bucket_folder , file )
151146 obj = s3 . bucket ( bucket ) . object ( bucket_folder + File . basename ( file ) )
152- obj . upload_file ( file , { tagging : aws_date_tags } )
147+ obj . upload_file ( file , { tagging : aws_date_tags , storage_class : 'STANDARD_IA' } )
153148 end
154149
155150
@@ -295,7 +290,7 @@ def self.iterate_and_log_notify_errors(list, additional_error_info, log, use_sla
295290 end
296291
297292
298- STORAGE_CLASSES = %w( STANDARD_IA GLACIER DEEP_ARCHIVE ) . freeze
293+ STORAGE_CLASSES = %w( GLACIER DEEP_ARCHIVE ) . freeze
299294 class << self
300295 define_method ( :storage_class_is_valid? ) do |storage_class_list |
301296 unless storage_class_list . empty?
@@ -306,7 +301,7 @@ class << self
306301 end
307302 end
308303
309- # s3_lifecycle_rules(bucket_name: 'bucket_name', bucket_full_prefix: 'bucket_full_prefix', status: 'enabled', storage_rules: [{days: 30 , storage_class: 'STANDARD_IA '}, {days: 90 , storage_class: 'GLACIER '}])
304+ # s3_lifecycle_rules(bucket_name: 'bucket_name', bucket_full_prefix: 'bucket_full_prefix', status: 'enabled', storage_rules: [{days: 90 , storage_class: 'GLACIER '}, {days: 450 , storage_class: 'DEEP_ARCHIVE '}])
310305 def self . set_s3_lifecycle_rules ( bucket_name :, bucket_full_prefix :, status :, storage_rules :)
311306 client = Aws ::S3 ::Client . new ( region : ENV [ 'SHF_AWS_S3_BACKUP_REGION' ] ,
312307 credentials : Aws ::Credentials . new ( ENV [ 'SHF_AWS_S3_BACKUP_KEY_ID' ] , ENV [ 'SHF_AWS_S3_BACKUP_SECRET_ACCESS_KEY' ] ) )
@@ -319,8 +314,9 @@ def self.set_s3_lifecycle_rules(bucket_name:, bucket_full_prefix:, status:, stor
319314 rules : [
320315 {
321316 expiration : {
317+ # Expire objects after 10 years
322318 date : Time . now ,
323- days : 365 ,
319+ days : 3650 ,
324320 expired_object_delete_marker : false
325321 } ,
326322 filter : {
0 commit comments