# aws.yml (forked from DemocracyClub/polling_deploy)
---
# To use:
# On any change to the Launch Config (instance size, spot price, userdata) you
# will need to increment the number in the vars below.
#
# Then run:
#
# AWS_PROFILE=democlub ansible-playbook aws.yml -e replace_all=True
#
# This will create a new launch config, associate the ASG with it, and then
# delete the old one. If `replace_all` is set it will also cycle the old
# instances (one by one) so they start using the new config.
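#
# To build the spot-price stack instead, pass a bid price as well (the tasks
# below only check that spot_price is defined; the value here is just an
# example):
#
# AWS_PROFILE=democlub ansible-playbook aws.yml -e spot_price=0.05 -e replace_all=True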
- hosts: 127.0.0.1
connection: local
vars:
region: "{{ lookup('env', 'AWS_REGION') or 'eu-west-1' }}"
regions:
eu-west-1:
ami_id: ami-0a35270e3a2e14cb9
# The default VPC
vpc_id: vpc-e2c30986
# eu-central-1:
# ami_id: ami-2bbe8e4d
# vpc_id: vpc-1e467077
vpc_id: "{{ regions[region].vpc_id }}"
ami_id: "{{ regions[region].ami_id }}"
elb_ssl_arn: "arn:aws:acm:eu-west-1:225244788755:certificate/f550f331-59f8-4318-a91d-2384d0b6c23f"
lc_num: 223
old_lc_num: "{{ lc_num - 1 }}"
aws_env: "{{ lookup('env', 'ENVIRONMENT') or 'test' }}"
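    # aws_env is interpolated into every resource name below, so stacks for
    # different environments (e.g. test and prod) can coexist in one account.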
environment:
AWS_REGION: "{{ region }}"
tasks:
    - name: Gather subnet facts for the VPC
      ec2_vpc_subnet_facts:
filters:
vpc-id: "{{ vpc_id }}"
register: subnets
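    # The same subnet list is reused for both the ELB's subnets and the ASGs'
    # vpc_zone_identifier below.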
- name: ELB security group
ec2_group:
name: "pollingstations-elb-{{ aws_env }}"
description: "ELB http security group"
vpc_id: "{{ vpc_id }}"
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 443
to_port: 443
cidr_ip: 0.0.0.0/0
register: sg_elb
- name: Instance Security Group
ec2_group:
name: "pollingstations-asg-{{ aws_env }}"
description: "Allow access for SSH and HTTP from the ELB"
vpc_id: "{{ vpc_id }}"
rules:
- proto: tcp
from_port: 80
to_port: 80
group_id: "{{ sg_elb.group_id }}"
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 81.187.11.5/32
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 86.130.83.53/32
register: sg_instance
    # This will fail the first time because we're not adding any instances to it :(
- name: Elastic Load Balancer
ec2_elb_lb:
name: "pollingstations-{{ aws_env }}"
state: present
security_group_ids: "{{ sg_elb.group_id }}"
        # When the ELB isn't getting much traffic we'll only have ELB nodes in
        # 2 AZs, but instances in 3. This setting makes it send traffic to all
        # backend instances in that case.
cross_az_load_balancing: True
connection_draining_timeout: 20
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
- protocol: https
load_balancer_port: 443
instance_port: 80
instance_protocol: http
ssl_certificate_id: "{{ elb_ssl_arn }}"
health_check:
ping_protocol: http
ping_port: 80
ping_path: "/status_check/"
response_timeout: 2
interval: 100
unhealthy_threshold: 10
healthy_threshold: 2
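        # With a 100 s interval an instance must fail 10 checks (~17 minutes)
        # before being taken out of service, and pass 2 (~3-4 minutes) before
        # being put back in.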
        subnets: "{{ subnets.subnets | map(attribute='id') | list }}"
tags:
Env: "{{ aws_env }}"
Name: "pollingstations-{{ aws_env }}"
register: elb_result
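    # The next four tasks form two mutually exclusive launch-config/ASG pairs:
    # the spot pair runs only when spot_price is defined (passed in with -e),
    # otherwise the on-demand pair runs.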
- name: Spot price Launch Config
ec2_lc:
name: "spotprice-pollingstations_{{ aws_env }}-{{ lc_num }}"
assign_public_ip: yes
image_id: "{{ ami_id }}"
instance_type: m3.medium
security_groups: ["{{ sg_instance.group_id }}"]
spot_price: "{{ spot_price }}"
        user_data: "{{ lookup('template', 'userdata.yml') }}"
register: launchconfig
when: spot_price is defined
    - name: Spot price Autoscaling group
ec2_asg:
name: "spotprice-pollingstations-asg-{{ aws_env }}"
state: present
tags:
- Env: "{{ aws_env }}"
Name: pollingstations-spot-asg
desired_capacity: 1
        launch_config_name: "{{ launchconfig.name }}"
load_balancers: [ "{{ elb_result.elb.name }}" ]
max_size: 10
min_size: 0
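        # min_size 0 lets the spot group scale away to nothing, e.g. when the
        # market price rises above the bid.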
termination_policies: [ OldestLaunchConfiguration, ClosestToNextInstanceHour ]
replace_all_instances: "{{ replace_all|default(False) }}"
# Yes, subnets go in vpc_zone_identifier. Blame Garethr.
        vpc_zone_identifier: "{{ subnets.subnets | map(attribute='id') | list }}"
# Wait for this long to replace old instances
wait_timeout: 1200
when: spot_price is defined
- name: On-demand Launch Config
ec2_lc:
name: "pollingstations_{{ aws_env }}-{{ lc_num }}"
assign_public_ip: yes
image_id: "{{ ami_id }}"
instance_type: t3.medium
instance_profile_name: "arn:aws:iam::225244788755:instance-profile/packer-ami-builder"
security_groups: ["{{ sg_instance.group_id }}"]
        user_data: "{{ lookup('template', 'userdata.yml') }}"
register: launchconfig
      when: spot_price is not defined
    - name: On-demand Autoscaling group
ec2_asg:
name: "pollingstations-asg-{{ aws_env }}"
state: present
tags:
- Env: "{{ aws_env }}"
Name: pollingstations_asg
health_check_type: ELB
        launch_config_name: "{{ launchconfig.name }}"
load_balancers: ["{{ elb_result.elb.name }}"]
max_size: 10
min_size: 1
desired_capacity: 1
replace_batch_size: 1
replace_all_instances: "{{ replace_all|default(False) }}"
termination_policies: [ OldestLaunchConfiguration, ClosestToNextInstanceHour ]
# Yes, subnets go in vpc_zone_identifier. Blame Garethr.
        vpc_zone_identifier: "{{ subnets.subnets | map(attribute='id') | list }}"
# This timeout is quite long (20 mins) because we need to:
# - boot a server (a few mins),
# - run init_db.sh (which takes ~12 mins),
# - wait for it to pass 2 health checks (could be another 3 mins)
wait_timeout: 1200
      when: spot_price is not defined
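    # old_lc_num is always lc_num - 1, so only the immediately previous launch
    # configs get cleaned up; if a number is ever skipped, the older configs
    # are left behind and must be deleted by hand.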
- name: Delete old unused spot price Launch Configs
ec2_lc:
name: "spotprice-pollingstations_{{ aws_env }}-{{ old_lc_num }}"
state: absent
- name: Delete old unused on-demand Launch Configs
ec2_lc:
name: "pollingstations_{{ aws_env }}-{{ old_lc_num }}"
state: absent