---
# tasks file for test_ec2_elb

# ============================================================
# create an ELB for testing
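# the load balancer is named after resource_prefix so that concurrent
# test runs operate on distinct, identifiable AWS resources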

- name: create the test load balancer
  ec2_elb_lb:
    name: "{{ resource_prefix }}"
    ec2_access_key: "{{ ec2_access_key }}"
    ec2_secret_key: "{{ ec2_secret_key }}"
    region: "{{ ec2_region }}"
    state: present
    zones:
      - "{{ ec2_region }}b"
      - "{{ ec2_region }}c"
    listeners:
      - protocol: http
        load_balancer_port: 80
        instance_port: 80
    health_check:
      ping_protocol: http
      ping_port: 80
      ping_path: "/index.html"
      response_timeout: 5
      interval: 10
      unhealthy_threshold: 3
      healthy_threshold: 2
  register: result

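# boto reports the health check as a single target string of the form
# "PROTOCOL:PORT/PATH", so the settings above collapse to HTTP:80/index.html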
- name: assert the test load balancer was created correctly
  assert:
    that:
      - 'result.changed'
      - '"failed" not in result'
      - 'result.elb.status == "created"'
      - '"{{ ec2_region }}b" in result.elb.zones'
      - '"{{ ec2_region }}c" in result.elb.zones'
      - 'result.elb.health_check.healthy_threshold == 2'
      - 'result.elb.health_check.interval == 10'
      - 'result.elb.health_check.target == "HTTP:80/index.html"'
      - 'result.elb.health_check.timeout == 5'
      - 'result.elb.health_check.unhealthy_threshold == 3'
      - '[80, 80, "HTTP", "HTTP"] in result.elb.listeners'


# ============================================================
# add one of the instances to the LB

- name: add first instance to the load balancer
  ec2_elb:
    ec2_elbs: "{{ resource_prefix }}"
    ec2_access_key: "{{ ec2_access_key }}"
    ec2_secret_key: "{{ ec2_secret_key }}"
    region: "{{ ec2_region }}"
    instance_id: "{{ ec2_provision_result.instance_ids[0] }}"
    state: present
    wait_timeout: 300
  register: result

- name: assert the first instance was added ok
  assert:
    that:
      - 'result.changed'
      - '"{{ resource_prefix }}" in result.ansible_facts.ec2_elbs'

# ============================================================
# add all other instances to the LB

- name: add other instances to the load balancer
  ec2_elb:
    ec2_elbs: "{{ resource_prefix }}"
    ec2_access_key: "{{ ec2_access_key }}"
    ec2_secret_key: "{{ ec2_secret_key }}"
    region: "{{ ec2_region }}"
    instance_id: "{{ item }}"
    state: present
    wait_timeout: 300
  with_items: "{{ ec2_provision_result.instance_ids[1:] }}"
  register: result

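# a looped task registers one result per item under result.results, so the
# assertion below iterates over that list instead of the top-level result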
- name: assert the other instances were added ok
  assert:
    that:
      - 'item.changed'
      - '"{{ resource_prefix }}" in item.ansible_facts.ec2_elbs'
  with_items: "{{ result.results }}"

# ============================================================
# shut down httpd on the first instance so it goes out of service

- name: "shutdown the apache service on the first instance ({{ec2_provision_result.instances[0].public_ip}})"
  service: name=httpd state=stopped
  remote_user: "ec2-user"
  become: yes
  become_user: root
  delegate_to: "{{ec2_provision_result.instances[0].public_ip}}"

- name: assert that the httpd service was stopped
  assert:
    that:
      - 'result.changed'

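# with a 10 second check interval and an unhealthy threshold of 3, the ELB
# marks the instance OutOfService after roughly 30 seconds; 60 gives headroom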
- name: pause long enough for the instance to go out of service
  pause:
    seconds: 60

# ============================================================
# remove the out of service instance

- name: remove the out of service instance
  ec2_elb:
    ec2_elbs: "{{ resource_prefix }}"
    ec2_access_key: "{{ ec2_access_key }}"
    ec2_secret_key: "{{ ec2_secret_key }}"
    region: "{{ ec2_region }}"
    instance_id: "{{ ec2_provision_result.instance_ids[0] }}"
    state: absent
    wait_timeout: 300
  register: result

- name: assert that the out of service instance was removed
  assert:
    that:
      - 'result.changed'
      - '"{{ resource_prefix }}" in result.ansible_facts.ec2_elbs'

# ============================================================
# remove another instance that is still in service

- name: remove the second instance
  ec2_elb:
    ec2_elbs: "{{ resource_prefix }}"
    ec2_access_key: "{{ ec2_access_key }}"
    ec2_secret_key: "{{ ec2_secret_key }}"
    region: "{{ ec2_region }}"
    instance_id: "{{ ec2_provision_result.instance_ids[1] }}"
    state: absent
    wait_timeout: 300
  register: result

- name: assert that the second instance was removed
  assert:
    that:
      - 'result.changed'
      - '"{{ resource_prefix }}" in result.ansible_facts.ec2_elbs'

# ============================================================
# re-register the second instance (issue #4902)
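# regression test for issue #4902: re-adding an instance that was just
# deregistered must register it again and report a change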

- name: re-register the second instance (issue #4902)
  ec2_elb:
    ec2_elbs: "{{ resource_prefix }}"
    ec2_access_key: "{{ ec2_access_key }}"
    ec2_secret_key: "{{ ec2_secret_key }}"
    region: "{{ ec2_region }}"
    instance_id: "{{ ec2_provision_result.instance_ids[1] }}"
    state: present
    wait_timeout: 300
  register: result

- name: assert the instance was re-registered ok
  assert:
    that:
      - 'result.changed'
      - '"{{ resource_prefix }}" in result.ansible_facts.ec2_elbs'

# ============================================================
# remove all other instances

- name: remove the rest of the instances
  ec2_elb:
    ec2_elbs: "{{ resource_prefix }}"
    ec2_access_key: "{{ ec2_access_key }}"
    ec2_secret_key: "{{ ec2_secret_key }}"
    region: "{{ ec2_region }}"
    instance_id: "{{ item }}"
    state: absent
    wait_timeout: 300
  with_items: "{{ ec2_provision_result.instance_ids[1:] }}"
  register: result

- name: assert the other instances were removed
  assert:
    that:
      - 'item.changed'
      - '"{{ resource_prefix }}" in item.ansible_facts.ec2_elbs'
  with_items: "{{ result.results }}"
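
# ============================================================
# the ELB created above is not deleted in this task file. A minimal
# cleanup sketch, assuming the same credential variables as above:
#
# - name: delete the test load balancer
#   ec2_elb_lb:
#     name: "{{ resource_prefix }}"
#     ec2_access_key: "{{ ec2_access_key }}"
#     ec2_secret_key: "{{ ec2_secret_key }}"
#     region: "{{ ec2_region }}"
#     state: absent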