# Copyright 2011 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines various sub-classes of the `StressTestCase` and
`PendingServerAction` classes. Sub-classes of StressTestCase implement various
API calls on the Nova cluster having to do with Server Actions. Each
sub-class will have a corresponding PendingServerAction. These pending
actions verify whether the API call was successful."""


# system imports
import random
import time

# local imports
import test_case
import pending_action
from tempest.exceptions import Duplicate
from utils.util import *


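# A driver would typically call a test case's run() to fire off the API
# request and then poll the returned pending action's retry() until it
# reports True. A minimal, hypothetical sketch of such a loop (not part of
# this module), where `reboot_case` is an already-constructed TestRebootVM:
#
#     pending = reboot_case.run(manager, state, type='SOFT')
#     while pending is not None and not pending.retry():
#         time.sleep(1)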
class TestRebootVM(test_case.StressTestCase):
    """Reboot a server"""

    def run(self, manager, state, *pargs, **kwargs):
        """
        Send an HTTP POST request to the nova cluster to reboot a random
        server. Update state of object in `state` variable to indicate that
        it is rebooting.
        `manager` : Manager object
        `state` : `State` object describing our view of state of cluster
        `pargs` : positional arguments
        `kwargs` : keyword arguments, which include:
            `timeout` : how long to wait before issuing Exception
            `type` : reboot type [SOFT or HARD] (default is SOFT)
        """

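        # state.get_instances() maps instance id -> (server, status); only
        # servers currently marked ACTIVE are candidates for a reboot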
        vms = state.get_instances()
        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
        # no active vms, so return null
        if not active_vms:
            self._logger.info('no ACTIVE instances to reboot')
            return

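        # a SOFT reboot asks the guest OS to restart itself; a HARD reboot
        # power-cycles the instance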
        _reboot_arg = kwargs.get('type', 'SOFT')

        # select active vm to reboot and then send request to nova controller
        target = random.choice(active_vms)
        reboot_target = target[0]
        # It seems that doing a reboot when in reboot is an error.
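        # (the servers client surfaces that as a Duplicate exception,
        # presumably from a 409 Conflict, so we simply skip this round)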
        try:
            response, body = manager.servers_client.reboot(
                reboot_target['id'],
                _reboot_arg)
        except Duplicate:
            return

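        # the reboot call is asynchronous; Nova acknowledges it with
        # 202 (Accepted), so anything else is treated as a failure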
        if (response.status != 202):
            self._logger.error("response: %s" % response)
            raise Exception

        if _reboot_arg == 'SOFT':
            reboot_state = 'REBOOT'
        else:
            reboot_state = 'HARD_REBOOT'

        self._logger.info('waiting for machine %s to change to %s' %
                          (reboot_target['id'], reboot_state))

        return VerifyRebootVM(manager,
                              state,
                              reboot_target,
                              reboot_state=reboot_state)


class VerifyRebootVM(pending_action.PendingServerAction):
    """Class to verify that the reboot completed."""
    States = enum('REBOOT_CHECK', 'ACTIVE_CHECK')

    def __init__(self, manager, state, target_server,
                 reboot_state=None,
                 ip_addr=None):
        super(VerifyRebootVM, self).__init__(manager,
                                             state,
                                             target_server)
        self._reboot_state = reboot_state
        self._retry_state = self.States.REBOOT_CHECK

    def retry(self):
        """
        Check to see that the server of interest has actually rebooted. Update
        state to indicate that server is running again.
        """
        # don't run reboot verification if target machine has been
        # deleted or is going to be deleted
        target_id = self._target['id']
        if (self._target['id'] not in self._state.get_instances().keys() or
                self._state.get_instances()[target_id][1] == 'TERMINATING'):
            self._logger.debug('machine %s is deleted or TERMINATING' %
                               self._target['id'])
            return True

        reboot_state = self._reboot_state
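        # verification is a two-phase state machine: first wait for the
        # server to show the expected reboot status, then wait for it to
        # come back to ACTIVE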
        if self._retry_state == self.States.REBOOT_CHECK:
            server_state = self._check_for_status(reboot_state)
            if server_state == reboot_state:
                self._logger.info('machine %s ACTIVE -> %s' %
                                  (self._target['id'], reboot_state))
                self._state.set_instance_state(self._target['id'],
                                               (self._target, reboot_state))
                self._retry_state = self.States.ACTIVE_CHECK
            elif server_state == 'ACTIVE':
                # machine must have gone ACTIVE -> REBOOT -> ACTIVE
                self._retry_state = self.States.ACTIVE_CHECK

        elif self._retry_state == self.States.ACTIVE_CHECK:
            if not self._check_for_status('ACTIVE'):
                return False
            target = self._target
            self._logger.info('machine %s %s -> ACTIVE [%.1f secs elapsed]' %
                              (target['id'], reboot_state, self.elapsed()))
            self._state.set_instance_state(target['id'],
                                           (target, 'ACTIVE'))

            return True

# This code needs to be tested against a cluster that supports resize.
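# For reference, a Nova resize normally takes the server ACTIVE -> RESIZE ->
# VERIFY_RESIZE, after which the new flavor must be confirmed (confirmResize)
# or reverted before the server returns to ACTIVE; the commented-out code
# below follows that flow.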
#class TestResizeVM(test_case.StressTestCase):
#    """Resize a server (change flavors)"""
#
#    def run(self, manager, state, *pargs, **kwargs):
#        """
#        Send an HTTP POST request to the nova cluster to resize a random
#        server. Update `state` to indicate server is resizing.
#
#        `manager` : Manager object.
#        `state` : `State` object describing our view of state of cluster
#        `pargs` : positional arguments
#        `kwargs` : keyword arguments, which include:
#            `timeout` : how long to wait before issuing Exception
#        """
#
#        vms = state.get_instances()
#        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
#        # no active vms, so return null
#        if not active_vms:
#            self._logger.debug('no ACTIVE instances to resize')
#            return
#
#        target = random.choice(active_vms)
#        resize_target = target[0]
#        print resize_target
#
#        _timeout = kwargs.get('timeout', 600)
#
#        # determine current flavor type, and resize to a different type
#        # m1.tiny -> m1.small, m1.small -> m1.tiny
#        curr_size = int(resize_target['flavor']['id'])
#        if curr_size == 1:
#            new_size = 2
#        else:
#            new_size = 1
#        flavor_type = {'flavorRef': new_size}  # resize to the other flavor
#
#        post_body = json.dumps({'resize': flavor_type})
#        url = '/servers/%s/action' % resize_target['id']
#        (response, body) = manager.request('POST',
#                                           url,
#                                           body=post_body)
#
#        if (response.status != 202):
#            self._logger.error("response: %s" % response)
#            raise Exception
#
#        state_name = check_for_status(manager, resize_target, 'RESIZE')
#
#        if state_name == 'RESIZE':
#            self._logger.info('machine %s: ACTIVE -> RESIZE' %
#                              resize_target['id'])
#            state.set_instance_state(resize_target['id'],
#                                     (resize_target, 'RESIZE'))
#
#        return VerifyResizeVM(manager,
#                              state,
#                              resize_target,
#                              state_name=state_name,
#                              timeout=_timeout)
#
#class VerifyResizeVM(pending_action.PendingServerAction):
#    """Verify that resizing of a VM was successful"""
#    States = enum('VERIFY_RESIZE_CHECK', 'ACTIVE_CHECK')
#
#    def __init__(self, manager, state, created_server,
#                 state_name=None,
#                 timeout=300):
#        super(VerifyResizeVM, self).__init__(manager,
#                                             state,
#                                             created_server,
#                                             timeout=timeout)
#        self._retry_state = self.States.VERIFY_RESIZE_CHECK
#        self._state_name = state_name
#
#    def retry(self):
#        """
#        Check to see that the server was actually resized, and change the
#        `state` of the server to running again.
#        """
#        # don't run resize if target machine has been deleted
#        # or is going to be deleted
#        if (self._target['id'] not in self._state.get_instances().keys() or
#                self._state.get_instances()[self._target['id']][1] ==
#                'TERMINATING'):
#            self._logger.debug('machine %s is deleted or TERMINATING' %
#                               self._target['id'])
#            return True
#
#        if self._retry_state == self.States.VERIFY_RESIZE_CHECK:
#            if self._check_for_status('VERIFY_RESIZE') == 'VERIFY_RESIZE':
#                # now issue command to CONFIRM RESIZE
#                post_body = json.dumps({'confirmResize': None})
#                url = '/servers/%s/action' % self._target['id']
#                (response, body) = self._manager.request('POST',
#                                                         url,
#                                                         body=post_body)
#                if (response.status != 204):
#                    self._logger.error("response: %s" % response)
#                    raise Exception
#
#                self._logger.info(
#                    'CONFIRMING RESIZE of machine %s [%.1f secs elapsed]' %
#                    (self._target['id'], self.elapsed())
#                )
#                self._state.set_instance_state(self._target['id'],
#                                               (self._target, 'CONFIRM_RESIZE'))
#
#                # change states
#                self._retry_state = self.States.ACTIVE_CHECK
#
#            return False
#
#        elif self._retry_state == self.States.ACTIVE_CHECK:
#            if not self._check_manager("ACTIVE"):
#                return False
#            else:
#                server = self._manager.get_server(self._target['id'])
#
#                # Find private IP of server?
#                try:
#                    (_, network) = server['addresses'].popitem()
#                    ip = network[0]['addr']
#                except KeyError:
#                    self._logger.error(
#                        'could not get ip address for machine %s' %
#                        self._target['id']
#                    )
#                    raise Exception
#
#                self._logger.info(
#                    'machine %s: VERIFY_RESIZE -> ACTIVE [%.1f sec elapsed]' %
#                    (self._target['id'], self.elapsed())
#                )
#                self._state.set_instance_state(self._target['id'],
#                                               (self._target, 'ACTIVE'))
#
#                return True
#
#        else:
#            # should never get here
#            self._logger.error('Unexpected state')
#            raise Exception