# Copyright 2011 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14"""Defines various sub-classes of the `StressTestCase` and
David Kranz779c7f82012-05-01 16:50:32 -040015`PendingServerAction` class. Sub-classes of StressTestCase implement various
David Kranz6308ec22012-02-22 09:36:48 -050016API calls on the Nova cluster having to do with Server Actions. Each
David Kranz779c7f82012-05-01 16:50:32 -040017sub-class will have a corresponding PendingServerAction. These pending
David Kranz6308ec22012-02-22 09:36:48 -050018actions veriy that the API call was successful or not."""
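
# A minimal sketch of how these classes are meant to be driven (the loop below
# is illustrative only and not part of this module; `manager` and `state` are
# assumed to be supplied by the stress framework):
#
#     test = TestRebootVM()
#     pending = test.run(manager, state, type='SOFT')  # returns VerifyRebootVM
#     while pending is not None and not pending.retry():
#         time.sleep(1)   # poll until the reboot has been verified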

import random

import pending_action
import stress.utils
from tempest.exceptions import Duplicate
import test_case


class TestRebootVM(test_case.StressTestCase):
    """Reboot a server."""

    def run(self, manager, state, *pargs, **kwargs):
        """
        Send an HTTP POST request to the nova cluster to reboot a random
        server. Update state of object in `state` variable to indicate that
        it is rebooting.
        `manager` : Manager object
        `state` : `State` object describing our view of state of cluster
        `pargs` : positional arguments
        `kwargs` : keyword arguments, which include:
            `timeout` : how long to wait before issuing Exception
            `type` : reboot type [SOFT or HARD] (default is SOFT)
        """

        vms = state.get_instances()
        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
        # no active vms, so return None
        if not active_vms:
            self._logger.info('no ACTIVE instances to reboot')
            return

        _reboot_arg = kwargs.get('type', 'SOFT')

        # select active vm to reboot and then send request to nova controller
        target = random.choice(active_vms)
        reboot_target = target[0]
        # It seems that doing a reboot when in reboot is an error.
        try:
            response, body = manager.servers_client.reboot(reboot_target['id'],
                                                            _reboot_arg)
        except Duplicate:
            return

        if (response.status != 202):
            self._logger.error("response: %s" % response)
            raise Exception

        if _reboot_arg == 'SOFT':
            reboot_state = 'REBOOT'
        else:
            reboot_state = 'HARD_REBOOT'

        self._logger.info('waiting for machine %s to change to %s' %
                          (reboot_target['id'], reboot_state))

        return VerifyRebootVM(manager,
                              state,
                              reboot_target,
                              reboot_state=reboot_state)


class VerifyRebootVM(pending_action.PendingServerAction):
    """Class to verify that the reboot completed."""
    States = stress.utils.enum('REBOOT_CHECK', 'ACTIVE_CHECK')

    def __init__(self, manager, state, target_server,
                 reboot_state=None,
                 ip_addr=None):
        super(VerifyRebootVM, self).__init__(manager,
                                             state,
                                             target_server)
        self._reboot_state = reboot_state
        self._retry_state = self.States.REBOOT_CHECK

    def retry(self):
        """
        Check to see that the server of interest has actually rebooted. Update
        state to indicate that server is running again.
        """
        # don't run reboot verification if target machine has been
        # deleted or is going to be deleted
        target_id = self._target['id']
        if (target_id not in self._state.get_instances().keys() or
                self._state.get_instances()[target_id][1] == 'TERMINATING'):
            self._logger.debug('machine %s is deleted or TERMINATING' %
                               target_id)
            return True

        reboot_state = self._reboot_state
        if self._retry_state == self.States.REBOOT_CHECK:
            server_state = self._check_for_status(reboot_state)
            if server_state == reboot_state:
                self._logger.info('machine %s ACTIVE -> %s' %
                                  (self._target['id'], reboot_state))
                self._state.set_instance_state(self._target['id'],
                                               (self._target, reboot_state))
                self._retry_state = self.States.ACTIVE_CHECK
            elif server_state == 'ACTIVE':
                # machine must have gone ACTIVE -> REBOOT -> ACTIVE
                self._retry_state = self.States.ACTIVE_CHECK

        elif self._retry_state == self.States.ACTIVE_CHECK:
            if not self._check_for_status('ACTIVE'):
                return False
            target = self._target
            self._logger.info('machine %s %s -> ACTIVE [%.1f secs elapsed]' %
                              (target['id'], reboot_state, self.elapsed()))
            self._state.set_instance_state(target['id'],
                                           (target, 'ACTIVE'))

        return True

# This code needs to be tested against a cluster that supports resize.
#class TestResizeVM(test_case.StressTestCase):
#    """Resize a server (change flavors)."""
#
#    def run(self, manager, state, *pargs, **kwargs):
#        """
#        Send an HTTP POST request to the nova cluster to resize a random
#        server. Update `state` to indicate server is resizing.
#
#        `manager` : Manager object.
#        `state` : `State` object describing our view of state of cluster
#        `pargs` : positional arguments
#        `kwargs` : keyword arguments, which include:
#            `timeout` : how long to wait before issuing Exception
#        """
#
#        vms = state.get_instances()
#        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
#        # no active vms, so return None
#        if not active_vms:
#            self._logger.debug('no ACTIVE instances to resize')
#            return
#
#        target = random.choice(active_vms)
#        resize_target = target[0]
#        print resize_target
#
#        _timeout = kwargs.get('timeout', 600)
#
#        # determine current flavor type, and resize to a different type
#        # m1.tiny -> m1.small, m1.small -> m1.tiny
#        curr_size = int(resize_target['flavor']['id'])
#        if curr_size == 1:
#            new_size = 2
#        else:
#            new_size = 1
#        flavor_type = {'flavorRef': new_size}  # resize to the other flavor
#
#        post_body = json.dumps({'resize': flavor_type})
#        url = '/servers/%s/action' % resize_target['id']
#        (response, body) = manager.request('POST',
#                                           url,
#                                           body=post_body)
#
#        if (response.status != 202):
#            self._logger.error("response: %s" % response)
#            raise Exception
#
#        state_name = check_for_status(manager, resize_target, 'RESIZE')
#
#        if state_name == 'RESIZE':
#            self._logger.info('machine %s: ACTIVE -> RESIZE' %
#                              resize_target['id'])
#            state.set_instance_state(resize_target['id'],
#                                     (resize_target, 'RESIZE'))
#
#        return VerifyResizeVM(manager,
#                              state,
#                              resize_target,
#                              state_name=state_name,
#                              timeout=_timeout)
#
#class VerifyResizeVM(pending_action.PendingServerAction):
#    """Verify that resizing of a VM was successful."""
#    States = stress.utils.enum('VERIFY_RESIZE_CHECK', 'ACTIVE_CHECK')
#
#    def __init__(self, manager, state, created_server,
#                 state_name=None,
#                 timeout=300):
#        super(VerifyResizeVM, self).__init__(manager,
#                                             state,
#                                             created_server,
#                                             timeout=timeout)
#        self._retry_state = self.States.VERIFY_RESIZE_CHECK
#        self._state_name = state_name
#
#    def retry(self):
#        """
#        Check to see that the server was actually resized, and update `state`
#        to indicate the server is running again.
#        """
#        # don't run resize if target machine has been deleted
#        # or is going to be deleted
#        if (self._target['id'] not in self._state.get_instances().keys() or
#                self._state.get_instances()[self._target['id']][1] ==
#                'TERMINATING'):
#            self._logger.debug('machine %s is deleted or TERMINATING' %
#                               self._target['id'])
#            return True
#
#        if self._retry_state == self.States.VERIFY_RESIZE_CHECK:
#            if self._check_for_status('VERIFY_RESIZE') == 'VERIFY_RESIZE':
#                # now issue command to CONFIRM RESIZE
#                post_body = json.dumps({'confirmResize': None})
#                url = '/servers/%s/action' % self._target['id']
#                (response, body) = self._manager.request('POST',
#                                                         url,
#                                                         body=post_body)
#                if (response.status != 204):
#                    self._logger.error("response: %s" % response)
#                    raise Exception
#
#                self._logger.info(
#                    'CONFIRMING RESIZE of machine %s [%.1f secs elapsed]' %
#                    (self._target['id'], self.elapsed())
#                )
#                self._state.set_instance_state(self._target['id'],
#                                               (self._target, 'CONFIRM_RESIZE'))
#
#                # change states
#                self._retry_state = self.States.ACTIVE_CHECK
#
#            return False
#
#        elif self._retry_state == self.States.ACTIVE_CHECK:
#            if not self._check_for_status('ACTIVE'):
#                return False
#            else:
#                server = self._manager.get_server(self._target['id'])
#
#                # Find private IP of server?
#                try:
#                    (_, network) = server['addresses'].popitem()
#                    ip = network[0]['addr']
#                except KeyError:
#                    self._logger.error(
#                        'could not get ip address for machine %s' %
#                        self._target['id']
#                    )
#                    raise Exception
#
#                self._logger.info(
#                    'machine %s: VERIFY_RESIZE -> ACTIVE [%.1f sec elapsed]' %
#                    (self._target['id'], self.elapsed())
#                )
#                self._state.set_instance_state(self._target['id'],
#                                               (self._target, 'ACTIVE'))
#
#                return True
#
#        else:
#            # should never get here
#            self._logger.error('Unexpected state')
#            raise Exception
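
# If the resize test above is revived, the raw POSTs to /servers/<id>/action
# could likely go through the compute servers client instead, mirroring how
# TestRebootVM calls manager.servers_client.reboot() above. This is a hedged
# sketch only; it assumes the client exposes resize()/confirm_resize() methods
# with these signatures:
#
#     response, body = manager.servers_client.resize(resize_target['id'],
#                                                    new_size)
#     ...
#     response, body = self._manager.servers_client.confirm_resize(
#         self._target['id'])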