# Copyright 2011 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14"""Defines various sub-classes of the `StressTestCase` and
David Kranz779c7f82012-05-01 16:50:32 -040015`PendingServerAction` class. Sub-classes of StressTestCase implement various
David Kranz6308ec22012-02-22 09:36:48 -050016API calls on the Nova cluster having to do with Server Actions. Each
David Kranz779c7f82012-05-01 16:50:32 -040017sub-class will have a corresponding PendingServerAction. These pending
David Kranz6308ec22012-02-22 09:36:48 -050018actions veriy that the API call was successful or not."""

import random
import time

import pending_action
from tempest.exceptions import Duplicate
import test_case
from utils.util import *

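# A minimal usage sketch (illustrative only, not part of the test suite;
# it assumes a driver loop with `manager` and `state` already constructed):
#
#     action = TestRebootVM()
#     pending = action.run(manager, state, type='HARD')
#     while pending is not None and not pending.retry():
#         time.sleep(2)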

class TestRebootVM(test_case.StressTestCase):
    """Reboot a server."""

    def run(self, manager, state, *pargs, **kwargs):
        """
        Send an HTTP POST request to the nova cluster to reboot a random
        server. Update the server's entry in `state` to indicate that it
        is rebooting.
        `manager` : Manager object
        `state` : `State` object describing our view of the cluster's state
        `pargs` : positional arguments
        `kwargs` : keyword arguments, which include:
            `timeout` : how long to wait before raising an Exception
            `type` : reboot type [SOFT or HARD] (default is SOFT)
        """

        vms = state.get_instances()
        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
        # no active vms, so return None
        if not active_vms:
            self._logger.info('no ACTIVE instances to reboot')
            return

        _reboot_arg = kwargs.get('type', 'SOFT')

        # select an active vm to reboot, then send the request to the
        # nova controller
        target = random.choice(active_vms)
        reboot_target = target[0]
        # issuing a reboot while one is already in progress is an error,
        # which the client raises as Duplicate
        try:
            response, body = manager.servers_client.reboot(reboot_target['id'],
                                                           _reboot_arg)
        except Duplicate:
            return

        if (response.status != 202):
            self._logger.error("response: %s" % response)
            raise Exception

        if _reboot_arg == 'SOFT':
            reboot_state = 'REBOOT'
        else:
            reboot_state = 'HARD_REBOOT'

        self._logger.info('waiting for machine %s to change to %s' %
                          (reboot_target['id'], reboot_state))

        return VerifyRebootVM(manager,
                              state,
                              reboot_target,
                              reboot_state=reboot_state)


class VerifyRebootVM(pending_action.PendingServerAction):
    """Class to verify that the reboot completed."""
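    # Two-phase verification: first wait for the server to reach the
    # expected REBOOT/HARD_REBOOT state, then wait for it to return
    # to ACTIVE.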
    States = enum('REBOOT_CHECK', 'ACTIVE_CHECK')

    def __init__(self, manager, state, target_server,
                 reboot_state=None,
                 ip_addr=None):
        super(VerifyRebootVM, self).__init__(manager,
                                             state,
                                             target_server)
        self._reboot_state = reboot_state
        self._retry_state = self.States.REBOOT_CHECK

    def retry(self):
        """
        Check to see that the server of interest has actually rebooted.
        Update state to indicate that the server is running again.
        """
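        # retry() returns True once this pending action is finished (or is
        # moot because the target is gone), and False while the state should
        # be polled again.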
        # don't run reboot verification if the target machine has been
        # deleted or is going to be deleted
        target_id = self._target['id']
        if (target_id not in self._state.get_instances().keys() or
                self._state.get_instances()[target_id][1] == 'TERMINATING'):
            self._logger.debug('machine %s is deleted or TERMINATING' %
                               target_id)
            return True

        reboot_state = self._reboot_state
        if self._retry_state == self.States.REBOOT_CHECK:
            server_state = self._check_for_status(reboot_state)
            if server_state == reboot_state:
                self._logger.info('machine %s ACTIVE -> %s' %
                                  (self._target['id'], reboot_state))
                self._state.set_instance_state(self._target['id'],
                                               (self._target, reboot_state))
                self._retry_state = self.States.ACTIVE_CHECK
            elif server_state == 'ACTIVE':
                # machine must have gone ACTIVE -> REBOOT -> ACTIVE
                self._retry_state = self.States.ACTIVE_CHECK
            # not back to ACTIVE yet, so poll again later
            return False

        elif self._retry_state == self.States.ACTIVE_CHECK:
            if not self._check_for_status('ACTIVE'):
                return False
            target = self._target
            self._logger.info('machine %s %s -> ACTIVE [%.1f secs elapsed]' %
                              (target['id'], reboot_state, self.elapsed()))
            self._state.set_instance_state(target['id'],
                                           (target, 'ACTIVE'))

        return True

# This code needs to be tested against a cluster that supports resize.
#class TestResizeVM(test_case.StressTestCase):
#    """Resize a server (change flavors)."""
#
#    def run(self, manager, state, *pargs, **kwargs):
#        """
#        Send an HTTP POST request to the nova cluster to resize a random
#        server. Update `state` to indicate the server is resizing.
#
#        `manager` : Manager object.
#        `state` : `State` object describing our view of the cluster's state
#        `pargs` : positional arguments
#        `kwargs` : keyword arguments, which include:
#            `timeout` : how long to wait before raising an Exception
#        """
#
#        vms = state.get_instances()
#        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
#        # no active vms, so return None
#        if not active_vms:
#            self._logger.debug('no ACTIVE instances to resize')
#            return
#
#        target = random.choice(active_vms)
#        resize_target = target[0]
#        print resize_target
#
#        _timeout = kwargs.get('timeout', 600)
#
#        # determine the current flavor, and resize to the other one:
#        # m1.tiny -> m1.small, m1.small -> m1.tiny
#        curr_size = int(resize_target['flavor']['id'])
#        if curr_size == 1:
#            new_size = 2
#        else:
#            new_size = 1
#        flavor_type = {'flavorRef': new_size}  # resize to the other flavor
#
#        post_body = json.dumps({'resize': flavor_type})
#        url = '/servers/%s/action' % resize_target['id']
#        (response, body) = manager.request('POST',
#                                           url,
#                                           body=post_body)
#
#        if (response.status != 202):
#            self._logger.error("response: %s" % response)
#            raise Exception
#
#        state_name = check_for_status(manager, resize_target, 'RESIZE')
#
#        if state_name == 'RESIZE':
#            self._logger.info('machine %s: ACTIVE -> RESIZE' %
#                              resize_target['id'])
#            state.set_instance_state(resize_target['id'],
#                                     (resize_target, 'RESIZE'))
#
#        return VerifyResizeVM(manager,
#                              state,
#                              resize_target,
#                              state_name=state_name,
#                              timeout=_timeout)
#
#class VerifyResizeVM(pending_action.PendingServerAction):
#    """Verify that resizing of a VM was successful."""
#    States = enum('VERIFY_RESIZE_CHECK', 'ACTIVE_CHECK')
#
#    def __init__(self, manager, state, created_server,
#                 state_name=None,
#                 timeout=300):
#        super(VerifyResizeVM, self).__init__(manager,
#                                             state,
#                                             created_server,
#                                             timeout=timeout)
#        self._retry_state = self.States.VERIFY_RESIZE_CHECK
#        self._state_name = state_name
#
#    def retry(self):
#        """
#        Check to see that the server was actually resized, and update
#        `state` to indicate the server is running again.
#        """
#        # don't run resize verification if the target machine has been
#        # deleted or is going to be deleted
#        if (self._target['id'] not in self._state.get_instances().keys() or
#                self._state.get_instances()[self._target['id']][1] ==
#                'TERMINATING'):
#            self._logger.debug('machine %s is deleted or TERMINATING' %
#                               self._target['id'])
#            return True
#
#        if self._retry_state == self.States.VERIFY_RESIZE_CHECK:
#            if self._check_for_status('VERIFY_RESIZE') == 'VERIFY_RESIZE':
#                # now issue the command to CONFIRM RESIZE
#                post_body = json.dumps({'confirmResize': None})
#                url = '/servers/%s/action' % self._target['id']
#                (response, body) = self._manager.request('POST',
#                                                         url,
#                                                         body=post_body)
#                if (response.status != 204):
#                    self._logger.error("response: %s" % response)
#                    raise Exception
#
#                self._logger.info(
#                    'CONFIRMING RESIZE of machine %s [%.1f secs elapsed]' %
#                    (self._target['id'], self.elapsed())
#                )
#                self._state.set_instance_state(self._target['id'],
#                                               (self._target,
#                                                'CONFIRM_RESIZE'))
#
#                # change states
#                self._retry_state = self.States.ACTIVE_CHECK
#
#            return False
#
#        elif self._retry_state == self.States.ACTIVE_CHECK:
#            if not self._check_for_status('ACTIVE'):
#                return False
#            else:
#                server = self._manager.get_server(self._target['id'])
#
#                # find the private IP of the server
#                try:
#                    (_, network) = server['addresses'].popitem()
#                    ip = network[0]['addr']
#                except KeyError:
#                    self._logger.error(
#                        'could not get ip address for machine %s' %
#                        self._target['id']
#                    )
#                    raise Exception
#
#                self._logger.info(
#                    'machine %s: VERIFY_RESIZE -> ACTIVE [%.1f sec elapsed]' %
#                    (self._target['id'], self.elapsed())
#                )
#                self._state.set_instance_state(self._target['id'],
#                                               (self._target, 'ACTIVE'))
#
#                return True
#
#        else:
#            # should never get here
#            self._logger.error('Unexpected state')
#            raise Exception
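#
# If this commented-out resize test is revived, it would also need
# `import json` for the raw manager.request() calls above. Alternatively,
# those calls could presumably go through the typed client used by
# TestRebootVM, assuming it exposes resize/confirm_resize calls analogous
# to reboot (hypothetical sketch; `server_id` is a placeholder):
#
#     response, body = manager.servers_client.resize(server_id, new_size)
#     response, body = manager.servers_client.confirm_resize(server_id)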