# Copyright 2011 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14"""Defines various sub-classes of the `StressTestCase` and
David Kranz779c7f82012-05-01 16:50:32 -040015`PendingServerAction` class. Sub-classes of StressTestCase implement various
David Kranz6308ec22012-02-22 09:36:48 -050016API calls on the Nova cluster having to do with Server Actions. Each
David Kranz779c7f82012-05-01 16:50:32 -040017sub-class will have a corresponding PendingServerAction. These pending
David Kranz6308ec22012-02-22 09:36:48 -050018actions veriy that the API call was successful or not."""

import random
import time

import pending_action
from tempest.exceptions import Duplicate
import test_case
from utils.util import *

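# Each test case below implements run(), which issues a single Nova API
# call and returns a pending-action object; a stress driver (outside this
# module) is expected to poll that object's retry() until it returns True,
# at which point the action has been verified.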

class TestRebootVM(test_case.StressTestCase):
    """Reboot a server"""

    def run(self, manager, state, *pargs, **kwargs):
        """
        Send an HTTP POST request to the nova cluster to reboot a random
        server, and update the entry in `state` to indicate that it is
        rebooting.
        `manager` : Manager object
        `state` : `State` object describing our view of the state of the
                  cluster
        `pargs` : positional arguments
        `kwargs` : keyword arguments, which include:
            `timeout` : how long to wait before raising an Exception
            `type` : reboot type [SOFT or HARD] (default is SOFT)
        """

        vms = state.get_instances()
        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
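        # each value in `vms` is a (server, status) tuple, so v[1] is the
        # instance's current status string and v[0] its server record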
        # no active vms, so return None
        if not active_vms:
            self._logger.info('no ACTIVE instances to reboot')
            return

        _reboot_arg = kwargs.get('type', 'SOFT')

        # select an active vm to reboot, then send the request to the
        # nova controller
        target = random.choice(active_vms)
        reboot_target = target[0]
        # It seems that doing a reboot when in reboot is an error.
        try:
            response, body = manager.servers_client.reboot(
                reboot_target['id'],
                _reboot_arg)
        except Duplicate:
            return

        if response.status != 202:
            self._logger.error("response: %s" % response)
            raise Exception

        if _reboot_arg == 'SOFT':
            reboot_state = 'REBOOT'
        else:
            reboot_state = 'HARD_REBOOT'

        self._logger.info('waiting for machine %s to change to %s' %
                          (reboot_target['id'], reboot_state))

        return VerifyRebootVM(manager,
                              state,
                              reboot_target,
                              reboot_state=reboot_state)


class VerifyRebootVM(pending_action.PendingServerAction):
    """Class to verify that the reboot completed."""
    States = enum('REBOOT_CHECK', 'ACTIVE_CHECK')
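    # Verification runs as a small state machine: REBOOT_CHECK waits for
    # the server to reach the expected reboot state (or to have already
    # bounced back to ACTIVE), then ACTIVE_CHECK waits for it to settle
    # back to ACTIVE.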

    def __init__(self, manager, state, target_server,
                 reboot_state=None,
                 ip_addr=None):
        super(VerifyRebootVM, self).__init__(manager,
                                             state,
                                             target_server)
        self._reboot_state = reboot_state
        self._retry_state = self.States.REBOOT_CHECK

    def retry(self):
        """
        Check to see that the server of interest has actually rebooted.
        Update state to indicate that the server is running again.
        """
        # don't run reboot verification if the target machine has been
        # deleted or is going to be deleted
        target_id = self._target['id']
        if (target_id not in self._state.get_instances().keys() or
                self._state.get_instances()[target_id][1] == 'TERMINATING'):
            self._logger.debug('machine %s is deleted or TERMINATING' %
                               target_id)
            return True

        reboot_state = self._reboot_state
        if self._retry_state == self.States.REBOOT_CHECK:
            server_state = self._check_for_status(reboot_state)
            if server_state == reboot_state:
                self._logger.info('machine %s ACTIVE -> %s' %
                                  (self._target['id'], reboot_state))
                self._state.set_instance_state(self._target['id'],
                                               (self._target, reboot_state))
                self._retry_state = self.States.ACTIVE_CHECK
            elif server_state == 'ACTIVE':
                # machine must have gone ACTIVE -> REBOOT -> ACTIVE
                self._retry_state = self.States.ACTIVE_CHECK

        elif self._retry_state == self.States.ACTIVE_CHECK:
            if not self._check_for_status('ACTIVE'):
                return False
            target = self._target
            self._logger.info('machine %s %s -> ACTIVE [%.1f secs elapsed]' %
                              (target['id'], reboot_state, self.elapsed()))
            self._state.set_instance_state(target['id'],
                                           (target, 'ACTIVE'))

        return True

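# A minimal sketch of how a driver might exercise the classes above
# (an assumption for illustration: `manager` and `state` come from the
# stress framework, and the no-argument construction of TestRebootVM is
# not verified here):
#
#     pending = TestRebootVM().run(manager, state, type='SOFT')
#     while pending is not None and not pending.retry():
#         time.sleep(1)
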
# This code needs to be tested against a cluster that supports resize.
#class TestResizeVM(test_case.StressTestCase):
#    """Resize a server (change flavors)"""
#
#    def run(self, manager, state, *pargs, **kwargs):
#        """
#        Send an HTTP POST request to the nova cluster to resize a random
#        server. Update `state` to indicate the server is resizing.
#
#        `manager` : Manager object.
#        `state` : `State` object describing our view of state of cluster
#        `pargs` : positional arguments
#        `kwargs` : keyword arguments, which include:
#            `timeout` : how long to wait before raising an Exception
#        """
#
#        vms = state.get_instances()
#        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
#        # no active vms, so return None
#        if not active_vms:
#            self._logger.debug('no ACTIVE instances to resize')
#            return
#
#        target = random.choice(active_vms)
#        resize_target = target[0]
#        print resize_target
#
#        _timeout = kwargs.get('timeout', 600)
#
#        # determine current flavor type, and resize to a different type:
#        # m1.tiny -> m1.small, m1.small -> m1.tiny
#        curr_size = int(resize_target['flavor']['id'])
#        if curr_size == 1:
#            new_size = 2
#        else:
#            new_size = 1
#        flavor_type = {'flavorRef': new_size}  # resize to the other flavor
#
#        post_body = json.dumps({'resize': flavor_type})
#        url = '/servers/%s/action' % resize_target['id']
#        (response, body) = manager.request('POST',
#                                           url,
#                                           body=post_body)
#
#        if response.status != 202:
#            self._logger.error("response: %s" % response)
#            raise Exception
#
#        state_name = check_for_status(manager, resize_target, 'RESIZE')
#
#        if state_name == 'RESIZE':
#            self._logger.info('machine %s: ACTIVE -> RESIZE' %
#                              resize_target['id'])
#            state.set_instance_state(resize_target['id'],
#                                     (resize_target, 'RESIZE'))
#
#        return VerifyResizeVM(manager,
#                              state,
#                              resize_target,
#                              state_name=state_name,
#                              timeout=_timeout)
#
#class VerifyResizeVM(pending_action.PendingServerAction):
#    """Verify that resizing of a VM was successful"""
#    States = enum('VERIFY_RESIZE_CHECK', 'ACTIVE_CHECK')
#
#    def __init__(self, manager, state, created_server,
#                 state_name=None,
#                 timeout=300):
#        super(VerifyResizeVM, self).__init__(manager,
#                                             state,
#                                             created_server,
#                                             timeout=timeout)
#        self._retry_state = self.States.VERIFY_RESIZE_CHECK
#        self._state_name = state_name
#
#    def retry(self):
#        """
#        Check to see that the server was actually resized, and change the
#        `state` of the server to running again.
#        """
#        # don't run resize verification if the target machine has been
#        # deleted or is going to be deleted
#        if (self._target['id'] not in self._state.get_instances().keys() or
#                self._state.get_instances()[self._target['id']][1] ==
#                'TERMINATING'):
#            self._logger.debug('machine %s is deleted or TERMINATING' %
#                               self._target['id'])
#            return True
#
#        if self._retry_state == self.States.VERIFY_RESIZE_CHECK:
#            if self._check_for_status('VERIFY_RESIZE') == 'VERIFY_RESIZE':
#                # now issue command to CONFIRM RESIZE
#                post_body = json.dumps({'confirmResize': None})
#                url = '/servers/%s/action' % self._target['id']
#                (response, body) = self._manager.request('POST',
#                                                         url,
#                                                         body=post_body)
#                if response.status != 204:
#                    self._logger.error("response: %s" % response)
#                    raise Exception
#
#                self._logger.info(
#                    'CONFIRMING RESIZE of machine %s [%.1f secs elapsed]' %
#                    (self._target['id'], self.elapsed())
#                )
#                self._state.set_instance_state(self._target['id'],
#                                               (self._target,
#                                                'CONFIRM_RESIZE'))
#
#                # change states
#                self._retry_state = self.States.ACTIVE_CHECK
#
#            return False
#
#        elif self._retry_state == self.States.ACTIVE_CHECK:
#            if not self._check_for_status('ACTIVE'):
#                return False
#            else:
#                server = self._manager.get_server(self._target['id'])
#
#                # find the private IP of the server
#                try:
#                    (_, network) = server['addresses'].popitem()
#                    ip = network[0]['addr']
#                except KeyError:
#                    self._logger.error(
#                        'could not get ip address for machine %s' %
#                        self._target['id']
#                    )
#                    raise Exception
#
#                self._logger.info(
#                    'machine %s: VERIFY_RESIZE -> ACTIVE [%.1f sec elapsed]' %
#                    (self._target['id'], self.elapsed())
#                )
#                self._state.set_instance_state(self._target['id'],
#                                               (self._target, 'ACTIVE'))
#
#                return True
#
#        else:
#            # should never get here
#            self._logger.error('Unexpected state')
#            raise Exception