blob: 6b4f462ac9265eb83863acafd9942fd6f3425b61 [file] [log] [blame]
# Copyright 2011 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines various sub-classes of the `StressTestCase` and
`PendingServerAction` class. Sub-classes of StressTestCase implement various
API calls on the Nova cluster having to do with Server Actions. Each
sub-class will have a corresponding PendingServerAction. These pending
actions verify that the API call was successful or not."""
19
20
21# system imports
22import random
23import time
24
25# local imports
26import test_case
27import pending_action
David Kranz779c7f82012-05-01 16:50:32 -040028from tempest.exceptions import Duplicate
David Kranz6308ec22012-02-22 09:36:48 -050029from utils.util import *
30
31
class TestRebootVM(test_case.StressTestCase):
    """Reboot a random ACTIVE server in the cluster."""

    def run(self, manager, state, *pargs, **kwargs):
        """
        Send an HTTP POST request to the nova cluster to reboot a random
        server. Update state of object in `state` variable to indicate that
        it is rebooting.
        `manager` : Manager object
        `state`   : `State` object describing our view of state of cluster
        `pargs`   : positional arguments (unused)
        `kwargs`  : keyword arguments, which include:
                    `type` : reboot type [SOFT or HARD] (default is SOFT)

        Returns a `VerifyRebootVM` pending action when a reboot was issued,
        or None when there was no ACTIVE instance to reboot or the request
        was rejected as a duplicate.
        """

        vms = state.get_instances()
        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
        # no active vms, so return null
        if not active_vms:
            self._logger.info('no ACTIVE instances to reboot')
            return

        _reboot_arg = kwargs.get('type', 'SOFT')

        # select active vm to reboot and then send request to nova controller
        target = random.choice(active_vms)
        reboot_target = target[0]
        # It seems that doing a reboot when in reboot is an error.
        try:
            response, body = manager.servers_client.reboot(
                reboot_target['id'],
                _reboot_arg)
        except Duplicate:
            return

        # A reboot request is accepted asynchronously with 202; anything
        # else is a hard failure of the stress run.
        if (response.status != 202):
            self._logger.error("response: %s" % response)
            raise Exception("reboot of machine %s failed with status %s" %
                            (reboot_target['id'], response.status))

        # A SOFT reboot transitions through REBOOT; a HARD reboot through
        # HARD_REBOOT.
        if _reboot_arg == 'SOFT':
            reboot_state = 'REBOOT'
        else:
            reboot_state = 'HARD_REBOOT'

        self._logger.info('waiting for machine %s to change to %s' %
                          (reboot_target['id'], reboot_state))

        return VerifyRebootVM(manager,
                              state,
                              reboot_target,
                              reboot_state=reboot_state)
David Kranz6308ec22012-02-22 09:36:48 -050084
85
class VerifyRebootVM(pending_action.PendingServerAction):
    """Class to verify that the reboot completed."""
    # Two-phase verification: first wait for the server to show the
    # transient reboot status, then wait for it to return to ACTIVE.
    States = enum('REBOOT_CHECK', 'ACTIVE_CHECK')

    def __init__(self, manager, state, target_server,
                 reboot_state=None,
                 ip_addr=None):
        """
        `manager`       : Manager object
        `state`         : `State` object describing our view of the cluster
        `target_server` : server record (dict) of the machine being rebooted
        `reboot_state`  : transient status to wait for ('REBOOT' for a SOFT
                          reboot, 'HARD_REBOOT' for a HARD one)
        `ip_addr`       : accepted for interface parity; not used here
        """
        super(VerifyRebootVM, self).__init__(manager,
                                             state,
                                             target_server)
        # transient status we expect to observe before ACTIVE
        self._reboot_state = reboot_state
        # current phase of the verification state machine
        self._retry_state = self.States.REBOOT_CHECK

    def retry(self):
        """
        Check to see that the server of interest has actually rebooted. Update
        state to indicate that server is running again.

        Returns True when verification is complete (or abandoned because the
        target is gone), False — or implicitly None — to be retried later.
        """
        # don't run reboot verification if target machine has been
        # deleted or is going to be deleted
        if (self._target['id'] not in self._state.get_instances().keys() or
            self._state.get_instances()[self._target['id']][1] ==
            'TERMINATING'):
            self._logger.debug('machine %s is deleted or TERMINATING' %
                               self._target['id'])
            return True

        reboot_state = self._reboot_state
        if self._retry_state == self.States.REBOOT_CHECK:
            # Phase 1: wait for the transient reboot status.
            server_state = self._check_for_status(reboot_state)
            if server_state == reboot_state:
                self._logger.info('machine %s ACTIVE -> %s' %
                                  (self._target['id'], reboot_state))
                self._state.set_instance_state(self._target['id'],
                                               (self._target, reboot_state)
                                               )
                self._retry_state = self.States.ACTIVE_CHECK
            elif server_state == 'ACTIVE':
                # machine must have gone ACTIVE -> REBOOT ->ACTIVE
                # so fast we never observed the transient state
                self._retry_state = self.States.ACTIVE_CHECK
            # NOTE(review): this branch deliberately falls through and
            # returns None (falsy), so the pending action is retried.

        elif self._retry_state == self.States.ACTIVE_CHECK:
            # Phase 2: wait for the machine to come back ACTIVE.
            if not self._check_for_status('ACTIVE'):
                return False
            target = self._target
            self._logger.info('machine %s %s -> ACTIVE [%.1f secs elapsed]' %
                              (target['id'], reboot_state, self.elapsed()))
            self._state.set_instance_state(target['id'],
                                           (target, 'ACTIVE'))

            return True
137
138# This code needs to be tested against a cluster that supports resize.
139#class TestResizeVM(test_case.StressTestCase):
140# """Resize a server (change flavors)"""
141#
142# def run(self, manager, state, *pargs, **kwargs):
143# """
144# Send an HTTP POST request to the nova cluster to resize a random
145# server. Update `state` to indicate server is rebooting.
146#
147# `manager` : Manager object.
148# `state` : `State` object describing our view of state of cluster
149# `pargs` : positional arguments
150# `kwargs` : keyword arguments, which include:
151# `timeout` : how long to wait before issuing Exception
152# """
153#
154# vms = state.get_instances()
155# active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
156# # no active vms, so return null
157# if not active_vms:
158# self._logger.debug('no ACTIVE instances to resize')
159# return
160#
161# target = random.choice(active_vms)
162# resize_target = target[0]
163# print resize_target
164#
165# _timeout = kwargs.get('timeout', 600)
166#
167# # determine current flavor type, and resize to a different type
168# # m1.tiny -> m1.small, m1.small -> m1.tiny
169# curr_size = int(resize_target['flavor']['id'])
170# if curr_size == 1:
171# new_size = 2
172# else:
173# new_size = 1
174# flavor_type = { 'flavorRef': new_size } # resize to m1.small
175#
176# post_body = json.dumps({'resize' : flavor_type})
177# url = '/servers/%s/action' % resize_target['id']
178# (response, body) = manager.request('POST',
179# url,
180# body=post_body)
181#
182# if (response.status != 202):
183# self._logger.error("response: %s" % response)
184# raise Exception
185#
186# state_name = check_for_status(manager, resize_target, 'RESIZE')
187#
188# if state_name == 'RESIZE':
189# self._logger.info('machine %s: ACTIVE -> RESIZE' %
190# resize_target['id'])
191# state.set_instance_state(resize_target['id'],
192# (resize_target, 'RESIZE'))
193#
194# return VerifyResizeVM(manager,
195# state,
196# resize_target,
197# state_name=state_name,
198# timeout=_timeout)
199#
David Kranz779c7f82012-05-01 16:50:32 -0400200#class VerifyResizeVM(pending_action.PendingServerAction):
David Kranz6308ec22012-02-22 09:36:48 -0500201# """Verify that resizing of a VM was successful"""
202# States = enum('VERIFY_RESIZE_CHECK', 'ACTIVE_CHECK')
203#
204# def __init__(self, manager, state, created_server,
205# state_name=None,
206# timeout=300):
207# super(VerifyResizeVM, self).__init__(manager,
208# state,
209# created_server,
210# timeout=timeout)
211# self._retry_state = self.States.VERIFY_RESIZE_CHECK
212# self._state_name = state_name
213#
214# def retry(self):
215# """
216# Check to see that the server was actually resized. And change `state`
217# of server to running again.
218# """
219# # don't run resize if target machine has been deleted
220# # or is going to be deleted
221# if (self._target['id'] not in self._state.get_instances().keys() or
222# self._state.get_instances()[self._target['id']][1] ==
223# 'TERMINATING'):
224# self._logger.debug('machine %s is deleted or TERMINATING' %
225# self._target['id'])
226# return True
227#
David Kranz6308ec22012-02-22 09:36:48 -0500228# if self._retry_state == self.States.VERIFY_RESIZE_CHECK:
229# if self._check_for_status('VERIFY_RESIZE') == 'VERIFY_RESIZE':
230# # now issue command to CONFIRM RESIZE
231# post_body = json.dumps({'confirmResize' : null})
232# url = '/servers/%s/action' % self._target['id']
233# (response, body) = manager.request('POST',
234# url,
235# body=post_body)
236# if (response.status != 204):
237# self._logger.error("response: %s" % response)
238# raise Exception
239#
240# self._logger.info(
241# 'CONFIRMING RESIZE of machine %s [%.1f secs elapsed]' %
David Kranz779c7f82012-05-01 16:50:32 -0400242# (self._target['id'], self.elapsed())
David Kranz6308ec22012-02-22 09:36:48 -0500243# )
244# state.set_instance_state(self._target['id'],
245# (self._target, 'CONFIRM_RESIZE'))
246#
247# # change states
248# self._retry_state = self.States.ACTIVE_CHECK
249#
250# return False
251#
252# elif self._retry_state == self.States.ACTIVE_CHECK:
253# if not self._check_manager("ACTIVE"):
254# return False
255# else:
256# server = self._manager.get_server(self._target['id'])
257#
258# # Find private IP of server?
259# try:
260# (_, network) = server['addresses'].popitem()
261# ip = network[0]['addr']
262# except KeyError:
263# self._logger.error(
264# 'could not get ip address for machine %s' %
265# self._target['id']
266# )
267# raise Exception
268#
269# self._logger.info(
270# 'machine %s: VERIFY_RESIZE -> ACTIVE [%.1f sec elapsed]' %
David Kranz779c7f82012-05-01 16:50:32 -0400271# (self._target['id'], self.elapsed())
David Kranz6308ec22012-02-22 09:36:48 -0500272# )
273# self._state.set_instance_state(self._target['id'],
274# (self._target, 'ACTIVE'))
275#
276# return True
277#
278# else:
279# # should never get here
280# self._logger.error('Unexpected state')
281# raise Exception