# Copyright 2011 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 14 | """Defines various sub-classes of the `StressTestCase` and |
| 15 | `PendingAction` class. The sub-classes of StressTestCase implement various |
| 16 | API calls on the Nova cluster having to do with Server Actions. Each |
| 17 | sub-class will have a corresponding PendingAction. These pending |
| 18 | actions veriy that the API call was successful or not.""" |
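
# A minimal usage sketch (hypothetical driver loop; the real stress driver
# lives elsewhere in the framework): each test case's run() issues the API
# call and returns a PendingAction, whose retry() is then polled until it
# returns True.
#
#     test = TestRebootVM()  # constructor arguments, if any, elided
#     pending = test.run(manager, state, type='SOFT', timeout=300)
#     while pending is not None and not pending.retry():
#         time.sleep(1)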


# system imports
import random
import time

# local imports
import test_case
import pending_action
from tempest.exceptions import TimeoutException
from utils.util import *


class TestRebootVM(test_case.StressTestCase):
    """Reboot a server"""

    def run(self, manager, state, *pargs, **kwargs):
        """
        Send an HTTP POST request to the nova cluster to reboot a random
        server. Update the server's entry in `state` to indicate that it is
        rebooting.
        `manager` : Manager object
        `state` : `State` object describing our view of the cluster's state
        `pargs` : positional arguments
        `kwargs` : keyword arguments, which include:
            `timeout` : how long to wait before raising an exception
            `type` : reboot type [SOFT or HARD] (default is SOFT)
        """

        vms = state.get_instances()
        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
        # no active vms, so return None
        if not active_vms:
            self._logger.info('no ACTIVE instances to reboot')
            return

        _reboot_type = kwargs.get('type', 'SOFT')

        # select active vm to reboot and then send request to nova controller
        target = random.choice(active_vms)
        reboot_target = target[0]

        response, body = manager.servers_client.reboot(
            reboot_target['id'],
            _reboot_type)
        if (response.status != 202):
            self._logger.error("response: %s" % response)
            raise Exception

        if _reboot_type == 'SOFT':
            state_name = 'REBOOT'
        else:
            state_name = 'REBOOT'  # this is a bug, should be HARD_REBOOT

        self._logger.info('waiting for machine %s to change to %s' %
                          (reboot_target['id'], state_name))

        # check for state transition
        _resp, body = manager.servers_client.get_server(reboot_target['id'])
        if body['status'] == state_name:
            state_string = state_name
        else:
            # grab the state as we currently believe it to be
            temp_obj = state.get_instances()[reboot_target['id']]
            self._logger.debug(
                "machine %s in state %s" %
                (reboot_target['id'], temp_obj[1])
            )
            state_string = temp_obj[1]

        if state_string == state_name:
            self._logger.info('machine %s ACTIVE -> %s' %
                              (reboot_target['id'], state_name))
            state.set_instance_state(reboot_target['id'],
                                     (reboot_target, state_name))

        return VerifyRebootVM(manager,
                              state,
                              reboot_target,
                              reboot_type=_reboot_type,
                              state_name=state_string)


class VerifyRebootVM(pending_action.PendingAction):
    """Class to verify that the reboot completed."""
    States = enum('REBOOT_CHECK', 'ACTIVE_CHECK')
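    # Verification runs as a small two-phase state machine: REBOOT_CHECK waits
    # for the server to report the reboot status, then ACTIVE_CHECK waits for
    # it to return to ACTIVE. If the server still looked ACTIVE when
    # TestRebootVM.run() returned, we start in REBOOT_CHECK; otherwise we go
    # straight to ACTIVE_CHECK.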

    def __init__(self, manager, state, target_server,
                 reboot_type=None,
                 state_name=None,
                 ip_addr=None):
        super(VerifyRebootVM, self).__init__(manager,
                                             state,
                                             target_server)
        # FIX ME: this is a nova bug
        if reboot_type == 'SOFT':
            self._reboot_state = 'REBOOT'
        else:
            self._reboot_state = 'REBOOT'  # should be HARD_REBOOT

        if state_name == 'ACTIVE':  # was still active, check to see if REBOOT
            self._retry_state = self.States.REBOOT_CHECK
        else:  # was REBOOT, so now check for ACTIVE
            self._retry_state = self.States.ACTIVE_CHECK
    def retry(self):
        """
        Check to see that the server of interest has actually rebooted. Update
        state to indicate that the server is running again.
        """
        # don't run reboot verification if the target machine has been
        # deleted or is going to be deleted
        if (self._target['id'] not in self._state.get_instances().keys() or
                self._state.get_instances()[self._target['id']][1] ==
                'TERMINATING'):
            self._logger.debug('machine %s is deleted or TERMINATING' %
                               self._target['id'])
            return True

        if time.time() - self._start_time > self._timeout:
            raise TimeoutException
        reboot_state = self._reboot_state
        if self._retry_state == self.States.REBOOT_CHECK:
            server_state = self._check_for_status(reboot_state)
            if server_state == reboot_state:
                self._logger.info('machine %s ACTIVE -> %s' %
                                  (self._target['id'], reboot_state))
                self._state.set_instance_state(self._target['id'],
                                               (self._target, reboot_state))
                self._retry_state = self.States.ACTIVE_CHECK
            elif server_state == 'ACTIVE':
                # machine must have gone ACTIVE -> REBOOT -> ACTIVE
                self._retry_state = self.States.ACTIVE_CHECK
            # not yet confirmed ACTIVE again, so keep retrying
            return False

        elif self._retry_state == self.States.ACTIVE_CHECK:
            if not self._check_for_status('ACTIVE'):
                return False
            target = self._target
            self._logger.info(
                'machine %s REBOOT -> ACTIVE [%.1f secs elapsed]' %
                (target['id'], time.time() - self._start_time))
            self._state.set_instance_state(target['id'],
                                           (target, 'ACTIVE'))

        return True

# This code needs to be tested against a cluster that supports resize.
#class TestResizeVM(test_case.StressTestCase):
#    """Resize a server (change flavors)"""
#
#    def run(self, manager, state, *pargs, **kwargs):
#        """
#        Send an HTTP POST request to the nova cluster to resize a random
#        server. Update `state` to indicate the server is resizing.
#
#        `manager` : Manager object
#        `state` : `State` object describing our view of the cluster's state
#        `pargs` : positional arguments
#        `kwargs` : keyword arguments, which include:
#            `timeout` : how long to wait before raising an exception
#        """
#
#        vms = state.get_instances()
#        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
#        # no active vms, so return None
#        if not active_vms:
#            self._logger.debug('no ACTIVE instances to resize')
#            return
#
#        target = random.choice(active_vms)
#        resize_target = target[0]
#        print resize_target
#
#        _timeout = kwargs.get('timeout', 600)
#
#        # determine current flavor type, and resize to a different type
#        # m1.tiny -> m1.small, m1.small -> m1.tiny
#        curr_size = int(resize_target['flavor']['id'])
#        if curr_size == 1:
#            new_size = 2
#        else:
#            new_size = 1
#        flavor_type = {'flavorRef': new_size}  # flavor to resize to
#
#        post_body = json.dumps({'resize': flavor_type})
#        url = '/servers/%s/action' % resize_target['id']
#        (response, body) = manager.request('POST',
#                                           url,
#                                           body=post_body)
#
#        if (response.status != 202):
#            self._logger.error("response: %s" % response)
#            raise Exception
#
#        state_name = check_for_status(manager, resize_target, 'RESIZE')
#
#        if state_name == 'RESIZE':
#            self._logger.info('machine %s: ACTIVE -> RESIZE' %
#                              resize_target['id'])
#            state.set_instance_state(resize_target['id'],
#                                     (resize_target, 'RESIZE'))
#
#        return VerifyResizeVM(manager,
#                              state,
#                              resize_target,
#                              state_name=state_name,
#                              timeout=_timeout)
#
#class VerifyResizeVM(pending_action.PendingAction):
#    """Verify that resizing of a VM was successful"""
#    States = enum('VERIFY_RESIZE_CHECK', 'ACTIVE_CHECK')
#
#    def __init__(self, manager, state, created_server,
#                 state_name=None,
#                 timeout=300):
#        super(VerifyResizeVM, self).__init__(manager,
#                                             state,
#                                             created_server,
#                                             timeout=timeout)
#        self._retry_state = self.States.VERIFY_RESIZE_CHECK
#        self._state_name = state_name
#
#    def retry(self):
#        """
#        Check that the server was actually resized, and update `state`
#        to mark the server as running again.
#        """
#        # don't run resize if target machine has been deleted
#        # or is going to be deleted
#        if (self._target['id'] not in self._state.get_instances().keys() or
#                self._state.get_instances()[self._target['id']][1] ==
#                'TERMINATING'):
#            self._logger.debug('machine %s is deleted or TERMINATING' %
#                               self._target['id'])
#            return True
#
#        if time.time() - self._start_time > self._timeout:
#            raise TimeoutException
#
#        if self._retry_state == self.States.VERIFY_RESIZE_CHECK:
#            if self._check_for_status('VERIFY_RESIZE') == 'VERIFY_RESIZE':
#                # now issue command to CONFIRM RESIZE
#                post_body = json.dumps({'confirmResize': None})
#                url = '/servers/%s/action' % self._target['id']
#                (response, body) = self._manager.request('POST',
#                                                         url,
#                                                         body=post_body)
#                if (response.status != 204):
#                    self._logger.error("response: %s" % response)
#                    raise Exception
#
#                self._logger.info(
#                    'CONFIRMING RESIZE of machine %s [%.1f secs elapsed]' %
#                    (self._target['id'], time.time() - self._start_time)
#                )
#                self._state.set_instance_state(self._target['id'],
#                                               (self._target, 'CONFIRM_RESIZE'))
#
#                # change states
#                self._retry_state = self.States.ACTIVE_CHECK
#
#            return False
#
#        elif self._retry_state == self.States.ACTIVE_CHECK:
#            if not self._check_for_status('ACTIVE'):
#                return False
#            else:
#                server = self._manager.get_server(self._target['id'])
#
#                # Find private IP of server?
#                try:
#                    (_, network) = server['addresses'].popitem()
#                    ip = network[0]['addr']
#                except KeyError:
#                    self._logger.error(
#                        'could not get ip address for machine %s' %
#                        self._target['id']
#                    )
#                    raise Exception
#
#                self._logger.info(
#                    'machine %s: VERIFY_RESIZE -> ACTIVE [%.1f sec elapsed]' %
#                    (self._target['id'], time.time() - self._start_time)
#                )
#                self._state.set_instance_state(self._target['id'],
#                                               (self._target, 'ACTIVE'))
#
#                return True
#
#        else:
#            # should never get here
#            self._logger.error('Unexpected state')
#            raise Exception