# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

Cao Xuan Hoang36fe23c2016-08-25 16:11:14 +070015import collections
Vitaly Gridnev44ece752015-12-10 19:27:25 +030016import copy
Luigi Toscanob61567a2015-03-11 18:35:33 +010017
18import six
Masayuki Igawabfa07602015-01-20 18:47:17 +090019
Sergey Lukjanov03c95c82013-12-10 16:42:37 +040020from tempest import config
Luigi Toscanob61567a2015-03-11 18:35:33 +010021from tempest import exceptions
Jordan Pittier9e227c52016-02-09 14:35:18 +010022from tempest.lib.common.utils import test_utils
Sergey Lukjanov03c95c82013-12-10 16:42:37 +040023import tempest.test
24
25
26CONF = config.CONF
27
Luigi Toscanob61567a2015-03-11 18:35:33 +010028"""Default templates.
29There should always be at least a master1 and a worker1 node
30group template."""
Vitaly Gridnev44ece752015-12-10 19:27:25 +030031BASE_VANILLA_DESC = {
32 'NODES': {
33 'master1': {
34 'count': 1,
35 'node_processes': ['namenode', 'resourcemanager',
36 'hiveserver']
37 },
38 'master2': {
39 'count': 1,
40 'node_processes': ['oozie', 'historyserver',
41 'secondarynamenode']
42 },
43 'worker1': {
44 'count': 1,
45 'node_processes': ['datanode', 'nodemanager'],
46 'node_configs': {
47 'MapReduce': {
48 'yarn.app.mapreduce.am.resource.mb': 256,
49 'yarn.app.mapreduce.am.command-opts': '-Xmx256m'
Luigi Toscanob61567a2015-03-11 18:35:33 +010050 },
Vitaly Gridnev44ece752015-12-10 19:27:25 +030051 'YARN': {
52 'yarn.scheduler.minimum-allocation-mb': 256,
53 'yarn.scheduler.maximum-allocation-mb': 1024,
54 'yarn.nodemanager.vmem-check-enabled': False
Luigi Toscanob61567a2015-03-11 18:35:33 +010055 }
56 }
Vitaly Gridnev44ece752015-12-10 19:27:25 +030057 }
58 },
59 'cluster_configs': {
60 'HDFS': {
61 'dfs.replication': 1
62 }
63 }
64}
65
66BASE_SPARK_DESC = {
67 'NODES': {
68 'master1': {
69 'count': 1,
70 'node_processes': ['namenode', 'master']
71 },
72 'worker1': {
73 'count': 1,
74 'node_processes': ['datanode', 'slave']
75 }
76 },
77 'cluster_configs': {
78 'HDFS': {
79 'dfs.replication': 1
80 }
81 }
82}
83
84BASE_CDH_DESC = {
85 'NODES': {
86 'master1': {
87 'count': 1,
88 'node_processes': ['CLOUDERA_MANAGER']
89 },
90 'master2': {
91 'count': 1,
92 'node_processes': ['HDFS_NAMENODE',
93 'YARN_RESOURCEMANAGER']
94 },
95 'master3': {
96 'count': 1,
97 'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
98 'HDFS_SECONDARYNAMENODE',
99 'HIVE_METASTORE', 'HIVE_SERVER2']
100 },
101 'worker1': {
102 'count': 1,
103 'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
104 }
105 },
106 'cluster_configs': {
107 'HDFS': {
108 'dfs_replication': 1
109 }
110 }
111}
112
113
114DEFAULT_TEMPLATES = {
Cao Xuan Hoang36fe23c2016-08-25 16:11:14 +0700115 'vanilla': collections.OrderedDict([
Vitaly Gridnev44ece752015-12-10 19:27:25 +0300116 ('2.6.0', copy.deepcopy(BASE_VANILLA_DESC)),
117 ('2.7.1', copy.deepcopy(BASE_VANILLA_DESC)),
Luigi Toscanob61567a2015-03-11 18:35:33 +0100118 ('1.2.1', {
119 'NODES': {
120 'master1': {
121 'count': 1,
122 'node_processes': ['namenode', 'jobtracker']
123 },
124 'worker1': {
125 'count': 1,
126 'node_processes': ['datanode', 'tasktracker'],
127 'node_configs': {
128 'HDFS': {
129 'Data Node Heap Size': 1024
130 },
131 'MapReduce': {
132 'Task Tracker Heap Size': 1024
133 }
134 }
135 }
136 },
137 'cluster_configs': {
138 'HDFS': {
139 'dfs.replication': 1
140 },
141 'MapReduce': {
142 'mapred.map.tasks.speculative.execution': False,
143 'mapred.child.java.opts': '-Xmx500m'
144 },
145 'general': {
146 'Enable Swift': False
147 }
148 }
149 })
150 ]),
Cao Xuan Hoang36fe23c2016-08-25 16:11:14 +0700151 'hdp': collections.OrderedDict([
Luigi Toscanob61567a2015-03-11 18:35:33 +0100152 ('2.0.6', {
153 'NODES': {
154 'master1': {
155 'count': 1,
156 'node_processes': ['NAMENODE', 'SECONDARY_NAMENODE',
157 'ZOOKEEPER_SERVER', 'AMBARI_SERVER',
158 'HISTORYSERVER', 'RESOURCEMANAGER',
159 'GANGLIA_SERVER', 'NAGIOS_SERVER',
160 'OOZIE_SERVER']
161 },
162 'worker1': {
163 'count': 1,
164 'node_processes': ['HDFS_CLIENT', 'DATANODE',
165 'YARN_CLIENT', 'ZOOKEEPER_CLIENT',
166 'MAPREDUCE2_CLIENT', 'NODEMANAGER',
167 'PIG', 'OOZIE_CLIENT']
168 }
169 },
170 'cluster_configs': {
171 'HDFS': {
172 'dfs.replication': 1
173 }
174 }
175 })
176 ]),
Cao Xuan Hoang36fe23c2016-08-25 16:11:14 +0700177 'spark': collections.OrderedDict([
Vitaly Gridnev44ece752015-12-10 19:27:25 +0300178 ('1.0.0', copy.deepcopy(BASE_SPARK_DESC)),
179 ('1.3.1', copy.deepcopy(BASE_SPARK_DESC))
Luigi Toscanob61567a2015-03-11 18:35:33 +0100180 ]),
Cao Xuan Hoang36fe23c2016-08-25 16:11:14 +0700181 'cdh': collections.OrderedDict([
Vitaly Gridnev44ece752015-12-10 19:27:25 +0300182 ('5.4.0', copy.deepcopy(BASE_CDH_DESC)),
183 ('5.3.0', copy.deepcopy(BASE_CDH_DESC)),
184 ('5', copy.deepcopy(BASE_CDH_DESC))
Luigi Toscanob61567a2015-03-11 18:35:33 +0100185 ]),
Luigi Toscanob61567a2015-03-11 18:35:33 +0100186}
187
Sergey Lukjanov03c95c82013-12-10 16:42:37 +0400188
189class BaseDataProcessingTest(tempest.test.BaseTestCase):
Sergey Lukjanov03c95c82013-12-10 16:42:37 +0400190
Andrea Frittolib21de6c2015-02-06 20:12:38 +0000191 credentials = ['primary']
192
Sergey Lukjanov03c95c82013-12-10 16:42:37 +0400193 @classmethod
Rohan Kanade30575822015-02-03 12:04:43 +0530194 def skip_checks(cls):
195 super(BaseDataProcessingTest, cls).skip_checks()
Sergey Lukjanov9c95a252014-03-13 23:59:22 +0400196 if not CONF.service_available.sahara:
Yaroslav Lobankov93aa8192014-04-01 20:03:25 +0400197 raise cls.skipException('Sahara support is required')
Luigi Toscanob61567a2015-03-11 18:35:33 +0100198 cls.default_plugin = cls._get_default_plugin()
Zhi Kun Liu2259c972014-03-27 02:11:10 -0500199
Rohan Kanade30575822015-02-03 12:04:43 +0530200 @classmethod
Rohan Kanade30575822015-02-03 12:04:43 +0530201 def setup_clients(cls):
202 super(BaseDataProcessingTest, cls).setup_clients()
Yaroslav Lobankov72876772014-06-11 16:18:19 +0400203 cls.client = cls.os.data_processing_client
Sergey Lukjanov03c95c82013-12-10 16:42:37 +0400204
Rohan Kanade30575822015-02-03 12:04:43 +0530205 @classmethod
206 def resource_setup(cls):
207 super(BaseDataProcessingTest, cls).resource_setup()
208
Luigi Toscanob61567a2015-03-11 18:35:33 +0100209 cls.default_version = cls._get_default_version()
210 if cls.default_plugin is not None and cls.default_version is None:
211 raise exceptions.InvalidConfiguration(
212 message="No known Sahara plugin version was found")
Sergey Lukjanov03c95c82013-12-10 16:42:37 +0400213 cls.flavor_ref = CONF.compute.flavor_ref
Sergey Lukjanov03c95c82013-12-10 16:42:37 +0400214
215 # add lists for watched resources
216 cls._node_group_templates = []
Yaroslav Lobankov93aa8192014-04-01 20:03:25 +0400217 cls._cluster_templates = []
Yaroslav Lobankov4267bcc2014-04-25 13:25:03 +0400218 cls._data_sources = []
Yaroslav Lobankovd5dcf192014-05-21 13:58:10 +0400219 cls._job_binary_internals = []
Yaroslav Lobankov74c923b2014-05-21 13:13:07 +0400220 cls._job_binaries = []
Yaroslav Lobankovc04ae232014-07-04 16:13:55 +0400221 cls._jobs = []
Sergey Lukjanov03c95c82013-12-10 16:42:37 +0400222
223 @classmethod
Andrea Frittoli581c3932014-09-15 13:14:53 +0100224 def resource_cleanup(cls):
Yaroslav Lobankov93aa8192014-04-01 20:03:25 +0400225 cls.cleanup_resources(getattr(cls, '_cluster_templates', []),
226 cls.client.delete_cluster_template)
227 cls.cleanup_resources(getattr(cls, '_node_group_templates', []),
228 cls.client.delete_node_group_template)
Yaroslav Lobankovc04ae232014-07-04 16:13:55 +0400229 cls.cleanup_resources(getattr(cls, '_jobs', []), cls.client.delete_job)
Yaroslav Lobankov74c923b2014-05-21 13:13:07 +0400230 cls.cleanup_resources(getattr(cls, '_job_binaries', []),
231 cls.client.delete_job_binary)
Yaroslav Lobankovc04ae232014-07-04 16:13:55 +0400232 cls.cleanup_resources(getattr(cls, '_job_binary_internals', []),
233 cls.client.delete_job_binary_internal)
234 cls.cleanup_resources(getattr(cls, '_data_sources', []),
235 cls.client.delete_data_source)
Andrea Frittoli581c3932014-09-15 13:14:53 +0100236 super(BaseDataProcessingTest, cls).resource_cleanup()
Sergey Lukjanov03c95c82013-12-10 16:42:37 +0400237
Yaroslav Lobankov93aa8192014-04-01 20:03:25 +0400238 @staticmethod
239 def cleanup_resources(resource_id_list, method):
240 for resource_id in resource_id_list:
Jordan Pittier9e227c52016-02-09 14:35:18 +0100241 test_utils.call_and_ignore_notfound_exc(method, resource_id)
Yaroslav Lobankov93aa8192014-04-01 20:03:25 +0400242
Sergey Lukjanov03c95c82013-12-10 16:42:37 +0400243 @classmethod
244 def create_node_group_template(cls, name, plugin_name, hadoop_version,
245 node_processes, flavor_id,
246 node_configs=None, **kwargs):
247 """Creates watched node group template with specified params.
248
249 It supports passing additional params using kwargs and returns created
250 object. All resources created in this method will be automatically
251 removed in tearDownClass method.
252 """
David Kranz77f57202015-02-09 14:10:04 -0500253 resp_body = cls.client.create_node_group_template(name, plugin_name,
254 hadoop_version,
255 node_processes,
256 flavor_id,
257 node_configs,
258 **kwargs)
Yaroslav Lobankov1662b0e2015-08-10 16:48:07 +0300259 resp_body = resp_body['node_group_template']
Sergey Lukjanov03c95c82013-12-10 16:42:37 +0400260 # store id of created node group template
Yaroslav Lobankov2f8525e2014-07-21 16:40:23 +0400261 cls._node_group_templates.append(resp_body['id'])
Sergey Lukjanov03c95c82013-12-10 16:42:37 +0400262
Yaroslav Lobankov2f8525e2014-07-21 16:40:23 +0400263 return resp_body
Yaroslav Lobankov93aa8192014-04-01 20:03:25 +0400264
265 @classmethod
266 def create_cluster_template(cls, name, plugin_name, hadoop_version,
267 node_groups, cluster_configs=None, **kwargs):
268 """Creates watched cluster template with specified params.
269
270 It supports passing additional params using kwargs and returns created
271 object. All resources created in this method will be automatically
272 removed in tearDownClass method.
273 """
David Kranz77f57202015-02-09 14:10:04 -0500274 resp_body = cls.client.create_cluster_template(name, plugin_name,
275 hadoop_version,
276 node_groups,
277 cluster_configs,
278 **kwargs)
Yaroslav Lobankov1662b0e2015-08-10 16:48:07 +0300279 resp_body = resp_body['cluster_template']
Yaroslav Lobankov93aa8192014-04-01 20:03:25 +0400280 # store id of created cluster template
Yaroslav Lobankov2f8525e2014-07-21 16:40:23 +0400281 cls._cluster_templates.append(resp_body['id'])
Yaroslav Lobankov93aa8192014-04-01 20:03:25 +0400282
Yaroslav Lobankov2f8525e2014-07-21 16:40:23 +0400283 return resp_body
Yaroslav Lobankov4267bcc2014-04-25 13:25:03 +0400284
285 @classmethod
286 def create_data_source(cls, name, type, url, **kwargs):
287 """Creates watched data source with specified params.
288
289 It supports passing additional params using kwargs and returns created
290 object. All resources created in this method will be automatically
291 removed in tearDownClass method.
292 """
David Kranz77f57202015-02-09 14:10:04 -0500293 resp_body = cls.client.create_data_source(name, type, url, **kwargs)
Yaroslav Lobankov1662b0e2015-08-10 16:48:07 +0300294 resp_body = resp_body['data_source']
Yaroslav Lobankov4267bcc2014-04-25 13:25:03 +0400295 # store id of created data source
Yaroslav Lobankov2f8525e2014-07-21 16:40:23 +0400296 cls._data_sources.append(resp_body['id'])
Yaroslav Lobankov4267bcc2014-04-25 13:25:03 +0400297
Yaroslav Lobankov2f8525e2014-07-21 16:40:23 +0400298 return resp_body
Yaroslav Lobankovd5dcf192014-05-21 13:58:10 +0400299
300 @classmethod
301 def create_job_binary_internal(cls, name, data):
302 """Creates watched job binary internal with specified params.
303
304 It returns created object. All resources created in this method will
305 be automatically removed in tearDownClass method.
306 """
David Kranz77f57202015-02-09 14:10:04 -0500307 resp_body = cls.client.create_job_binary_internal(name, data)
Yaroslav Lobankov1662b0e2015-08-10 16:48:07 +0300308 resp_body = resp_body['job_binary_internal']
Yaroslav Lobankovd5dcf192014-05-21 13:58:10 +0400309 # store id of created job binary internal
Yaroslav Lobankov2f8525e2014-07-21 16:40:23 +0400310 cls._job_binary_internals.append(resp_body['id'])
Yaroslav Lobankovd5dcf192014-05-21 13:58:10 +0400311
Yaroslav Lobankov2f8525e2014-07-21 16:40:23 +0400312 return resp_body
Yaroslav Lobankov74c923b2014-05-21 13:13:07 +0400313
Yaroslav Lobankovc04ae232014-07-04 16:13:55 +0400314 @classmethod
Yaroslav Lobankov74c923b2014-05-21 13:13:07 +0400315 def create_job_binary(cls, name, url, extra=None, **kwargs):
316 """Creates watched job binary with specified params.
317
318 It supports passing additional params using kwargs and returns created
319 object. All resources created in this method will be automatically
320 removed in tearDownClass method.
321 """
David Kranz77f57202015-02-09 14:10:04 -0500322 resp_body = cls.client.create_job_binary(name, url, extra, **kwargs)
Yaroslav Lobankov1662b0e2015-08-10 16:48:07 +0300323 resp_body = resp_body['job_binary']
Yaroslav Lobankov74c923b2014-05-21 13:13:07 +0400324 # store id of created job binary
Yaroslav Lobankov2f8525e2014-07-21 16:40:23 +0400325 cls._job_binaries.append(resp_body['id'])
Yaroslav Lobankov74c923b2014-05-21 13:13:07 +0400326
Yaroslav Lobankov2f8525e2014-07-21 16:40:23 +0400327 return resp_body
Yaroslav Lobankovc04ae232014-07-04 16:13:55 +0400328
329 @classmethod
330 def create_job(cls, name, job_type, mains, libs=None, **kwargs):
331 """Creates watched job with specified params.
332
333 It supports passing additional params using kwargs and returns created
334 object. All resources created in this method will be automatically
335 removed in tearDownClass method.
336 """
David Kranz77f57202015-02-09 14:10:04 -0500337 resp_body = cls.client.create_job(name,
338 job_type, mains, libs, **kwargs)
Yaroslav Lobankov1662b0e2015-08-10 16:48:07 +0300339 resp_body = resp_body['job']
Yaroslav Lobankovc04ae232014-07-04 16:13:55 +0400340 # store id of created job
341 cls._jobs.append(resp_body['id'])
342
343 return resp_body
Luigi Toscanob61567a2015-03-11 18:35:33 +0100344
345 @classmethod
346 def _get_default_plugin(cls):
347 """Returns the default plugin used for testing."""
348 if len(CONF.data_processing_feature_enabled.plugins) == 0:
349 return None
350
351 for plugin in CONF.data_processing_feature_enabled.plugins:
Joe H. Rahmea72f2c62016-07-11 16:28:19 +0200352 if plugin in DEFAULT_TEMPLATES:
Luigi Toscanob61567a2015-03-11 18:35:33 +0100353 break
354 else:
355 plugin = ''
356 return plugin
357
358 @classmethod
359 def _get_default_version(cls):
360 """Returns the default plugin version used for testing.
Ken'ichi Ohmichi9e3dac02015-11-19 07:01:07 +0000361
Luigi Toscanob61567a2015-03-11 18:35:33 +0100362 This is gathered separately from the plugin to allow
363 the usage of plugin name in skip_checks. This method is
364 rather invoked into resource_setup, which allows API calls
365 and exceptions.
366 """
367 if not cls.default_plugin:
368 return None
Yaroslav Lobankov1662b0e2015-08-10 16:48:07 +0300369 plugin = cls.client.get_plugin(cls.default_plugin)['plugin']
Luigi Toscanob61567a2015-03-11 18:35:33 +0100370
371 for version in DEFAULT_TEMPLATES[cls.default_plugin].keys():
372 if version in plugin['versions']:
373 break
374 else:
375 version = None
376
377 return version
378
379 @classmethod
380 def get_node_group_template(cls, nodegroup='worker1'):
381 """Returns a node group template for the default plugin."""
382 try:
383 plugin_data = (
384 DEFAULT_TEMPLATES[cls.default_plugin][cls.default_version]
385 )
386 nodegroup_data = plugin_data['NODES'][nodegroup]
387 node_group_template = {
388 'description': 'Test node group template',
389 'plugin_name': cls.default_plugin,
390 'hadoop_version': cls.default_version,
391 'node_processes': nodegroup_data['node_processes'],
392 'flavor_id': cls.flavor_ref,
393 'node_configs': nodegroup_data.get('node_configs', {}),
394 }
395 return node_group_template
396 except (IndexError, KeyError):
397 return None
398
399 @classmethod
400 def get_cluster_template(cls, node_group_template_ids=None):
401 """Returns a cluster template for the default plugin.
Ken'ichi Ohmichi9e3dac02015-11-19 07:01:07 +0000402
Luigi Toscanob61567a2015-03-11 18:35:33 +0100403 node_group_template_defined contains the type and ID of pre-defined
404 node group templates that have to be used in the cluster template
405 (instead of dynamically defining them with 'node_processes').
406 """
407 if node_group_template_ids is None:
408 node_group_template_ids = {}
409 try:
410 plugin_data = (
411 DEFAULT_TEMPLATES[cls.default_plugin][cls.default_version]
412 )
413
414 all_node_groups = []
415 for ng_name, ng_data in six.iteritems(plugin_data['NODES']):
416 node_group = {
417 'name': '%s-node' % (ng_name),
418 'flavor_id': cls.flavor_ref,
419 'count': ng_data['count']
420 }
421 if ng_name in node_group_template_ids.keys():
422 # node group already defined, use it
423 node_group['node_group_template_id'] = (
424 node_group_template_ids[ng_name]
425 )
426 else:
427 # node_processes list defined on-the-fly
428 node_group['node_processes'] = ng_data['node_processes']
429 if 'node_configs' in ng_data:
430 node_group['node_configs'] = ng_data['node_configs']
431 all_node_groups.append(node_group)
432
433 cluster_template = {
434 'description': 'Test cluster template',
435 'plugin_name': cls.default_plugin,
436 'hadoop_version': cls.default_version,
437 'cluster_configs': plugin_data.get('cluster_configs', {}),
438 'node_groups': all_node_groups,
439 }
440 return cluster_template
441 except (IndexError, KeyError):
442 return None