Merge "Drop remaining options for panko"
diff --git a/.zuul.yaml b/.zuul.yaml
index daa30ec..5d12ba6 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -33,18 +33,9 @@
devstack_services:
tempest: true
devstack_localrc:
- USE_PYTHON3: True
TEMPEST_PLUGINS: '"/opt/stack/telemetry-tempest-plugin /opt/stack/heat-tempest-plugin"'
GNOCCHI_ARCHIVE_POLICY_TEMPEST: "ceilometer-high-rate"
- # NOTE(jwysogla): We can define both of the variables. In versions, where
- # the ceilometer devstack plugin doesn't support the CEILOMETER_BACKENDS,
- # it'll just ignore it and use the CEILOMETER_BACKEND. In versions, where
- # CEILOMETER_BACKENDS is supported, the ceilometer devstack plugin will
- # just try to merge the variables, so the final contents in this casse will
- # be "gnocchi,sg-core"
- CEILOMETER_BACKEND: "gnocchi"
CEILOMETER_BACKENDS: "gnocchi,sg-core"
- PROMETHEUS_SERVICE_SCRAPE_TARGETS: "sg-core"
CEILOMETER_PIPELINE_INTERVAL: 15
CEILOMETER_ALARM_THRESHOLD: 6000000000
GLOBAL_VENV: False
@@ -66,6 +57,45 @@
parent: telemetry-tempest-base
- job:
+ name: telemetry-dsvm-integration-2024-2
+ parent: telemetry-tempest-base
+ override-checkout: stable/2024.2
+
+- job:
+ name: telemetry-dsvm-integration-2024-1
+ parent: telemetry-tempest-base
+ override-checkout: stable/2024.1
+
+- job:
+ name: telemetry-dsvm-integration-2023-2
+ parent: telemetry-tempest-base
+ override-checkout: stable/2023.2
+ vars: &no_prometheus_vars
+ devstack_localrc:
+ TEMPEST_PLUGINS: '"/opt/stack/telemetry-tempest-plugin /opt/stack/heat-tempest-plugin"'
+ GNOCCHI_ARCHIVE_POLICY_TEMPEST: "ceilometer-high-rate"
+ CEILOMETER_BACKENDS: "gnocchi"
+ CEILOMETER_PIPELINE_INTERVAL: 15
+ CEILOMETER_ALARM_THRESHOLD: 6000000000
+ GLOBAL_VENV: False
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ service_available:
+ sg_core: False
+ telemetry_services:
+ metric_backends: gnocchi
+ telemetry:
+ disable_ssl_certificate_validation: True
+ ceilometer_polling_interval: 15
+
+- job:
+ name: telemetry-dsvm-integration-2023-1
+ parent: telemetry-tempest-base
+ override-checkout: stable/2023.1
+ vars: *no_prometheus_vars
+
+- job:
name: telemetry-dsvm-integration
parent: telemetry-tempest-base
branches:
@@ -79,6 +109,12 @@
parent: devstack-tempest-ipv6
description: |
Telemetry devstack tempest tests job for IPv6-only deployment
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - ^setup.cfg$
+ - ^doc/.*$
+ - ^.*\.rst$
+ - ^releasenotes/.*$
required-projects: *base_required_projects
vars: *base_vars
@@ -141,7 +177,10 @@
check:
jobs:
- telemetry-dsvm-integration
- - telemetry-dsvm-integration-wallaby
+ - telemetry-dsvm-integration-2023-1
+ - telemetry-dsvm-integration-2023-2
+ - telemetry-dsvm-integration-2024-1
+ - telemetry-dsvm-integration-2024-2
- telemetry-dsvm-integration-ipv6-only
- telemetry-dsvm-integration-centos-9s
- telemetry-dsvm-integration-centos-9s-fips
diff --git a/doc/source/conf.py b/doc/source/conf.py
old mode 100755
new mode 100644
diff --git a/telemetry_tempest_plugin/config.py b/telemetry_tempest_plugin/config.py
index b47f5f3..136356a 100644
--- a/telemetry_tempest_plugin/config.py
+++ b/telemetry_tempest_plugin/config.py
@@ -93,7 +93,15 @@
cfg.IntOpt('prometheus_scrape_interval',
default=15,
help="Scrape interval configured for prometheus. This can "
- "be used in test cases to properly configure autoscaling")
+ "be used in test cases to properly configure autoscaling"),
+ cfg.StrOpt('autoscaling_instance_grouping',
+ default='prefix',
+ choices=['prefix', 'metadata'],
+ help="How to group instances for autoscaling testing. "
+ "'prefix' relies on the instances having a common string "
+ "at the start of their name. 'metadata' is a new and "
+ "preferred way of grouping since 2024.2 relying on "
+ "metering.server_group instance metadata")
]
telemetry_services_opts = [
diff --git a/telemetry_tempest_plugin/scenario/telemetry_integration_prometheus_gabbits/autoscaling.yaml b/telemetry_tempest_plugin/scenario/telemetry_integration_prometheus_gabbits/autoscaling.yaml
index b66ae40..158cbde 100644
--- a/telemetry_tempest_plugin/scenario/telemetry_integration_prometheus_gabbits/autoscaling.yaml
+++ b/telemetry_tempest_plugin/scenario/telemetry_integration_prometheus_gabbits/autoscaling.yaml
@@ -57,22 +57,6 @@
$.servers[1].status: ACTIVE
$.servers.`len`: 2
- - name: check prometheus query for the servers count .
- desc: Check the Prometheus metric for the existence of servers
- url: $ENVIRON['PROMETHEUS_SERVICE_URL']/api/v1/query
- verbose: all
- method: POST
- request_headers:
- content-type: application/x-www-form-urlencoded
- data:
- query=ceilometer_cpu{resource_name=~"te-$ENVIRON['RESOURCE_PREFIX'].*"}
- poll:
- count: 300
- delay: 1
- status: 200
- response_json_paths:
- $.data.result.`len`: 2
-
- name: check alarm cpu_alarm_high ALARM
verbose: all
desc: Check the aodh alarm and its state
diff --git a/telemetry_tempest_plugin/scenario/telemetry_integration_prometheus_gabbits/create_stack.json b/telemetry_tempest_plugin/scenario/telemetry_integration_prometheus_gabbits/create_stack.json
index 036e5fb..32a8219 100644
--- a/telemetry_tempest_plugin/scenario/telemetry_integration_prometheus_gabbits/create_stack.json
+++ b/telemetry_tempest_plugin/scenario/telemetry_integration_prometheus_gabbits/create_stack.json
@@ -54,7 +54,7 @@
}
}
],
- "query": "(rate(ceilometer_cpu{resource_name=~'te-$ENVIRON['RESOURCE_PREFIX'].*'}[$ENVIRON['PROMETHEUS_RATE_DURATION']s])) * 100"
+ "query": $ENVIRON["QUERY"]
}
},
"web_server_scaledown_policy": {
@@ -82,7 +82,7 @@
}
}
],
- "query": "(rate(ceilometer_cpu{resource_name=~'te-$ENVIRON['RESOURCE_PREFIX'].*'}[$ENVIRON['PROMETHEUS_RATE_DURATION']s])) * 100"
+ "query": $ENVIRON["QUERY"]
}
}
}
diff --git a/telemetry_tempest_plugin/scenario/test_telemetry_integration_prometheus.py b/telemetry_tempest_plugin/scenario/test_telemetry_integration_prometheus.py
index 9c13b68..122a3f9 100644
--- a/telemetry_tempest_plugin/scenario/test_telemetry_integration_prometheus.py
+++ b/telemetry_tempest_plugin/scenario/test_telemetry_integration_prometheus.py
@@ -104,6 +104,28 @@
super(PrometheusGabbiTest, cls).resource_cleanup()
+ def _prep_query(self, prometheus_rate_duration, resource_prefix):
+ if config.CONF.telemetry.autoscaling_instance_grouping == "metadata":
+ query = ("\"(rate(ceilometer_cpu{{server_group=~'stack_id'}}"
+ "[{}s])) * 100\"").format(prometheus_rate_duration)
+ metadata_query = '''
+ {{
+ "str_replace": {{
+ "template": {},
+ "params": {{
+ "stack_id": {{ "get_param": "OS::stack_id" }}
+ }}
+ }}
+ }}
+ '''.format(query)
+ return metadata_query
+
+ else:
+ prefix_query = '''
+ "(rate(ceilometer_cpu{{resource_name=~'te-{}.*'}}[{}s])) * 100"
+ '''.format(resource_prefix, prometheus_rate_duration)
+ return prefix_query
+
def _prep_test(self, filename):
auth = self.os_primary.auth_provider.get_auth()
networks = self.os_primary.networks_client.list_networks(
@@ -115,6 +137,7 @@
prometheus_rate_duration = (
config.CONF.telemetry.ceilometer_polling_interval
+ config.CONF.telemetry.prometheus_scrape_interval)
+ query = self._prep_query(prometheus_rate_duration, resource_prefix)
os.environ.update({
"USER_TOKEN": auth[0],
"AODH_THRESHOLD": str(config.CONF.telemetry.alarm_threshold),
@@ -136,6 +159,7 @@
"RESOURCE_PREFIX": resource_prefix,
"PROMETHEUS_RATE_DURATION": str(prometheus_rate_duration),
"LOAD_LENGTH": str(prometheus_rate_duration * 2),
+ "QUERY": query,
})