Merge the tip of origin/release/proposed/2019.2.0 into origin/release/2019.2.0
d2b3284 Bump Contrail images to 2019.2.9 version
28a14c4 Add config file for alertad
4d6a586 Add Ceph Nautilus mirror to offline image
e3d66d6 Fix env vars for sf-notifier and gainsight
70ed33f Change team for tjaroszyk and remove mlos from services
749edf3 Multiply Gainsight OS API query results by 100
5bfd866 Update jenkins in the CICD cluster
5423542 Bump telegraf image tag
65933f5 Use new aptly containers
d642ef9 Pin cvp-sanity-checks:2019.2.9
4ee1f61 Add default policy for Gnocchi
c1dbd20 Add default policies for Ironic
d57c494 Bump docker images versions
e68cf0e Set 022 umask for image files in salt control
7267bd0 Add missing controllers to designate haproxy
d23d606 Add parameters for OS upgrade to Stacklight upgrade job
f077244 Enable Basic auth for Sphinx
14561b0 Adjust Gainsight configuration
07fda05 Bump prometheus-relay image
f4a5e41 Disable outdated validate_openstack pipeline
27b0d8e Pin distro-info-data package
7e86051 Change the max request body size accepted by the proxy for the rados-gw service from 10240 MB to 20 MB
Change-Id: I96c38f83cb2d22f779ea1689dcf18f1696b4b35a
diff --git a/debmirror/mirror_mirantis_com/init.yml b/debmirror/mirror_mirantis_com/init.yml
index 753a3a4..89c1e7f 100644
--- a/debmirror/mirror_mirantis_com/init.yml
+++ b/debmirror/mirror_mirantis_com/init.yml
@@ -3,6 +3,7 @@
- system.debmirror.mirror_mirantis_com.update.cassandra.xenial
- system.debmirror.mirror_mirantis_com.ceph-luminous.xenial
- system.debmirror.mirror_mirantis_com.update.ceph-luminous.xenial
+- system.debmirror.mirror_mirantis_com.update.ceph-nautilus.xenial
- system.debmirror.mirror_mirantis_com.docker.xenial
- system.debmirror.mirror_mirantis_com.update.docker.xenial
- system.debmirror.mirror_mirantis_com.elasticsearch-5.x.xenial
diff --git a/debmirror/mirror_mirantis_com/update/ceph-nautilus/xenial.yml b/debmirror/mirror_mirantis_com/update/ceph-nautilus/xenial.yml
new file mode 100644
index 0000000..20d73b7
--- /dev/null
+++ b/debmirror/mirror_mirantis_com/update/ceph-nautilus/xenial.yml
@@ -0,0 +1,22 @@
+classes:
+- system.defaults.debmirror
+parameters:
+ debmirror:
+ client:
+ enabled: true
+ mirrors:
+ mirror_mirantis_com_update_ceph_nautilus_xenial:
+ force: ${_param:mirror_mirantis_com_ceph_nautilus_xenial_force}
+ lock_target: True
+ extra_flags: [ '--verbose', '--progress', '--nosource', '--no-check-gpg', '--rsync-extra=none' ]
+ method: "${_param:debmirror_mirrors_sync_method}"
+ arch: [ 'amd64' ]
+ mirror_host: "${_param:debmirror_mirrors_host}"
+ mirror_root: "${_param:debmirror_mirrors_update_root}/ceph-nautilus/xenial/"
+ target_dir: "${_param:debmirror_mirrors_update_target_dir}/ceph-nautilus/xenial/"
+ cache_dir: "${_param:debmirror_mirrors_common_cache_dir}/ceph-nautilus/"
+ log_file: "/var/log/debmirror/mirror_mirantis_com_update_ceph_nautilus_xenial.log"
+ dist: [ xenial ]
+ section: [ main ]
+ filter:
+ 001: "--exclude='(-dbg_|-dbg-)'"
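
Taken together with the `mirror_mirantis_com_ceph_nautilus_xenial_force: False` default added below, a cluster can force a full re-sync of the new mirror by overriding a single parameter. A minimal sketch, assuming the standard top-level include and that `force` re-mirrors an already-populated target:

    classes:
    - system.debmirror.mirror_mirantis_com
    parameters:
      _param:
        mirror_mirantis_com_ceph_nautilus_xenial_force: True
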
diff --git a/defaults/debmirror.yml b/defaults/debmirror.yml
index 8c636ac..cef73b5 100644
--- a/defaults/debmirror.yml
+++ b/defaults/debmirror.yml
@@ -13,6 +13,7 @@
# Per repo modificators
mirror_mirantis_com_cassandra_xenial_force: False
mirror_mirantis_com_ceph_luminous_xenial_force: False
+ mirror_mirantis_com_ceph_nautilus_xenial_force: False
mirror_mirantis_com_docker_xenial_force: False
mirror_mirantis_com_elasticsearch_5_x_xenial_force: False
mirror_mirantis_com_elasticsearch_6_x_xenial_force: False
diff --git a/defaults/docker_images.yml b/defaults/docker_images.yml
index a8aa495..9fd6c56 100644
--- a/defaults/docker_images.yml
+++ b/defaults/docker_images.yml
@@ -11,21 +11,21 @@
docker_image_mongodb: "${_param:mcp_docker_registry}/mirantis/external/mongo:2019.2.6"
###
# phpldapadmin:0.6.12
- docker_image_phpldapadmin: "${_param:mcp_docker_registry}/mirantis/cicd/phpldapadmin:2019.2.5"
+ docker_image_phpldapadmin: "${_param:mcp_docker_registry}/mirantis/cicd/phpldapadmin:2019.2.9"
# gerrit:2.15.17
docker_image_gerrit: "${_param:mcp_docker_registry}/mirantis/cicd/gerrit:2019.2.7"
# mysql:5.6
docker_image_mysql: "${_param:mcp_docker_registry}/mirantis/cicd/mysql:2019.2.6"
- # jenkins:2.150.3
- docker_image_jenkins: "${_param:mcp_docker_registry}/mirantis/cicd/jenkins:2019.2.8"
- docker_image_jenkins_jnlp_slave: "${_param:mcp_docker_registry}/mirantis/cicd/jnlp-slave:2019.2.8"
+ # jenkins:2.204.3
+ docker_image_jenkins: "${_param:mcp_docker_registry}/mirantis/cicd/jenkins:2019.2.9"
+ docker_image_jenkins_jnlp_slave: "${_param:mcp_docker_registry}/mirantis/cicd/jnlp-slave:2019.2.9"
# TODO: fix tag
docker_image_jenkins_ssh_slave: "${_param:mcp_docker_registry}/mirantis/cicd/ssh-slave:2019.2.5"
# model-generator
docker_image_operations_api: "${_param:mcp_docker_registry}/mirantis/model-generator/operations-api:2019.2.6"
docker_image_operations_ui: "${_param:mcp_docker_registry}/mirantis/model-generator/operations-ui:2019.2.6"
# OpenContrail
- opencontrail_docker_image_tag: "2019.2.8"
+ opencontrail_docker_image_tag: "2019.2.9"
# stacklight
  # 6.5.0 version, from 11/29/2018, differs from latest upstream 6.5.0 - update next cycle
docker_image_alerta: "${_param:mcp_docker_registry}/mirantis/external/alerta-web:2019.2.6"
@@ -33,11 +33,11 @@
docker_image_grafana: "${_param:mcp_docker_registry}/openstack-docker/grafana:2019.2.6"
docker_image_prometheus_es_exporter: "${_param:mcp_docker_registry}/openstack-docker/prometheus-es-exporter:2019.2.6"
docker_image_prometheus: "${_param:mcp_docker_registry}/openstack-docker/prometheus:2019.2.6"
- docker_image_prometheus_gainsight: "${_param:mcp_docker_registry}/openstack-docker/gainsight:2019.2.4"
+ docker_image_prometheus_gainsight: "${_param:mcp_docker_registry}/openstack-docker/sf-reporter:2019.2.9"
docker_image_prometheus_gainsight_elasticsearch: "${_param:mcp_docker_registry}/openstack-docker/gainsight_elasticsearch:2019.2.6"
- docker_image_prometheus_relay: "${_param:mcp_docker_registry}/openstack-docker/prometheus-relay:2019.2.5"
+ docker_image_prometheus_relay: "${_param:mcp_docker_registry}/openstack-docker/prometheus-relay:2019.2.9"
docker_image_pushgateway: "${_param:mcp_docker_registry}/openstack-docker/pushgateway:2019.2.6"
- docker_image_remote_agent: "${_param:mcp_docker_registry}/openstack-docker/telegraf:2019.2.8"
+ docker_image_remote_agent: "${_param:mcp_docker_registry}/openstack-docker/telegraf:2019.2.9"
docker_image_remote_collector: "${_param:mcp_docker_registry}/openstack-docker/heka:2019.2.6"
docker_image_remote_storage_adapter: "${_param:mcp_docker_registry}/openstack-docker/remote_storage_adapter:2019.2.6"
docker_image_sf_notifier: "${_param:mcp_docker_registry}/openstack-docker/sf_notifier:2019.2.4"
@@ -47,16 +47,16 @@
docker_image_keycloak_server: "${_param:mcp_docker_registry}/mirantis/external/jboss/keycloak:4.5.0.Final"
docker_image_keycloak_proxy: "${_param:mcp_docker_registry}/mirantis/external/jboss/keycloak:3.4.2.Final"
# CVP
- docker_image_cvp_sanity_checks: "${_param:mcp_docker_registry}/mirantis/cvp/cvp-sanity-checks:2019.2.8"
+ docker_image_cvp_sanity_checks: "${_param:mcp_docker_registry}/mirantis/cvp/cvp-sanity-checks:2019.2.9"
docker_image_cvp_tempest: "${_param:mcp_docker_registry}/mirantis/cicd/ci-tempest:${_param:openstack_version}"
docker_image_cvp_shaker_checks: "${_param:mcp_docker_registry}/mirantis/cvp/cvp-shaker:2019.2.3"
docker_image_cvp_rally: "${_param:mcp_docker_registry}/mirantis/cvp/cvp-rally:2019.2.5"
docker_image_cvp_xrally: "${_param:mcp_docker_registry}/mirantis/external/xrally/xrally-openstack:0.11.2"
# aptly
docker_image_aptly:
- base: "${_param:mcp_docker_registry}/mirantis/cicd/aptly:2019.2.6"
- public: "${_param:mcp_docker_registry}/mirantis/cicd/aptly-public:2019.2.6"
- publisher: "${_param:mcp_docker_registry}/mirantis/cicd/aptly-publisher:2019.2.6"
+ base: "${_param:mcp_docker_registry}/mirantis/cicd/aptly:2019.2.9"
+ public: "${_param:mcp_docker_registry}/mirantis/cicd/aptly-public:2019.2.9"
+ publisher: "${_param:mcp_docker_registry}/mirantis/cicd/aptly-publisher:2019.2.9"
# List of images, to be placed into offline image, during separate image build process
  # WARNING: registry|target_registry and names must be exactly the same as the list above!
@@ -82,27 +82,27 @@
name: postgres:9.6.10
- registry: ${_param:mcp_docker_registry}/mirantis/cicd
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/cicd
- name: phpldapadmin:2019.2.5
+ name: phpldapadmin:2019.2.9
- registry: ${_param:mcp_docker_registry}/mirantis/cicd
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/cicd
- name: aptly:2019.2.6
+ name: aptly:2019.2.9
- registry: ${_param:mcp_docker_registry}/mirantis/cicd
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/cicd
- name: aptly-public:2019.2.6
+ name: aptly-public:2019.2.9
- registry: ${_param:mcp_docker_registry}/mirantis/cicd
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/cicd
- name: aptly-publisher:2019.2.6
+ name: aptly-publisher:2019.2.9
- registry: ${_param:mcp_docker_registry}/mirantis/cicd
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/cicd
- name: jnlp-slave:2019.2.8
+ name: jnlp-slave:2019.2.9
- registry: ${_param:mcp_docker_registry}/mirantis/cicd
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/cicd
name: ssh-slave:2019.2.5
- registry: ${_param:mcp_docker_registry}/mirantis/cicd
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/cicd
- name: jenkins:2019.2.8
+ name: jenkins:2019.2.9
- registry: ${_param:mcp_docker_registry}/mirantis/cicd
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/cicd
name: gerrit:2019.2.7
@@ -130,16 +130,16 @@
name: prometheus:2019.2.6
- registry: ${_param:mcp_docker_registry}/openstack-docker
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/openstack-docker
- name: gainsight:2019.2.4
+ name: sf-reporter:2019.2.9
- registry: ${_param:mcp_docker_registry}/openstack-docker
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/openstack-docker
- name: telegraf:2019.2.8
+ name: telegraf:2019.2.9
- registry: ${_param:mcp_docker_registry}/openstack-docker
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/openstack-docker
name: remote_storage_adapter:2019.2.6
- registry: ${_param:mcp_docker_registry}/openstack-docker
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/openstack-docker
- name: prometheus-relay:2019.2.5
+ name: prometheus-relay:2019.2.9
- registry: ${_param:mcp_docker_registry}/openstack-docker
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/openstack-docker
name: grafana:2019.2.6
@@ -182,7 +182,7 @@
name: cvp-shaker:2019.2.3
- registry: ${_param:mcp_docker_registry}/mirantis/cvp
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/cvp
- name: cvp-sanity-checks:2019.2.8
+ name: cvp-sanity-checks:2019.2.9
- registry: ${_param:mcp_docker_registry}/mirantis/external/xrally
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/external/xrally
name: xrally-openstack:0.11.2
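
Each entry in the offline-image list chains two interpolations; with `mcp_docker_registry` and the local-mirror target resolved, an entry flattens to roughly the following (both registry values are illustrative assumptions, not values from this change):

    - registry: docker-prod-local.docker.mirantis.net/mirantis/cicd   # assumed mcp_docker_registry
      target_registry: 127.0.0.1:5000/mirantis/cicd                   # assumed local target registry
      name: jenkins:2019.2.9
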
diff --git a/defaults/openstack/policy/all.yml b/defaults/openstack/policy/all.yml
index 39d7c40..3e0975a 100644
--- a/defaults/openstack/policy/all.yml
+++ b/defaults/openstack/policy/all.yml
@@ -440,6 +440,46 @@
"tasks_api_access": "role:admin"
"upload_image": ""
glance_default_policy_queens: ${_param:glance_default_policy_pike}
+ gnocchi_default_policy_ocata: {}
+ gnocchi_default_policy_pike: &gnocchi_default_policy_pike
+ "admin_or_creator": "role:admin or user:%(creator)s or project_id:%(created_by_project_id)s"
+ "create archive policy rule": "role:admin"
+ "create archive policy": "role:admin"
+ "create metric": ""
+ "create resource type": "role:admin"
+ "create resource": ""
+ "delete archive policy rule": "role:admin"
+ "delete archive policy": "role:admin"
+ "delete metric": "rule:admin_or_creator"
+ "delete resource type": "role:admin"
+ "delete resource": "rule:admin_or_creator"
+ "delete resources": "rule:admin_or_creator"
+ "get archive policy rule": ""
+ "get archive policy": ""
+ "get measures": "rule:admin_or_creator or rule:metric_owner"
+ "get metric": "rule:admin_or_creator or rule:metric_owner"
+ "get resource type": ""
+ "get resource": "rule:admin_or_creator or rule:resource_owner"
+ "get status": "role:admin"
+ "list all metric": "role:admin"
+ "list archive policy rule": ""
+ "list archive policy": ""
+ "list metric": ""
+ "list resource type": ""
+ "list resource": "rule:admin_or_creator or rule:resource_owner"
+ "metric_owner": "project_id:%(resource.project_id)s"
+ "post measures": "rule:admin_or_creator"
+ "resource_owner": "project_id:%(project_id)s"
+ "search metric": "rule:admin_or_creator or rule:metric_owner"
+ "search resource": "rule:admin_or_creator or rule:resource_owner"
+ "update archive policy": "role:admin"
+ "update resource type": "role:admin"
+ "update resource": "rule:admin_or_creator"
+ gnocchi_default_policy_queens:
+ << : *gnocchi_default_policy_pike
+ "list all metric":
+ "list metric": "rule:admin_or_creator or rule:metric_owner"
+ "update archive policy rule": "role:admin"
heat_default_policy_ocata: {}
heat_default_policy_pike:
"actions:action": "rule:deny_stack_user"
@@ -537,6 +577,62 @@
"stacks:update_patch": "rule:deny_stack_user"
"stacks:validate_template": "rule:deny_stack_user"
heat_default_policy_queens: ${_param:heat_default_policy_pike}
+ ironic_default_policy_ocata: {}
+ ironic_default_policy_pike: &ironic_default_policy_pike
+ "admin_api": "role:admin or role:administrator"
+ "baremetal:chassis:create": "rule:is_admin"
+ "baremetal:chassis:delete": "rule:is_admin"
+ "baremetal:chassis:get": "rule:is_admin or rule:is_observer"
+ "baremetal:chassis:update": "rule:is_admin"
+ "baremetal:driver:get": "rule:is_admin or rule:is_observer"
+ "baremetal:driver:get_properties": "rule:is_admin or rule:is_observer"
+ "baremetal:driver:get_raid_logical_disk_properties": "rule:is_admin or rule:is_observer"
+ "baremetal:driver:ipa_lookup": "rule:public_api"
+ "baremetal:driver:vendor_passthru": "rule:is_admin"
+ "baremetal:node:clear_maintenance": "rule:is_admin"
+ "baremetal:node:create": "rule:is_admin"
+ "baremetal:node:delete": "rule:is_admin"
+ "baremetal:node:get": "rule:is_admin or rule:is_observer"
+ "baremetal:node:get_boot_device": "rule:is_admin or rule:is_observer"
+ "baremetal:node:get_console": "rule:is_admin"
+ "baremetal:node:get_states": "rule:is_admin or rule:is_observer"
+ "baremetal:node:inject_nmi": "rule:is_admin"
+ "baremetal:node:ipa_heartbeat": "rule:public_api"
+ "baremetal:node:set_boot_device": "rule:is_admin"
+ "baremetal:node:set_console_state": "rule:is_admin"
+ "baremetal:node:set_maintenance": "rule:is_admin"
+ "baremetal:node:set_power_state": "rule:is_admin"
+ "baremetal:node:set_provision_state": "rule:is_admin"
+ "baremetal:node:set_raid_state": "rule:is_admin"
+ "baremetal:node:update": "rule:is_admin"
+ "baremetal:node:validate": "rule:is_admin"
+ "baremetal:node:vendor_passthru": "rule:is_admin"
+ "baremetal:node:vif:attach": "rule:is_admin"
+ "baremetal:node:vif:detach": "rule:is_admin"
+ "baremetal:node:vif:list": "rule:is_admin"
+ "baremetal:port:create": "rule:is_admin"
+ "baremetal:port:delete": "rule:is_admin"
+ "baremetal:port:get": "rule:is_admin or rule:is_observer"
+ "baremetal:port:update": "rule:is_admin"
+ "baremetal:portgroup:create": "rule:is_admin"
+ "baremetal:portgroup:delete": "rule:is_admin"
+ "baremetal:portgroup:get": "rule:is_admin or rule:is_observer"
+ "baremetal:portgroup:update": "rule:is_admin"
+ "baremetal:volume:create": "rule:is_admin"
+ "baremetal:volume:delete": "rule:is_admin"
+ "baremetal:volume:get": "rule:is_admin or rule:is_observer"
+ "baremetal:volume:update": "rule:is_admin"
+ "is_admin": "rule:admin_api or (rule:is_member and role:baremetal_admin)"
+ "is_member": "(project_domain_id:default or project_domain_id:None) and (project_name:demo or project_name:baremetal)"
+ "is_observer": "rule:is_member and (role:observer or role:baremetal_observer)"
+ "public_api": "is_public_api:True"
+ "show_instance_secrets": "!"
+ "show_password": "!"
+ ironic_default_policy_queens:
+ << : *ironic_default_policy_pike
+ "baremetal:node:traits:delete": "rule:is_admin"
+ "baremetal:node:traits:list": "rule:is_admin or rule:is_observer"
+ "baremetal:node:traits:set": "rule:is_admin"
keystone_default_policy_ocata: {}
keystone_default_policy_pike: &keystone_default_policy_pike
"admin_or_owner": "rule:admin_required or rule:owner"
diff --git a/defaults/openstack/policy/gnocchi.yml b/defaults/openstack/policy/gnocchi.yml
new file mode 100644
index 0000000..a56e91b
--- /dev/null
+++ b/defaults/openstack/policy/gnocchi.yml
@@ -0,0 +1,6 @@
+classes:
+- system.defaults.openstack.policy.all
+parameters:
+ gnocchi:
+ server:
+ policy: ${_param:gnocchi_default_policy_${_param:openstack_version}}
diff --git a/defaults/openstack/policy/ironic.yml b/defaults/openstack/policy/ironic.yml
new file mode 100644
index 0000000..f6addcb
--- /dev/null
+++ b/defaults/openstack/policy/ironic.yml
@@ -0,0 +1,6 @@
+classes:
+- system.defaults.openstack.policy.all
+parameters:
+ ironic:
+ api:
+ policy: ${_param:ironic_default_policy_${_param:openstack_version}}
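
Both new policy classes rely on nested interpolation: the inner `${_param:openstack_version}` resolves first and selects the per-release map defined in policy/all.yml. A minimal sketch of the resolution, assuming a Queens deployment:

    parameters:
      _param:
        openstack_version: queens
      ironic:
        api:
          # ${_param:ironic_default_policy_${_param:openstack_version}}
          #   -> ${_param:ironic_default_policy_queens}
          #   -> the pike map merged with the three queens-only trait rules
          policy: ${_param:ironic_default_policy_queens}
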
diff --git a/defaults/stacklight.yml b/defaults/stacklight.yml
index 625d20c..9ed6a8f 100644
--- a/defaults/stacklight.yml
+++ b/defaults/stacklight.yml
@@ -8,3 +8,10 @@
# ELK stack versions
elasticsearch_version: 5
kibana_version: 5
+ # Salesforce
+ sf_notifier_sfdc_auth_url: "default"
+ sf_notifier_sfdc_username: "default"
+ sf_notifier_sfdc_password: "default"
+ sf_notifier_sfdc_organization_id: "default"
+ sf_notifier_sfdc_environment_id: "default"
+ sf_notifier_sfdc_sandbox_enabled: "True"
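
The `sf_notifier_sfdc_*` values above are placeholders; a cluster model is expected to override them with real Salesforce credentials, along the lines of the following (all values hypothetical):

    parameters:
      _param:
        sf_notifier_sfdc_auth_url: "https://login.salesforce.com/services/Soap/u/44.0"  # hypothetical
        sf_notifier_sfdc_username: "svc-monitoring@example.com"                         # hypothetical
        sf_notifier_sfdc_organization_id: "00D000000000001"                             # hypothetical
        sf_notifier_sfdc_sandbox_enabled: "False"
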
diff --git a/docker/swarm/stack/monitoring/alerta.yml b/docker/swarm/stack/monitoring/alerta.yml
index 6112b60..acd4d70 100644
--- a/docker/swarm/stack/monitoring/alerta.yml
+++ b/docker/swarm/stack/monitoring/alerta.yml
@@ -24,6 +24,7 @@
- 15017:8080
volumes:
- ${prometheus:alerta:config_dir}/alerta.conf:/web/config.js
+ - ${prometheus:alerta:config_dir}/alertad.conf:/app/alertad.conf
environment:
ADMIN_USERS: ${_param:alerta_admin_username}
ADMIN_PASSWORD: ${_param:alerta_admin_password}
diff --git a/docker/swarm/stack/monitoring/gainsight.yml b/docker/swarm/stack/monitoring/gainsight.yml
index 554c8ec..472b047 100644
--- a/docker/swarm/stack/monitoring/gainsight.yml
+++ b/docker/swarm/stack/monitoring/gainsight.yml
@@ -2,19 +2,17 @@
- system.prometheus.gainsight.container
parameters:
_param:
- gainsight_enabled: 'true'
- gainsight_csv_upload_url: 'http://localhost:9999'
- gainsight_account_id: 'default'
- gainsight_environment_id: 'default'
- gainsight_app_org_id: 'default'
- gainsight_access_key: 'default'
- gainsight_job_id: 'default'
- gainsight_login: 'default'
+ gainsight_cluster_id: "${_param:cluster_domain}"
gainsight_prometheus_url: "http://${_param:stacklight_monitor_address}:15010"
- gainsight_config_directory: '/srv/gainsight'
- gainsight_crontab_directory: '/etc/cron.d'
+ gainsight_config_directory: "/srv/gainsight"
+ gainsight_crontab_directory: "/etc/cron.d"
gainsight_config_path: "${_param:gainsight_config_directory}/config.ini"
- gainsight_csv_retention: 180
+ gainsight_sfdc_auth_url: "${_param:sf_notifier_sfdc_auth_url}"
+ gainsight_sfdc_username: "${_param:sf_notifier_sfdc_username}"
+ gainsight_sfdc_password: "${_param:sf_notifier_sfdc_password}"
+ gainsight_sfdc_organization_id: "${_param:sf_notifier_sfdc_organization_id}"
+ gainsight_sfdc_environment_id: "${_param:sf_notifier_sfdc_environment_id}"
+ gainsight_sfdc_sandbox_enabled: "${_param:sf_notifier_sfdc_sandbox_enabled}"
docker:
client:
stack:
@@ -40,16 +38,13 @@
volumes:
- ${prometheus:gainsight:dir:config}:${_param:gainsight_config_directory}
- ${prometheus:gainsight:dir:crontab}:${_param:gainsight_crontab_directory}
- - ${prometheus:gainsight:dir:csv}:/opt/gainsight/csv
environment:
- CSV_UPLOAD_URL: "${_param:gainsight_csv_upload_url}"
- ACCOUNT_ID: "${_param:gainsight_account_id}"
- ENVIRONMENT_ID: "${_param:gainsight_environment_id}"
- APP_ORG_ID: "${_param:gainsight_app_org_id}"
- ACCESS_KEY: "${_param:gainsight_access_key}"
- JOB_ID: "${_param:gainsight_job_id}"
- LOGIN: "${_param:gainsight_login}"
- PROMETHEUS_URL: "${_param:gainsight_prometheus_url}"
- CONFIG_PATH: "${_param:gainsight_config_path}"
- ENABLED: "${_param:gainsight_enabled}"
- RETENTION: ${_param:gainsight_csv_retention}
+ - CLUSTER_ID='${_param:gainsight_cluster_id}'
+ - PROMETHEUS_URL='${_param:gainsight_prometheus_url}'
+ - CONFIG_PATH='${_param:gainsight_config_path}'
+ - SFDC_AUTH_URL='${_param:gainsight_sfdc_auth_url}'
+ - SFDC_USERNAME='${_param:gainsight_sfdc_username}'
+ - SFDC_PASSWORD='${_param:gainsight_sfdc_password}'
+ - SFDC_ORGANIZATION_ID='${_param:gainsight_sfdc_organization_id}'
+ - SFDC_ENVIRONMENT_ID='${_param:gainsight_sfdc_environment_id}'
+ - SFDC_SANDBOX_ENABLED='${_param:gainsight_sfdc_sandbox_enabled}'
diff --git a/docker/swarm/stack/monitoring/sf_notifier.yml b/docker/swarm/stack/monitoring/sf_notifier.yml
index b8b2dd6..a77b457 100644
--- a/docker/swarm/stack/monitoring/sf_notifier.yml
+++ b/docker/swarm/stack/monitoring/sf_notifier.yml
@@ -30,13 +30,13 @@
ports:
- 15018:5000
environment:
- SF_NOTIFIER_WORKERS: ${_param:sf_notifier_workers}
- SF_NOTIFIER_BUFFER_SIZE: ${_param:sf_notifier_buffer_size}
- SF_NOTIFIER_APP_PORT: ${prometheus:sf_notifier:uwsgi:bind_port}
- SF_NOTIFIER_ALERT_ID_HASH_FUNC: ${_param:sf_notifier_alert_id_hash_func}
- SFDC_AUTH_URL: "${_param:sf_notifier_sfdc_auth_url}"
- SFDC_USERNAME: "${_param:sf_notifier_sfdc_username}"
- SFDC_PASSWORD: "${_param:sf_notifier_sfdc_password}"
- SFDC_ORGANIZATION_ID: "${_param:sf_notifier_sfdc_organization_id}"
- SFDC_ENVIRONMENT_ID: "${_param:sf_notifier_sfdc_environment_id}"
- SFDC_SANDBOX_ENABLED: "${_param:sf_notifier_sfdc_sandbox_enabled}"
+ - SF_NOTIFIER_WORKERS=${_param:sf_notifier_workers}
+ - SF_NOTIFIER_BUFFER_SIZE=${_param:sf_notifier_buffer_size}
+ - SF_NOTIFIER_APP_PORT=${prometheus:sf_notifier:uwsgi:bind_port}
+ - SF_NOTIFIER_ALERT_ID_HASH_FUNC='${_param:sf_notifier_alert_id_hash_func}'
+ - SFDC_AUTH_URL='${_param:sf_notifier_sfdc_auth_url}'
+ - SFDC_USERNAME='${_param:sf_notifier_sfdc_username}'
+ - SFDC_PASSWORD='${_param:sf_notifier_sfdc_password}'
+ - SFDC_ORGANIZATION_ID='${_param:sf_notifier_sfdc_organization_id}'
+ - SFDC_ENVIRONMENT_ID='${_param:sf_notifier_sfdc_environment_id}'
+ - SFDC_SANDBOX_ENABLED='${_param:sf_notifier_sfdc_sandbox_enabled}'
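
Both stacks now declare `environment` in list form (`KEY=value`) rather than mapping form. One thing to watch: the single quotes in entries such as `SFDC_USERNAME='${_param:sf_notifier_sfdc_username}'` are part of the YAML scalar, so a Compose-style parser splits on the first `=` and keeps the quotes in the value unless they are stripped downstream; rendered, an entry would come out roughly as:

    environment:
      - SFDC_USERNAME='svc-monitoring@example.com'   # quotes retained in the container env; value hypothetical
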
diff --git a/haproxy/proxy/listen/openstack/designate.yml b/haproxy/proxy/listen/openstack/designate.yml
index 1310be4..0f19a1f 100644
--- a/haproxy/proxy/listen/openstack/designate.yml
+++ b/haproxy/proxy/listen/openstack/designate.yml
@@ -20,3 +20,7 @@
host: ${_param:cluster_node02_address}
port: ${_param:haproxy_designate_port}
params: ${_param:haproxy_designate_check_params}
+ - name: ${_param:cluster_node03_hostname}
+ host: ${_param:cluster_node03_address}
+ port: ${_param:haproxy_designate_port}
+ params: ${_param:haproxy_designate_check_params}
diff --git a/haproxy/proxy/listen/openstack/designate_large.yml b/haproxy/proxy/listen/openstack/designate_large.yml
new file mode 100644
index 0000000..01d92aa
--- /dev/null
+++ b/haproxy/proxy/listen/openstack/designate_large.yml
@@ -0,0 +1,34 @@
+parameters:
+ _param:
+ haproxy_designate_check_params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
+ haproxy_designate_port: 9001
+ haproxy:
+ proxy:
+ listen:
+ designate_api:
+ type: openstack-service
+ service_name: designate
+ binds:
+ - address: ${_param:cluster_vip_address}
+ port: ${_param:haproxy_designate_port}
+ servers:
+ - name: ${_param:cluster_node01_hostname}
+ host: ${_param:cluster_node01_address}
+ port: ${_param:haproxy_designate_port}
+ params: ${_param:haproxy_designate_check_params}
+ - name: ${_param:cluster_node02_hostname}
+ host: ${_param:cluster_node02_address}
+ port: ${_param:haproxy_designate_port}
+ params: ${_param:haproxy_designate_check_params}
+ - name: ${_param:cluster_node03_hostname}
+ host: ${_param:cluster_node03_address}
+ port: ${_param:haproxy_designate_port}
+ params: ${_param:haproxy_designate_check_params}
+ - name: ${_param:cluster_node04_hostname}
+ host: ${_param:cluster_node04_address}
+ port: ${_param:haproxy_designate_port}
+ params: ${_param:haproxy_designate_check_params}
+ - name: ${_param:cluster_node05_hostname}
+ host: ${_param:cluster_node05_address}
+ port: ${_param:haproxy_designate_port}
+ params: ${_param:haproxy_designate_check_params}
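
The `_large` variant assumes five backend nodes, so any cluster consuming it must define the extra node parameters, for example (addresses hypothetical):

    parameters:
      _param:
        cluster_node04_hostname: ctl04
        cluster_node04_address: 10.10.0.14   # hypothetical
        cluster_node05_hostname: ctl05
        cluster_node05_address: 10.10.0.15   # hypothetical
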
diff --git a/jenkins/client/job/deploy/update/upgrade_stacklight.yml b/jenkins/client/job/deploy/update/upgrade_stacklight.yml
index 57747e4..f043de8 100644
--- a/jenkins/client/job/deploy/update/upgrade_stacklight.yml
+++ b/jenkins/client/job/deploy/update/upgrade_stacklight.yml
@@ -39,3 +39,11 @@
type: boolean
default: 'true'
description: "Set to True if upgrade for components running in Docker Swarm is desired"
+ OS_UPGRADE:
+ type: boolean
+ default: 'false'
+ description: 'Run apt-get upgrade on Stacklight nodes'
+ OS_DIST_UPGRADE:
+ type: boolean
+ default: 'false'
+ description: 'Run apt-get dist-upgrade on Stacklight nodes and reboot to apply changes'
diff --git a/jenkins/client/job/validate.yml b/jenkins/client/job/validate.yml
index 92d369c..aeb9553 100644
--- a/jenkins/client/job/validate.yml
+++ b/jenkins/client/job/validate.yml
@@ -21,6 +21,8 @@
artifact:
keep_num: 50
concurrent: false
+ disabled: true
+ description: "Pipeline is outdated and no longer supported. Please do not use in production"
scm:
type: git
url: "${_param:jenkins_gerrit_url}/mk/mk-pipelines"
diff --git a/maas/region/single.yml b/maas/region/single.yml
index 601b883..580ca18 100644
--- a/maas/region/single.yml
+++ b/maas/region/single.yml
@@ -5,6 +5,13 @@
_param:
maas_admin_username: mirantis
maas_region_main_archive: ${_param:linux_system_repo_update_url}/ubuntu/
+ # Pin distro-info-data package. See PROD-34940 for details
+ linux:
+ system:
+ package:
+ distro-info-data:
+ version: 0.28ubuntu0.9
+ hold: true
maas:
cluster:
enabled: true
diff --git a/nginx/server/proxy/ceph_radosgw.yml b/nginx/server/proxy/ceph_radosgw.yml
index 8207bda..37fc390 100644
--- a/nginx/server/proxy/ceph_radosgw.yml
+++ b/nginx/server/proxy/ceph_radosgw.yml
@@ -20,8 +20,6 @@
enabled: true
value: "$remote_addr"
proxy:
- size: 10240m
- buffer_size: 10240m
host: ${_param:nginx_proxy_radosgw_service_host}
port: ${_param:nginx_proxy_radosgw_service_port}
protocol: http
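
Dropping the explicit `size`/`buffer_size` overrides lets the nginx formula fall back to its defaults, which per the commit message cap the request body at 20 MB; the previous 10240m values allowed request bodies of up to 10 GB (10240 MB).
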
diff --git a/openssh/server/team/services.yml b/openssh/server/team/services.yml
index da993c4..1cd16bc 100644
--- a/openssh/server/team/services.yml
+++ b/openssh/server/team/services.yml
@@ -11,13 +11,11 @@
- system.openssh.server.team.members.korlowska
- system.openssh.server.team.members.lmercl
- system.openssh.server.team.members.mchernik
-- system.openssh.server.team.members.mlos
- system.openssh.server.team.members.osavatieiev
- system.openssh.server.team.members.pbasov
- system.openssh.server.team.members.pruzicka
- system.openssh.server.team.members.qwu
- system.openssh.server.team.members.sburns
-- system.openssh.server.team.members.tjaroszyk
- system.openssh.server.team.members.vkuspits
- system.openssh.server.team.members.yisakov
- system.openssh.server.team.members.wnawrot
diff --git a/openssh/server/team/storage.yml b/openssh/server/team/storage.yml
index 5531f97..931692b 100644
--- a/openssh/server/team/storage.yml
+++ b/openssh/server/team/storage.yml
@@ -3,3 +3,4 @@
- system.openssh.server.team.members.deprecated.mvollman
- system.openssh.server.team.members.mlos
- system.openssh.server.team.members.mjedynski
+- system.openssh.server.team.members.tjaroszyk
diff --git a/prometheus/alerta/init.yml b/prometheus/alerta/init.yml
index a82e8d4..6e16b63 100644
--- a/prometheus/alerta/init.yml
+++ b/prometheus/alerta/init.yml
@@ -1,2 +1,7 @@
classes:
- service.prometheus.alerta
+parameters:
+ prometheus:
+ alerta:
+ timeouts:
+ alert: 28800
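
For reference, the new Alerta timeout is expressed in seconds and works out to eight hours:

    28800 s = 8 * 60 * 60 s

which, assuming it maps onto Alerta's standard alert timeout, is how long an alert may go without re-firing before Alerta expires it.
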
diff --git a/prometheus/gainsight/container.yml b/prometheus/gainsight/container.yml
index f98e052..fda03e8 100644
--- a/prometheus/gainsight/container.yml
+++ b/prometheus/gainsight/container.yml
@@ -1,3 +1,2 @@
classes:
- service.prometheus.gainsight.container
-
diff --git a/prometheus/gainsight/elasticsearch_container.yml b/prometheus/gainsight/elasticsearch_container.yml
index 8a10fbf..a32320f 100644
--- a/prometheus/gainsight/elasticsearch_container.yml
+++ b/prometheus/gainsight/elasticsearch_container.yml
@@ -1,3 +1,2 @@
classes:
- service.prometheus.gainsight.elasticsearch_container
-
diff --git a/prometheus/gainsight/query/openstack.yml b/prometheus/gainsight/query/openstack.yml
index 679a6fc..1eac4c3 100644
--- a/prometheus/gainsight/query/openstack.yml
+++ b/prometheus/gainsight/query/openstack.yml
@@ -11,11 +11,11 @@
instances: "'Instances','avg(sum(avg_over_time(openstack_nova_instances{state=\"active\"}[24h])) by (instance))'"
compute_nodes: "'Compute Nodes','avg(sum(openstack_nova_services{binary=~\"nova.compute\"}) by (instance))'"
tenants: "'Tenants','avg(sum(avg_over_time(openstack_keystone_tenants_total[24h])) by (instance))'"
- cinder_api: "'Cinder API','avg_over_time(name:openstack_api_check_status:avg5m:for5m:ceil:avg5m:floor{name=\"cinderv2\"}[24h])'"
- nova_api: "'Nova API','avg_over_time(name:openstack_api_check_status:avg5m:for5m:ceil:avg5m:floor{name=\"nova\"}[24h])'"
- keystone_api: "'Keystone API','avg_over_time(name:openstack_api_check_status:avg5m:for5m:ceil:avg5m:floor{name=\"keystone\"}[24h])'"
- glance_api: "'Glance API','avg_over_time(name:openstack_api_check_status:avg5m:for5m:ceil:avg5m:floor{name=\"glance\"}[24h])'"
- neutron_api: "'Neutron API','avg_over_time(name:openstack_api_check_status:avg5m:for5m:ceil:avg5m:floor{name=\"neutron\"}[24h])'"
+ cinder_api: "'Cinder API','avg_over_time(name:openstack_api_check_status:avg5m:for5m:ceil:avg5m:floor{name=\"cinderv2\"}[24h]) * 100'"
+ nova_api: "'Nova API','avg_over_time(name:openstack_api_check_status:avg5m:for5m:ceil:avg5m:floor{name=\"nova\"}[24h]) * 100'"
+ keystone_api: "'Keystone API','avg_over_time(name:openstack_api_check_status:avg5m:for5m:ceil:avg5m:floor{name=\"keystone\"}[24h]) * 100'"
+ glance_api: "'Glance API','avg_over_time(name:openstack_api_check_status:avg5m:for5m:ceil:avg5m:floor{name=\"glance\"}[24h]) * 100'"
+ neutron_api: "'Neutron API','avg_over_time(name:openstack_api_check_status:avg5m:for5m:ceil:avg5m:floor{name=\"neutron\"}[24h]) * 100'"
nova_vm_all: "'Total VM number','avg_over_time(total:openstack_nova_instance_all[1d])'"
nova_vm_failed: "'Failed VM number','avg_over_time(total:openstack_nova_instance_failed[1d])'"
kpi_downtime: "'KPI Downtime','1 - avg_over_time(total:openstack_nova_instance_failed[1d]) / avg_over_time(total:openstack_nova_instance_all[1d])'"
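
The `name:openstack_api_check_status:...` series is a 0-to-1 availability ratio (the ceil/floor steps clamp each window to 0 or 1 before averaging), so multiplying by 100 converts the daily average into the percentage Gainsight expects, e.g.:

    avg_over_time(...{name="nova"}[24h]) = 0.997   ->   0.997 * 100 = 99.7
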
diff --git a/salt/control/virt.yml b/salt/control/virt.yml
index 03891c5..a408218 100644
--- a/salt/control/virt.yml
+++ b/salt/control/virt.yml
@@ -5,6 +5,7 @@
control:
enabled: True
virt_enabled: True
+ file_mask: 022
virt:
nic:
default:
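
Assuming the formula applies `file_mask` as a standard umask when writing VM image files, 022 clears the group/other write bits:

    0666 & ~022 = 0644   # files
    0777 & ~022 = 0755   # directories
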
diff --git a/sphinx/server/doc/reclass.yml b/sphinx/server/doc/reclass.yml
index 53fa5dd..651ec48 100644
--- a/sphinx/server/doc/reclass.yml
+++ b/sphinx/server/doc/reclass.yml
@@ -3,6 +3,9 @@
parameters:
_param:
nginx_static_reclass_doc_host: ${_param:cluster_public_host}
+ nginx_proxy_sphinx_user: sphinx
+ nginx_proxy_sphinx_password: ${_param:sphinx_proxy_password_generated}
+ nginx_proxy_sphinx_htpasswd_file: .htpasswd_sphinx
sphinx:
server:
doc:
@@ -13,12 +16,22 @@
engine: reclass
nginx:
server:
+ user:
+ sphinx:
+ enabled: true
+ name: ${_param:nginx_proxy_sphinx_user}
+ password: ${_param:nginx_proxy_sphinx_password}
+ htpasswd: ${_param:nginx_proxy_sphinx_htpasswd_file}
site:
nginx_static_reclass_doc:
enabled: true
type: nginx_static
name: reclass_doc
+ auth:
+ engine: basic
+ htpasswd: ${_param:nginx_proxy_sphinx_htpasswd_file}
host:
name: ${_param:nginx_static_reclass_doc_host}
port: 8090
protocol: http
+
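
The Basic-auth password defaults to `${_param:sphinx_proxy_password_generated}`, which the cluster model is expected to supply (typically a generated secret); a minimal override sketch with a hypothetical value:

    parameters:
      _param:
        sphinx_proxy_password_generated: "s3cr3t-example"   # hypothetical; normally generated per deployment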