parameters:
  jenkins:
    client:
      job:
        ceph-remove-node:
          type: workflow-scm
          concurrent: true
          display_name: "Ceph - remove node"
          discard:
            build:
              keep_num: 50
          scm:
            type: git
            url: "${_param:jenkins_gerrit_url}/mk/mk-pipelines"
            branch: "${_param:jenkins_pipelines_branch}"
            credentials: ${_param:jenkins_gerrit_credentials}
            script: ceph-remove-node.groovy
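          # The pipeline body, ceph-remove-node.groovy, is checked out from the
          # mk/mk-pipelines repository using the branch and credentials configured above.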
          param:
            # general parameters
            SALT_MASTER_URL:
              type: string
              description: URL of Salt master
              default: "${_param:jenkins_salt_api_url}"
            SALT_MASTER_CREDENTIALS:
              type: string
              description: Credentials for logging in to the Salt API
              default: salt
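            # SALT_MASTER_URL defaults to the cluster-level reclass parameter
            # jenkins_salt_api_url; SALT_MASTER_CREDENTIALS names the Jenkins
            # credentials entry used to authenticate against the Salt API.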
            HOST:
              type: string
              description: Host (Salt target) to be removed from the Ceph cluster, e.g. rgw04*
              default: 'rgw04*'
            HOST_TYPE:
              type: string
              description: Type of Ceph node to be removed. Valid values are mon/osd/rgw
              default: 'rgw'
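            # Example (hypothetical target name): to remove an OSD node instead,
            # a build would set HOST to something like 'osd001*' and HOST_TYPE to 'osd'.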
            ADMIN_HOST:
              type: string
              description: Host with admin keyring and access to cluster management
              default: 'cmn01*'
            GENERATE_CRUSHMAP:
              type: boolean
              default: 'false'
              description: Applies only when removing an OSD host. Set to true to update the CRUSH map file. The map still has to be enforced manually unless enforcement is explicitly enabled in the pillar.
            WAIT_FOR_HEALTHY:
              type: boolean
              default: 'true'
              description: Wait for the cluster to reach a healthy state during the pipeline
            CLEANDISK:
              type: boolean
              default: 'false'
              description: Clean data/block partitions
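            # A minimal sketch (assumption: this class is included from a cluster-level
            # reclass model) of overriding one of the defaults above, e.g. pointing HOST
            # at a hypothetical OSD node; reclass deep-merges the nested parameters:
            #
            # parameters:
            #   jenkins:
            #     client:
            #       job:
            #         ceph-remove-node:
            #           param:
            #             HOST:
            #               default: 'osd001*'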