| {%- from "opencontrail/map.jinja" import common,compute with context %} |
| # |
| # Vnswad configuration options |
| # |
| |
| [CONTROL-NODE] |
# List of IPAddress:Port pairs of control nodes, separated by spaces. (Mandatory)
# servers=10.0.0.1:5269 10.0.0.2:5269 10.0.0.3:5269
| servers={% for member in compute.control.members %}{{ member.host }}:5269{% if not loop.last %} {% endif %}{% endfor %} |
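
# The Jinja loop above expects pillar data of roughly this shape (addresses
# are illustrative; the collector.members list used in [DEFAULT] below
# follows the same layout):
#   compute:
#     control:
#       members:
#         - host: 10.0.0.1
#         - host: 10.0.0.2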
| |
| [DEFAULT] |
| # Everything in this section is optional |
| |
| # IP address and port to be used to connect to collector. |
# Multiple IP:port pairs can be provided, separated by spaces
| collectors = {% for member in compute.collector.members %}{{ member.host }}:8086{% if not loop.last %} {% endif %}{% endfor %} |
| |
# Agent mode: can be vrouter / tsn / tor (default is vrouter)
| # agent_mode= |
| {%- if compute.get('tor', {}).get('enabled', False) %} |
| agent_mode = tsn |
| {%- endif %} |
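
# A pillar sketch for enabling TSN mode via the conditional above (key path
# taken from the template; the value is illustrative):
#   compute:
#     tor:
#       enabled: true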
| |
| # Enable/disable debug logging. Possible values are 0 (disable) and 1 (enable) |
| # debug=0 |
| |
| # Aging time for flow-records in seconds |
| # flow_cache_timeout=0 |
| {%- if compute.flow_cache_timeout is defined %} |
| flow_cache_timeout={{ compute.flow_cache_timeout }} |
| {%- endif %} |
| |
| {%- if compute.dpdk.enabled %} |
| |
| # DPDK or legacy work mode |
| platform=dpdk |
| |
| # Physical address of PCI used by dpdk |
| #physical_interface_address=0000:81:00.1 |
| physical_interface_address={{ compute.interface.pci }} |
| |
| # MAC address of device used by dpdk |
| physical_interface_mac={{ compute.interface.mac_address }} |
| |
| {%- endif %} |
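
# When dpdk is enabled, the template fills the two interface fields above
# from pillar data shaped roughly like this (PCI address and MAC are
# illustrative):
#   compute:
#     dpdk:
#       enabled: true
#     interface:
#       pci: 0000:81:00.1
#       mac_address: 90:e2:ba:11:22:33
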
| # hostname= # Retrieved from gethostname() or `hostname -s` equivalent |
| {%- if compute.hostname is defined %} |
| hostname={{ compute.hostname }} |
| {%- endif %} |
| |
| # Http server port for inspecting vnswad state (useful for debugging) |
| # http_server_port=8085 |
| |
| # Category for logging. Default value is '*' |
| # log_category= |
| |
| # Number of tx-buffers on pkt0 interface |
| # pkt0_tx_buffers=1000 |
| # |
| # Measure delays in different queues |
| # measure_queue_delay=0 |
| |
| # Local log file name |
| log_file=/var/log/contrail/contrail-vrouter-agent.log |
| |
| # Log severity levels. Possible values are SYS_EMERG, SYS_ALERT, SYS_CRIT, |
| # SYS_ERR, SYS_WARN, SYS_NOTICE, SYS_INFO and SYS_DEBUG. Default is SYS_DEBUG |
| log_level=SYS_NOTICE |
| |
| # Enable/Disable local file logging. Possible values are 0 (disable) and 1 (enable) |
| log_local=1 |
| |
| # Enable/Disable local flow message logging. Possible values are 0 (disable) and 1 (enable) |
| # log_flow=0 |
| |
| # Encapsulation type for tunnel. Possible values are MPLSoGRE, MPLSoUDP, VXLAN |
| # tunnel_type= |
| |
# Enable/Disable headless mode for the agent. In headless mode the agent
# retains the last known good configuration when connectivity to all control
# nodes is lost. Possible values are true (enable) and false (disable)
| # headless_mode= |
| headless_mode=true |
| |
# DHCP relay mode (true or false): determines whether a DHCP request arriving
# on a fabric interface with an unconfigured IP should be relayed
| # dhcp_relay_mode= |
| |
| # Sandesh send rate limit can be used to throttle system logs transmitted per |
| # second. System logs are dropped if the sending rate is exceeded |
| # sandesh_send_rate_limit= |
| {%- if compute.get('sandesh_send_rate_limits', {}).get('agent') %} |
| sandesh_send_rate_limit={{ compute.sandesh_send_rate_limits.agent }} |
| {%- elif compute.get('sandesh_send_rate_limits', {}).get('global') %} |
| sandesh_send_rate_limit={{ compute.sandesh_send_rate_limits.global }} |
| {%- endif %} |
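
# Pillar sketch for the rate-limit lookup above: a per-agent value takes
# precedence over the global one (values are illustrative):
#   compute:
#     sandesh_send_rate_limits:
#       global: 100
#       agent: 200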
| |
# Enable/Disable SSL-based XMPP authentication
| {%- if compute.xmpp.tls.enabled %} |
| xmpp_auth_enable = {{ compute.xmpp.tls.auth.enabled }} |
| xmpp_server_cert = {{ compute.xmpp.tls.cert_file }} |
| xmpp_server_key = {{ compute.xmpp.tls.key_file }} |
| xmpp_ca_cert = {{ compute.xmpp.tls.ca_cert_file }} |
| {%- endif %} |
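
# Pillar sketch for the XMPP TLS block above (paths are illustrative; key
# names follow the template's lookups):
#   compute:
#     xmpp:
#       tls:
#         enabled: true
#         auth:
#           enabled: true
#         cert_file: /etc/contrail/ssl/certs/server.pem
#         key_file: /etc/contrail/ssl/private/server-privkey.pem
#         ca_cert_file: /etc/contrail/ssl/certs/ca-cert.pem
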
# Gateway mode: can be server / vcpe (default is none)
| # gateway_mode= |
| {%- if compute.gateway_mode is defined %} |
| gateway_mode={{ compute.gateway_mode }} |
| {%- endif %} |
| |
| [SANDESH] |
| # sandesh_ssl_enable=false |
| # introspect_ssl_enable=false |
| # sandesh_keyfile=/etc/contrail/ssl/private/server-privkey.pem |
| # sandesh_certfile=/etc/contrail/ssl/certs/server.pem |
| # sandesh_ca_cert=/etc/contrail/ssl/certs/ca-cert.pem |
| |
| [RESTART] |
| # Enable/Disable backup of config and resource files |
| # backup_enable=true |
| # |
| # Directory containing backup of config and resource files |
| # backup_dir=/var/lib/contrail/backup |
| # |
| # Number of backup files |
| # backup_file_count=3 |
| # |
# The agent avoids generating a backup file if a change is detected within
# the time configured below (in milli-sec)
| # backup_idle_timeout=10000 |
| # |
| # Restore config/resource definitions from file |
| # restore_enable=true |
| # |
| # Audit time for config/resource read from file |
| # restore_audit_timeout=15000 |
| |
| [DNS] |
| # Client port used by vrouter-agent while connecting to contrail-named |
| # dns_client_port= |
| |
# List of IPAddress:Port pairs of DNS servers, separated by spaces.
| # servers=10.0.0.1:53 10.0.0.2:53 10.0.0.3:53 |
| servers={% for member in compute.control.members %}{{ member.host }}:53{% if not loop.last %} {% endif %}{% endfor %} |
| |
| # Timeout for DNS server queries in milli-seconds |
| # dns_timeout= |
| |
| # Maximum retries for DNS server queries |
| # dns_max_retries= |
| |
| [HYPERVISOR] |
| # Everything in this section is optional |
| |
| # Hypervisor type. Possible values are kvm, xen and vmware |
| type=kvm |
| |
| # Link-local IP address and prefix in ip/prefix_len format (for xen) |
| # xen_ll_ip= |
| |
| # Link-local interface name when hypervisor type is Xen |
| # xen_ll_interface= |
| |
| # Physical interface name when hypervisor type is vmware |
| # vmware_physical_interface= |
| |
# Mode of operation for VMware. Possible values are esxi_neutron and vcenter;
# default is esxi_neutron
| # vmware_mode= |
| |
| [FLOWS] |
| # Everything in this section is optional |
| |
| # Number of threads for flow setup |
| # thread_count = 4 |
| # |
| # Maximum flows allowed per VM (given as % of maximum system flows) |
| # max_vm_flows= |
| {%- if compute.max_vm_flows is defined %} |
| max_vm_flows={{ compute.max_vm_flows }} |
| {%- endif %} |
| |
| # Maximum number of link-local flows allowed across all VMs |
| # max_system_linklocal_flows=4096 |
| |
| # Maximum number of link-local flows allowed per VM |
| # max_vm_linklocal_flows=1024 |
| |
| # Number of Index state-machine events to log |
| # index_sm_log_count=0 |
| |
# Enable/Disable tracing of flow messages. Introspect can override this value
| # trace_enable=false |
| # |
| # Number of add-tokens |
| # add_tokens=100 |
| # Number of ksync-tokens |
| # ksync_tokens=50 |
| # Number of del-tokens |
| # del_tokens=50 |
| # Number of update-tokens |
| # update_tokens=50 |
| |
| [METADATA] |
| # Shared secret for metadata proxy service (Optional) |
| # metadata_proxy_secret=contrail |
| {%- if compute.metadata is defined %} |
| metadata_proxy_secret={{ compute.metadata.secret }} |
| {%- endif %} |
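
# Pillar sketch for the metadata proxy secret (the value is illustrative):
#   compute:
#     metadata:
#       secret: contrail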
| |
| # Metadata proxy port on which agent listens (Optional) |
| # metadata_proxy_port= |
| |
# Enable (true) SSL support for the metadata proxy service
| # metadata_use_ssl= |
| |
| # Path for Metadata Agent client certificate |
| # metadata_client_cert= |
| |
# Metadata Agent client certificate type (default=PEM)
# metadata_client_cert_type=
| |
| # Path for Metadata Agent client private key |
| # metadata_client_key= |
| |
| # Path for CA certificate |
| # metadata_ca_cert= |
| |
| [NETWORKS] |
| # control-channel IP address used by WEB-UI to connect to vnswad to fetch |
| # required information (Optional) |
| {%- if compute.bind is defined %} |
| control_network_ip={{ compute.bind.address }} |
| {%- else %} |
| control_network_ip={{ compute.interface.address }} |
| {%- endif %} |
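
# If the agent should report an address other than the data-plane interface
# address, a bind override can be supplied in pillar (address is illustrative):
#   compute:
#     bind:
#       address: 10.0.100.11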
| |
| [VIRTUAL-HOST-INTERFACE] |
| # Everything in this section is mandatory |
| |
| # name of virtual host interface |
| name=vhost0 |
| |
| # IP address and prefix in ip/prefix_len format |
| ip={{ compute.interface.address }}/{{ compute.interface.mask }} |
| |
| # Gateway IP address for virtual host |
| gateway={{ compute.interface.gateway }} |
| |
# Flag to indicate whether hosts in the vhost subnet can be resolved by ARP.
# If set to 1, hosts in the subnet are resolved by ARP; if set to 0, all
# traffic destined to hosts within the subnet also goes via the default
# gateway
# subnet_hosts_resolvable=0
| |
# Physical interface name to which the virtual host interface maps
| physical_interface={{ compute.interface.dev }} |
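
# The ip, gateway and physical_interface values above come from a single
# interface entry in pillar, shaped roughly like this (addressing is
# illustrative):
#   compute:
#     interface:
#       dev: bond0
#       address: 10.0.100.11
#       mask: 24
#       gateway: 10.0.100.1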
| |
# List of IP addresses assigned to the compute node other than vhost. Specify
# this only if the vhost interface is un-numbered in the host OS. The agent
# will use one of the compute_node_address entries to run services that need
# an IP address in the host OS (like metadata...)
| #compute_node_address = 10.204.216.28 |
| |
| # We can have multiple gateway sections with different indices in the |
| # following format |
| [GATEWAY-0] |
| # Name of the routing_instance for which the gateway is being configured |
| # routing_instance=default-domain:admin:public:public |
| |
| # Gateway interface name |
| # interface=vgw |
| |
| # Virtual network ip blocks for which gateway service is required. Each IP |
| # block is represented as ip/prefix. Multiple IP blocks are represented by |
| # separating each with a space |
| # ip_blocks=1.1.1.1/24 |
| |
| [GATEWAY-1] |
| # Name of the routing_instance for which the gateway is being configured |
| # routing_instance=default-domain:admin:public1:public1 |
| |
| # Gateway interface name |
| # interface=vgw1 |
| |
| # Virtual network ip blocks for which gateway service is required. Each IP |
| # block is represented as ip/prefix. Multiple IP blocks are represented by |
| # separating each with a space |
| # ip_blocks=2.2.1.0/24 2.2.2.0/24 |
| |
| # Routes to be exported in routing_instance. Each route is represented as |
| # ip/prefix. Multiple routes are represented by separating each with a space |
| # routes=10.10.10.1/24 11.11.11.1/24 |
| |
| [SERVICE-INSTANCE] |
| # Path to the script which handles the netns commands |
| netns_command=/usr/bin/opencontrail-vrouter-netns |
| docker_command=/usr/bin/opencontrail-vrouter-docker |
| |
| # Number of workers that will be used to start netns commands |
| #netns_workers=1 |
| |
# Timeout for each netns command; when the timeout is reached, the netns
# command is killed.
| #netns_timeout=30 |
| |
| [TASK] |
| # Number of threads used by TBB |
| # thread_count = 8 |
| # Log message if time taken to execute task exceeds a threshold (in msec) |
| # log_exec_threshold = 10 |
| # |
| # Log message if time taken to schedule task exceeds a threshold (in msec) |
| # log_schedule_threshold = 25 |
| # |
| # TBB Keepawake timer interval in msec |
| # tbb_keepawake_timeout = 20 |
| # |
| # Timeout for task monitor in msec |
| # task_monitor_timeout = 50000 |
| # |
# Policy to pin the ksync netlink IO thread to a CPU. By default, CPU pinning
# is disabled. Other values for the policy are:
| # "last" - Last CPUID |
| # "<num>" - CPU-ID to pin (in decimal) |
| # ksync_thread_cpu_pin_policy=last |
| |
| [SERVICES] |
# bgp_as_a_service_port_range - reserved range of ports to be used.
| # bgp_as_a_service_port_range=30000-35000 |
| |
| # [QOS] |
| # |
# # Knob to configure priority tagging when in DCB mode. Default value is true
| # priority_tagging = false |
| # |
| # [QUEUE-1] |
| # Logical nic queues for qos config |
| # logical_queue= |
| |
| # [QUEUE-2] |
| # Logical nic queues for qos config |
| # logical_queue= |
| |
| # [QUEUE-3] |
| # This is the default hardware queue |
| # default_hw_queue= true |
| |
| # Logical nic queues for qos config |
| # logical_queue= |
| |
| # [QOS-NIANTIC] |
| # [PG-1] |
| # Scheduling algorithm for priority group (strict/rr) |
| # scheduling= |
| |
| # Total hardware queue bandwidth used by priority group |
| # bandwidth= |
| |
| # [PG-2] |
| # Scheduling algorithm for priority group (strict/rr) |
| # scheduling= |
| |
| # Total hardware queue bandwidth used by priority group |
| # bandwidth= |
| |
| # [PG-3] |
| # Scheduling algorithm for priority group (strict/rr) |
| # scheduling= |
| |
| # Total hardware queue bandwidth used by priority group |
| # bandwidth= |
| |
| [LLGR] |
| # Note: All time values are in seconds. |
| |
# End of RIB Rx (received from CN)
# Fallback time in seconds to age out stale entries when a CN becomes
# active; this is used only when end-of-rib is not seen from the CN.
# end_of_rib_rx_fallback_time=
| |
# End of RIB Tx (to be sent to CN)
# Fallback time in seconds to send EOR to the CN. The agent waits for
# inactivity before sending it; however, activity may never die down, so the
# fallback is used.
# Inactivity time is the time the agent waits before concluding end-of-RIB;
# during this interval no route updates are seen.
# end_of_rib_tx_fallback_time=
# end_of_rib_tx_inactivity_time=
| |
# Config cleanup time
# Once end-of-config is determined, this time is used to start stale cleanup
# of config.
# stale_config_cleanup_time=
| |
# End of config determination time
# Inactivity time is the time the agent waits to conclude end-of-config (EOC);
# during this interval no config updates are seen.
# Fallback time in seconds to declare EOC in case config inactivity is never
# seen.
# config_fallback_time=
# config_inactivity_time=
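
# A sketch with illustrative values (not shipped defaults) showing how the
# LLGR timers relate: conclude EOC after 15s of config silence, give up and
# declare EOC after 300s regardless, then start stale config cleanup 100s
# after EOC.
# config_inactivity_time=15
# config_fallback_time=300
# stale_config_cleanup_time=100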