Commit 34bb822
Changed files (159)
openstack
ansible
devstack
lab-01
lab-02
lab-03
lab-04
lab-05
lab-06
lab-00
lab-01
lab-02
lab-03
lab-04
img
rdo
lab-01
lab-02
img
lab-03
openstack/ansible/files/bashrc.j2
@@ -0,0 +1,12 @@
+# .bashrc
+
+# Source global definitions
+if [ -f /etc/bashrc ]; then
+ . /etc/bashrc
+fi
+
+# Uncomment the following line if you don't like systemctl's auto-paging feature:
+# export SYSTEMD_PAGER=
+
+# User specific aliases and functions
+PS1='\[\e[1;{{ prompt_color }}m\][\u@\h \W]\$\[\e[0m\] '
openstack/ansible/files/ceph.repo.j2
@@ -0,0 +1,7 @@
+[ceph-noarch]
+name=Ceph noarch packages
+baseurl=http://ceph.com/rpm-{{ceph_release}}/{{distro}}/noarch
+enabled=1
+gpgcheck=1
+type=rpm-md
+gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
openstack/ansible/files/hosts
@@ -0,0 +1,9 @@
+127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
+::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
+192.168.0.10 controller
+192.168.0.11 neutron
+192.168.0.12 compute1
+192.168.0.13 compute2
+192.168.0.14 storage1
+192.168.0.15 storage2
+192.168.0.16 storage3
openstack/ansible/files/known_hosts
@@ -0,0 +1,6 @@
+compute1,192.168.0.12 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBB9KdsuWbMKDHwYtCnUZ1ccwWITvsBmSltHvvvls/WxDwaN0zCCnbRH77yzu4Q7B6Bq8IZ+/ZO4cnWdbglb/RhM=
+compute2,192.168.0.13 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBB9KdsuWbMKDHwYtCnUZ1ccwWITvsBmSltHvvvls/WxDwaN0zCCnbRH77yzu4Q7B6Bq8IZ+/ZO4cnWdbglb/RhM=
+neutron,192.168.0.11 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBB9KdsuWbMKDHwYtCnUZ1ccwWITvsBmSltHvvvls/WxDwaN0zCCnbRH77yzu4Q7B6Bq8IZ+/ZO4cnWdbglb/RhM=
+storage1,192.168.0.14 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBANVXb89qnohhf9oeODQ/oqUHddFxcZoctC9sXTn/bTmnSGQlyETZwXjN/aKZ7+0HMlbns9D1bsRhLAABvblBhQ=
+storage2,192.168.0.15 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBANVXb89qnohhf9oeODQ/oqUHddFxcZoctC9sXTn/bTmnSGQlyETZwXjN/aKZ7+0HMlbns9D1bsRhLAABvblBhQ=
+storage3,192.168.0.16 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBANVXb89qnohhf9oeODQ/oqUHddFxcZoctC9sXTn/bTmnSGQlyETZwXjN/aKZ7+0HMlbns9D1bsRhLAABvblBhQ=
openstack/ansible/files/nova-compute.conf
@@ -0,0 +1,4075 @@
+[DEFAULT]
+notification_driver=ceilometer.compute.nova_notifier
+notification_driver=nova.openstack.common.notifier.rpc_notifier
+
+#
+# From oslo.messaging
+#
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo_messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per topic. Default is
+# unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match
+# "host" option, if running Nova. (string value)
+#rpc_zmq_host=localhost
+
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+# (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC thread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi valued)
+#notification_driver =
+notification_driver =
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group;name - [rpc_notifier2]/topics
+#notification_topics=notifications
+notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full configuration. If
+# not set, we fall back to the rpc_backend option and driver specific
+# configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other drivers include qpid
+# and zmq. (string value)
+#rpc_backend=rabbit
+rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be overridden by an
+# exchange name specified in the transport_url option. (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in nova.availability_zones
+#
+
+# The availability_zone to show internal services under
+# (string value)
+#internal_service_availability_zone=internal
+internal_service_availability_zone=internal
+
+# Default compute node availability_zone (string value)
+#default_availability_zone=nova
+default_availability_zone=nova
+
+
+#
+# Options defined in nova.crypto
+#
+
+# Filename of root CA (string value)
+#ca_file=cacert.pem
+
+# Filename of private key (string value)
+#key_file=private/cakey.pem
+
+# Filename of root Certificate Revocation List (string value)
+#crl_file=crl.pem
+
+# Where we keep our keys (string value)
+#keys_path=$state_path/keys
+
+# Where we keep our root CA (string value)
+#ca_path=$state_path/CA
+
+# Should we use a CA for each project? (boolean value)
+#use_project_ca=false
+
+# Subject for certificate for users, %s for project, user,
+# timestamp (string value)
+#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
+
+# Subject for certificate for projects, %s for project,
+# timestamp (string value)
+#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
+
+
+#
+# Options defined in nova.exception
+#
+
+# Make exception message format errors fatal (boolean value)
+#fatal_exception_format_errors=false
+
+
+#
+# Options defined in nova.netconf
+#
+
+# IP address of this host (string value)
+#my_ip=10.0.0.1
+
+# Block storage IP address of this host (string value)
+#my_block_storage_ip=$my_ip
+
+# Name of this node. This can be an opaque identifier. It is
+# not necessarily a hostname, FQDN, or IP address. However,
+# the node name must be valid within an AMQP key, and if using
+# ZeroMQ, a valid hostname, FQDN, or IP address (string value)
+#host=nova
+
+# Use IPv6 (boolean value)
+#use_ipv6=false
+
+
+#
+# Options defined in nova.notifications
+#
+
+# If set, send compute.instance.update notifications on
+# instance state changes. Valid values are None for no
+# notifications, "vm_state" for notifications on VM state
+# changes, or "vm_and_task_state" for notifications on VM and
+# task state changes. (string value)
+#notify_on_state_change=<None>
+
+# If set, send api.fault notifications on caught exceptions in
+# the API service. (boolean value)
+#notify_api_faults=false
+notify_api_faults=false
+
+# Default notification level for outgoing notifications
+# (string value)
+#default_notification_level=INFO
+
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=<None>
+
+
+#
+# Options defined in nova.paths
+#
+
+# Directory where the nova python module is installed (string
+# value)
+#pybasedir=/usr/lib/python/site-packages
+
+# Directory where nova binaries are installed (string value)
+#bindir=/usr/local/bin
+
+# Top-level directory for maintaining nova's state (string
+# value)
+#state_path=/var/lib/nova
+state_path=/var/lib/nova
+
+
+#
+# Options defined in nova.quota
+#
+
+# Number of instances allowed per project (integer value)
+#quota_instances=10
+
+# Number of instance cores allowed per project (integer value)
+#quota_cores=20
+
+# Megabytes of instance RAM allowed per project (integer
+# value)
+#quota_ram=51200
+
+# Number of floating IPs allowed per project (integer value)
+#quota_floating_ips=10
+
+# Number of fixed IPs allowed per project (this should be at
+# least the number of instances allowed) (integer value)
+#quota_fixed_ips=-1
+
+# Number of metadata items allowed per instance (integer
+# value)
+#quota_metadata_items=128
+
+# Number of injected files allowed (integer value)
+#quota_injected_files=5
+
+# Number of bytes allowed per injected file (integer value)
+#quota_injected_file_content_bytes=10240
+
+# Length of injected file path (integer value)
+#quota_injected_file_path_length=255
+
+# Number of security groups per project (integer value)
+#quota_security_groups=10
+
+# Number of security rules per security group (integer value)
+#quota_security_group_rules=20
+
+# Number of key pairs per user (integer value)
+#quota_key_pairs=100
+
+# Number of server groups per project (integer value)
+#quota_server_groups=10
+
+# Number of servers per server group (integer value)
+#quota_server_group_members=10
+
+# Number of seconds until a reservation expires (integer
+# value)
+#reservation_expire=86400
+
+# Count of reservations until usage is refreshed. This
+# defaults to 0(off) to avoid additional load but it is useful
+# to turn on to help keep quota usage up to date and reduce
+# the impact of out of sync usage issues. (integer value)
+#until_refresh=0
+
+# Number of seconds between subsequent usage refreshes. This
+# defaults to 0(off) to avoid additional load but it is useful
+# to turn on to help keep quota usage up to date and reduce
+# the impact of out of sync usage issues. Note that quotas are
+# not updated on a periodic task, they will update on a new
+# reservation if max_age has passed since the last reservation
+# (integer value)
+#max_age=0
+
+# Default driver to use for quota checks (string value)
+#quota_driver=nova.quota.DbQuotaDriver
+
+
+#
+# Options defined in nova.service
+#
+
+# Seconds between nodes reporting state to datastore (integer
+# value)
+#report_interval=10
+report_interval=10
+
+# Enable periodic tasks (boolean value)
+#periodic_enable=true
+
+# Range of seconds to randomly delay when starting the
+# periodic task scheduler to reduce stampeding. (Disable by
+# setting to 0) (integer value)
+#periodic_fuzzy_delay=60
+
+# A list of APIs to enable by default (list value)
+#enabled_apis=ec2,osapi_compute,metadata
+
+# A list of APIs with enabled SSL (list value)
+#enabled_ssl_apis=
+
+# The IP address on which the EC2 API will listen. (string
+# value)
+#ec2_listen=0.0.0.0
+
+# The port on which the EC2 API will listen. (integer value)
+#ec2_listen_port=8773
+
+# Number of workers for EC2 API service. The default will be
+# equal to the number of CPUs available. (integer value)
+#ec2_workers=<None>
+
+# The IP address on which the OpenStack API will listen.
+# (string value)
+#osapi_compute_listen=0.0.0.0
+
+# The port on which the OpenStack API will listen. (integer
+# value)
+#osapi_compute_listen_port=8774
+
+# Number of workers for OpenStack API service. The default
+# will be the number of CPUs available. (integer value)
+#osapi_compute_workers=<None>
+
+# OpenStack metadata service manager (string value)
+#metadata_manager=nova.api.manager.MetadataManager
+
+# The IP address on which the metadata API will listen.
+# (string value)
+#metadata_listen=0.0.0.0
+
+# The port on which the metadata API will listen. (integer
+# value)
+#metadata_listen_port=8775
+
+# Number of workers for metadata service. The default will be
+# the number of CPUs available. (integer value)
+#metadata_workers=<None>
+
+# Full class name for the Manager for compute (string value)
+#compute_manager=nova.compute.manager.ComputeManager
+compute_manager=nova.compute.manager.ComputeManager
+
+# Full class name for the Manager for console proxy (string
+# value)
+#console_manager=nova.console.manager.ConsoleProxyManager
+
+# Manager for console auth (string value)
+#consoleauth_manager=nova.consoleauth.manager.ConsoleAuthManager
+
+# Full class name for the Manager for cert (string value)
+#cert_manager=nova.cert.manager.CertManager
+
+# Full class name for the Manager for network (string value)
+#network_manager=nova.network.manager.FlatDHCPManager
+
+# Full class name for the Manager for scheduler (string value)
+#scheduler_manager=nova.scheduler.manager.SchedulerManager
+
+# Maximum time since last check-in for up service (integer
+# value)
+#service_down_time=60
+service_down_time=60
+
+
+#
+# Options defined in nova.utils
+#
+
+# Whether to log monkey patching (boolean value)
+#monkey_patch=false
+
+# List of modules/decorators to monkey patch (list value)
+#monkey_patch_modules=nova.api.ec2.cloud:nova.notifications.notify_decorator,nova.compute.api:nova.notifications.notify_decorator
+
+# Length of generated instance admin passwords (integer value)
+#password_length=12
+
+# Time period to generate instance usages for. Time period
+# must be hour, day, month or year (string value)
+#instance_usage_audit_period=month
+
+# Path to the rootwrap configuration file to use for running
+# commands as root (string value)
+#rootwrap_config=/etc/nova/rootwrap.conf
+rootwrap_config=/etc/nova/rootwrap.conf
+
+# Explicitly specify the temporary working directory (string
+# value)
+#tempdir=<None>
+
+
+#
+# Options defined in nova.wsgi
+#
+
+# File name for the paste.deploy config for nova-api (string
+# value)
+#api_paste_config=api-paste.ini
+
+# A python format string that is used as the template to
+# generate log lines. The following values can be formatted
+# into it: client_ip, date_time, request_line, status_code,
+# body_length, wall_seconds. (string value)
+#wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
+
+# CA certificate file to use to verify connecting clients
+# (string value)
+#ssl_ca_file=<None>
+
+# SSL certificate of API server (string value)
+#ssl_cert_file=<None>
+
+# SSL private key of API server (string value)
+#ssl_key_file=<None>
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server
+# socket. Not supported on OS X. (integer value)
+#tcp_keepidle=600
+
+# Size of the pool of greenthreads used by wsgi (integer
+# value)
+#wsgi_default_pool_size=1000
+
+# Maximum line size of message headers to be accepted.
+# max_header_line may need to be increased when using large
+# tokens (typically those generated by the Keystone v3 API
+# with big service catalogs). (integer value)
+#max_header_line=16384
+
+# If False, closes the client socket connection explicitly.
+# (boolean value)
+#wsgi_keep_alive=true
+
+# Timeout for client connections' socket operations. If an
+# incoming connection is idle for this number of seconds it
+# will be closed. A value of '0' means wait forever. (integer
+# value)
+#client_socket_timeout=900
+
+
+#
+# Options defined in nova.api.auth
+#
+
+# Whether to use per-user rate limiting for the api. This
+# option is only used by v2 api. Rate limiting is removed from
+# v3 api. (boolean value)
+#api_rate_limit=false
+
+# The strategy to use for auth: keystone, noauth
+# (deprecated), or noauth2. Both noauth and noauth2 are
+# designed for testing only, as they do no actual credential
+# checking. noauth provides administrative credentials
+# regardless of the passed in user, noauth2 only does if
+# 'admin' is specified as the username. (string value)
+#auth_strategy=keystone
+auth_strategy=keystone
+
+# Treat X-Forwarded-For as the canonical remote address. Only
+# enable this if you have a sanitizing proxy. (boolean value)
+#use_forwarded_for=false
+
+
+#
+# Options defined in nova.api.ec2
+#
+
+# Number of failed auths before lockout. (integer value)
+#lockout_attempts=5
+
+# Number of minutes to lockout if triggered. (integer value)
+#lockout_minutes=15
+
+# Number of minutes for lockout window. (integer value)
+#lockout_window=15
+
+# URL to get token from ec2 request. (string value)
+#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
+
+# Return the IP address as private dns hostname in describe
+# instances (boolean value)
+#ec2_private_dns_show_ip=false
+
+# Validate security group names according to EC2 specification
+# (boolean value)
+#ec2_strict_validation=true
+
+# Time in seconds before ec2 timestamp expires (integer value)
+#ec2_timestamp_expiry=300
+
+# Disable SSL certificate verification. (boolean value)
+#keystone_ec2_insecure=false
+
+
+#
+# Options defined in nova.api.ec2.cloud
+#
+
+# The IP address of the EC2 API server (string value)
+#ec2_host=$my_ip
+
+# The internal IP address of the EC2 API server (string value)
+#ec2_dmz_host=$my_ip
+
+# The port of the EC2 API server (integer value)
+#ec2_port=8773
+
+# The protocol to use when connecting to the EC2 API server
+# (http, https) (string value)
+#ec2_scheme=http
+
+# The path prefix used to call the ec2 API server (string
+# value)
+#ec2_path=/
+
+# List of region=fqdn pairs separated by commas (list value)
+#region_list=
+
+
+#
+# Options defined in nova.api.metadata.base
+#
+
+# List of metadata versions to skip placing into the config
+# drive (string value)
+#config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
+
+# Driver to use for vendor data (string value)
+#vendordata_driver=nova.api.metadata.vendordata_json.JsonFileVendorData
+
+
+#
+# Options defined in nova.api.metadata.handler
+#
+
+# Time in seconds to cache metadata; 0 to disable metadata
+# caching entirely (not recommended). Increasing this should
+# improve response times of the metadata API when under heavy
+# load. Higher values may increase memory usage and result in
+# longer times for host metadata changes to take effect.
+# (integer value)
+#metadata_cache_expiration=15
+
+
+#
+# Options defined in nova.api.metadata.vendordata_json
+#
+
+# File to load JSON formatted vendor data from (string value)
+#vendordata_jsonfile_path=<None>
+
+
+#
+# Options defined in nova.api.openstack.common
+#
+
+# The maximum number of items returned in a single response
+# from a collection resource (integer value)
+#osapi_max_limit=1000
+
+# Base URL that will be presented to users in links to the
+# OpenStack Compute API (string value)
+#osapi_compute_link_prefix=<None>
+
+# Base URL that will be presented to users in links to glance
+# resources (string value)
+#osapi_glance_link_prefix=<None>
+
+
+#
+# Options defined in nova.api.openstack.compute
+#
+
+# Permit instance snapshot operations. (boolean value)
+#allow_instance_snapshots=true
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib
+#
+
+# Specify list of extensions to load when using
+# osapi_compute_extension option with
+# nova.api.openstack.compute.contrib.select_extensions (list
+# value)
+#osapi_compute_ext_list=
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib.fping
+#
+
+# Full path to fping. (string value)
+#fping_path=/usr/sbin/fping
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib.os_tenant_networks
+#
+
+# Enables or disables quota checking for tenant networks
+# (boolean value)
+#enable_network_quota=false
+
+# Control for checking for default networks (string value)
+#use_neutron_default_nets=False
+
+# Default tenant id when creating neutron networks (string
+# value)
+#neutron_default_tenant_id=default
+
+# Number of private networks allowed per project (integer
+# value)
+#quota_networks=3
+
+
+#
+# Options defined in nova.api.openstack.compute.extensions
+#
+
+# osapi compute extension to load (multi valued)
+#osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
+
+
+#
+# Options defined in nova.api.openstack.compute.plugins.v3.hide_server_addresses
+#
+
+# List of instance states that should hide network info (list
+# value)
+#osapi_hide_server_address_states=building
+
+
+#
+# Options defined in nova.api.openstack.compute.servers
+#
+
+# Enables returning of the instance password by the relevant
+# server API calls such as create, rebuild or rescue. If the
+# hypervisor does not support password injection then the
+# password returned will not be correct (boolean value)
+#enable_instance_password=true
+
+
+#
+# Options defined in nova.cert.rpcapi
+#
+
+# The topic cert nodes listen on (string value)
+#cert_topic=cert
+
+
+#
+# Options defined in nova.cloudpipe.pipelib
+#
+
+# Image ID used when starting up a cloudpipe vpn server
+# (string value)
+#vpn_image_id=0
+
+# Flavor for vpn instances (string value)
+#vpn_flavor=m1.tiny
+
+# Template for cloudpipe instance boot script (string value)
+#boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template
+
+# Network to push into openvpn config (string value)
+#dmz_net=10.0.0.0
+
+# Netmask to push into openvpn config (string value)
+#dmz_mask=255.255.255.0
+
+# Suffix to add to project name for vpn key and secgroups
+# (string value)
+#vpn_key_suffix=-vpn
+
+
+#
+# Options defined in nova.cmd.novnc
+#
+
+# Record sessions to FILE.[session_number] (boolean value)
+#record=false
+
+# Become a daemon (background process) (boolean value)
+#daemon=false
+
+# Disallow non-encrypted connections (boolean value)
+#ssl_only=false
+
+# Source is ipv6 (boolean value)
+#source_is_ipv6=false
+
+# SSL certificate file (string value)
+#cert=self.pem
+
+# SSL key file (if separate from cert) (string value)
+#key=<None>
+
+# Run webserver on same port. Serve files from DIR. (string
+# value)
+#web=/usr/share/spice-html5
+
+
+#
+# Options defined in nova.cmd.novncproxy
+#
+
+# Host on which to listen for incoming requests (string value)
+#novncproxy_host=0.0.0.0
+
+# Port on which to listen for incoming requests (integer
+# value)
+#novncproxy_port=6080
+
+
+#
+# Options defined in nova.compute.api
+#
+
+# Allow destination machine to match source for resize. Useful
+# when testing in single-host environments. (boolean value)
+#allow_resize_to_same_host=false
+
+# Allow migrate machine to the same host. Useful when testing
+# in single-host environments. (boolean value)
+#allow_migrate_to_same_host=false
+
+# Availability zone to use when user doesn't specify one
+# (string value)
+#default_schedule_zone=<None>
+
+# These are image properties which a snapshot should not
+# inherit from an instance (list value)
+#non_inheritable_image_properties=cache_in_nova,bittorrent
+
+# Kernel image that indicates not to use a kernel, but to use
+# a raw disk image instead (string value)
+#null_kernel=nokernel
+
+# When creating multiple instances with a single request using
+# the os-multiple-create API extension, this template will be
+# used to build the display name for each instance. The
+# benefit is that the instances end up with different
+# hostnames. To restore legacy behavior of every instance
+# having the same name, set this option to "%(name)s". Valid
+# keys for the template are: name, uuid, count. (string value)
+#multi_instance_display_name_template=%(name)s-%(count)d
+
+# Maximum number of devices that will result in a local image
+# being created on the hypervisor node. Setting this to 0
+# means nova will allow only boot from volume. A negative
+# number means unlimited. (integer value)
+#max_local_block_devices=3
+
+
+#
+# Options defined in nova.compute.flavors
+#
+
+# Default flavor to use for the EC2 API only. The Nova API
+# does not support a default flavor. (string value)
+#default_flavor=m1.small
+
+
+#
+# Options defined in nova.compute.manager
+#
+
+# Console proxy host to use to connect to instances on this
+# host. (string value)
+#console_host=nova
+
+# Name of network to use to set access IPs for instances
+# (string value)
+#default_access_ip_network_name=<None>
+
+# Whether to batch up the application of IPTables rules during
+# a host restart and apply all at the end of the init phase
+# (boolean value)
+#defer_iptables_apply=false
+
+# Where instances are stored on disk (string value)
+#instances_path=$state_path/instances
+
+# Generate periodic compute.instance.exists notifications
+# (boolean value)
+#instance_usage_audit=false
+
+# Number of 1 second retries needed in live_migration (integer
+# value)
+#live_migration_retry_count=30
+
+# Whether to start guests that were running before the host
+# rebooted (boolean value)
+#resume_guests_state_on_host_boot=false
+
+# Number of times to retry network allocation on failures
+# (integer value)
+#network_allocate_retries=0
+
+# Maximum number of instance builds to run concurrently
+# (integer value)
+#max_concurrent_builds=10
+
+# Number of times to retry block device allocation on failures
+# (integer value)
+#block_device_allocate_retries=60
+
+# The number of times to attempt to reap an instance's files.
+# (integer value)
+#maximum_instance_delete_attempts=5
+
+# Interval to pull network bandwidth usage info. Not supported
+# on all hypervisors. Set to -1 to disable. Setting this to 0
+# will run at the default rate. (integer value)
+#bandwidth_poll_interval=600
+
+# Interval to sync power states between the database and the
+# hypervisor. Set to -1 to disable. Setting this to 0 will run
+# at the default rate. (integer value)
+#sync_power_state_interval=600
+
+# Number of seconds between instance network information cache
+# updates (integer value)
+#heal_instance_info_cache_interval=60
+heal_instance_info_cache_interval=60
+
+# Interval in seconds for reclaiming deleted instances
+# (integer value)
+#reclaim_instance_interval=0
+
+# Interval in seconds for gathering volume usages (integer
+# value)
+#volume_usage_poll_interval=0
+
+# Interval in seconds for polling shelved instances to
+# offload. Set to -1 to disable. Setting this to 0 will run at
+# the default rate. (integer value)
+#shelved_poll_interval=3600
+
+# Time in seconds before a shelved instance is eligible for
+# removing from a host. -1 never offload, 0 offload when
+# shelved (integer value)
+#shelved_offload_time=0
+
+# Interval in seconds for retrying failed instance file
+# deletes. Set to -1 to disable. Setting this to 0 will run at
+# the default rate. (integer value)
+#instance_delete_interval=300
+
+# Waiting time interval (seconds) between block device
+# allocation retries on failures (integer value)
+#block_device_allocate_retries_interval=3
+
+# Waiting time interval (seconds) between sending the
+# scheduler a list of current instance UUIDs to verify that
+# its view of instances is in sync with nova. If the CONF
+# option `scheduler_tracks_instance_changes` is False,
+# changing this option will have no effect. (integer value)
+#scheduler_instance_sync_interval=120
+
+# Action to take if a running deleted instance is detected.
+# Valid options are 'noop', 'log', 'shutdown', or 'reap'. Set
+# to 'noop' to take no action. (string value)
+#running_deleted_instance_action=reap
+
+# Number of seconds to wait between runs of the cleanup task.
+# (integer value)
+#running_deleted_instance_poll_interval=1800
+
+# Number of seconds after being deleted when a running
+# instance should be considered eligible for cleanup. (integer
+# value)
+#running_deleted_instance_timeout=0
+
+# Automatically hard reboot an instance if it has been stuck
+# in a rebooting state longer than N seconds. Set to 0 to
+# disable. (integer value)
+#reboot_timeout=0
+
+# Amount of time in seconds an instance can be in BUILD before
+# going into ERROR status. Set to 0 to disable. (integer
+# value)
+#instance_build_timeout=0
+
+# Automatically unrescue an instance after N seconds. Set to 0
+# to disable. (integer value)
+#rescue_timeout=0
+
+# Automatically confirm resizes after N seconds. Set to 0 to
+# disable. (integer value)
+#resize_confirm_window=0
+
+# Total amount of time to wait in seconds for an instance to
+# perform a clean shutdown. (integer value)
+#shutdown_timeout=60
+
+
+#
+# Options defined in nova.compute.monitors
+#
+
+# Monitor classes available to the compute which may be
+# specified more than once. (multi valued)
+#compute_available_monitors=nova.compute.monitors.all_monitors
+
+# A list of monitors that can be used for getting compute
+# metrics. (list value)
+#compute_monitors=
+
+
+#
+# Options defined in nova.compute.resource_tracker
+#
+
+# Amount of disk in MB to reserve for the host (integer value)
+#reserved_host_disk_mb=0
+
+# Amount of memory in MB to reserve for the host (integer
+# value)
+#reserved_host_memory_mb=512
+reserved_host_memory_mb=512
+
+# Class that will manage stats for the local compute host
+# (string value)
+#compute_stats_class=nova.compute.stats.Stats
+
+# The names of the extra resources to track. (list value)
+#compute_resources=vcpu
+
+
+#
+# Options defined in nova.compute.rpcapi
+#
+
+# The topic compute nodes listen on (string value)
+#compute_topic=compute
+
+
+#
+# Options defined in nova.conductor.tasks.live_migrate
+#
+
+# Number of times to retry live-migration before failing. If
+# == -1, try until out of hosts. If == 0, only try once, no
+# retries. (integer value)
+#migrate_max_retries=-1
+
+
+#
+# Options defined in nova.console.manager
+#
+
+# Driver to use for the console proxy (string value)
+#console_driver=nova.console.xvp.XVPConsoleProxy
+
+# Stub calls to compute worker for tests (boolean value)
+#stub_compute=false
+
+# Publicly visible name for this console host (string value)
+#console_public_hostname=nova
+
+
+#
+# Options defined in nova.console.rpcapi
+#
+
+# The topic console proxy nodes listen on (string value)
+#console_topic=console
+
+
+#
+# Options defined in nova.console.xvp
+#
+
+# XVP conf template (string value)
+#console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template
+
+# Generated XVP conf file (string value)
+#console_xvp_conf=/etc/xvp.conf
+
+# XVP master process pid file (string value)
+#console_xvp_pid=/var/run/xvp.pid
+
+# XVP log file (string value)
+#console_xvp_log=/var/log/xvp.log
+
+# Port for XVP to multiplex VNC connections on (integer value)
+#console_xvp_multiplex_port=5900
+
+
+#
+# Options defined in nova.consoleauth
+#
+
+# The topic console auth proxy nodes listen on (string value)
+#consoleauth_topic=consoleauth
+
+
+#
+# Options defined in nova.consoleauth.manager
+#
+
+# How many seconds before deleting tokens (integer value)
+#console_token_ttl=600
+
+
+#
+# Options defined in nova.db.api
+#
+
+# Services to be added to the available pool on create
+# (boolean value)
+#enable_new_services=true
+
+# Template string to be used to generate instance names
+# (string value)
+#instance_name_template=instance-%08x
+
+# Template string to be used to generate snapshot names
+# (string value)
+#snapshot_name_template=snapshot-%s
+
+
+#
+# Options defined in nova.db.base
+#
+
+# The driver to use for database access (string value)
+#db_driver=nova.db
+
+
+#
+# Options defined in nova.db.sqlalchemy.api
+#
+
+# When set, compute API will consider duplicate hostnames
+# invalid within the specified scope, regardless of case.
+# Should be empty, "project" or "global". (string value)
+#osapi_compute_unique_server_name_scope=
+
+
+#
+# Options defined in nova.image.s3
+#
+
+# Parent directory for tempdir used for image decryption
+# (string value)
+#image_decryption_dir=/tmp
+
+# Hostname or IP for OpenStack to use when accessing the S3
+# api (string value)
+#s3_host=$my_ip
+
+# Port used when accessing the S3 api (integer value)
+#s3_port=3333
+
+# Access key to use for S3 server for images (string value)
+#s3_access_key=notchecked
+
+# Secret key to use for S3 server for images (string value)
+#s3_secret_key=notchecked
+
+# Whether to use SSL when talking to S3 (boolean value)
+#s3_use_ssl=false
+
+# Whether to affix the tenant id to the access key when
+# downloading from S3 (boolean value)
+#s3_affix_tenant=false
+
+
+#
+# Options defined in nova.ipv6.api
+#
+
+# Backend to use for IPv6 generation (string value)
+#ipv6_backend=rfc2462
+
+
+#
+# Options defined in nova.network
+#
+
+# The full class name of the network API class to use (string
+# value)
+#network_api_class=nova.network.api.API
+network_api_class=nova.network.neutronv2.api.API
+
+
+#
+# Options defined in nova.network.driver
+#
+
+# Driver to use for network creation (string value)
+#network_driver=nova.network.linux_net
+
+
+#
+# Options defined in nova.network.floating_ips
+#
+
+# Default pool for floating IPs (string value)
+#default_floating_pool=nova
+default_floating_pool=public
+
+# Autoassigning floating IP to VM (boolean value)
+#auto_assign_floating_ip=false
+
+# Full class name for the DNS Manager for floating IPs (string
+# value)
+#floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
+
+# Full class name for the DNS Manager for instance IPs (string
+# value)
+#instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
+
+# Full class name for the DNS Zone for instance IPs (string
+# value)
+#instance_dns_domain=
+
+
+#
+# Options defined in nova.network.ldapdns
+#
+
+# URL for LDAP server which will store DNS entries (string
+# value)
+#ldap_dns_url=ldap://ldap.example.com:389
+
+# User for LDAP DNS (string value)
+#ldap_dns_user=uid=admin,ou=people,dc=example,dc=org
+
+# Password for LDAP DNS (string value)
+#ldap_dns_password=password
+
+# Hostmaster for LDAP DNS driver Statement of Authority
+# (string value)
+#ldap_dns_soa_hostmaster=hostmaster@example.org
+
+# DNS Servers for LDAP DNS driver (multi valued)
+#ldap_dns_servers=dns.example.org
+
+# Base DN for DNS entries in LDAP (string value)
+#ldap_dns_base_dn=ou=hosts,dc=example,dc=org
+
+# Refresh interval (in seconds) for LDAP DNS driver Statement
+# of Authority (string value)
+#ldap_dns_soa_refresh=1800
+
+# Retry interval (in seconds) for LDAP DNS driver Statement of
+# Authority (string value)
+#ldap_dns_soa_retry=3600
+
+# Expiry interval (in seconds) for LDAP DNS driver Statement
+# of Authority (string value)
+#ldap_dns_soa_expiry=86400
+
+# Minimum interval (in seconds) for LDAP DNS driver Statement
+# of Authority (string value)
+#ldap_dns_soa_minimum=7200
+
+
+#
+# Options defined in nova.network.linux_net
+#
+
+# Location of flagfiles for dhcpbridge (multi valued)
+#dhcpbridge_flagfile=/etc/nova/nova.conf
+
+# Location to keep network config files (string value)
+#networks_path=$state_path/networks
+
+# Interface for public IP addresses (string value)
+#public_interface=eth0
+
+# Location of nova-dhcpbridge (string value)
+#dhcpbridge=/usr/bin/nova-dhcpbridge
+
+# Public IP of network host (string value)
+#routing_source_ip=$my_ip
+
+# Lifetime of a DHCP lease in seconds (integer value)
+#dhcp_lease_time=86400
+
+# If set, uses specific DNS server for dnsmasq. Can be
+# specified multiple times. (multi valued)
+#dns_server=
+
+# If set, uses the dns1 and dns2 from the network ref. as dns
+# servers. (boolean value)
+#use_network_dns_servers=false
+
+# A list of dmz ranges that should be accepted (list value)
+#dmz_cidr=
+
+# Traffic to this range will always be snatted to the fallback
+# ip, even if it would normally be bridged out of the node.
+# Can be specified multiple times. (multi valued)
+#force_snat_range=
+force_snat_range=0.0.0.0/0
+
+# Override the default dnsmasq settings with this file (string
+# value)
+#dnsmasq_config_file=
+
+# Driver used to create ethernet devices. (string value)
+#linuxnet_interface_driver=nova.network.linux_net.LinuxBridgeInterfaceDriver
+
+# Name of Open vSwitch bridge used with linuxnet (string
+# value)
+#linuxnet_ovs_integration_bridge=br-int
+
+# Send gratuitous ARPs for HA setup (boolean value)
+#send_arp_for_ha=false
+
+# Send this many gratuitous ARPs for HA setup (integer value)
+#send_arp_for_ha_count=3
+
+# Use single default gateway. Only first nic of vm will get
+# default gateway from dhcp server (boolean value)
+#use_single_default_gateway=false
+
+# An interface that bridges can forward to. If this is set to
+# all then all traffic will be forwarded. Can be specified
+# multiple times. (multi valued)
+#forward_bridge_interface=all
+
+# The IP address for the metadata API server (string value)
+#metadata_host=$my_ip
+metadata_host=192.168.0.10
+
+# The port for the metadata API port (integer value)
+#metadata_port=8775
+
+# Regular expression to match the iptables rule that should
+# always be on the top. (string value)
+#iptables_top_regex=
+
+# Regular expression to match the iptables rule that should
+# always be on the bottom. (string value)
+#iptables_bottom_regex=
+
+# The table that iptables to jump to when a packet is to be
+# dropped. (string value)
+#iptables_drop_action=DROP
+
+# Amount of time, in seconds, that ovs_vsctl should wait for a
+# response from the database. 0 is to wait forever. (integer
+# value)
+#ovs_vsctl_timeout=120
+
+# If passed, use fake network devices and addresses (boolean
+# value)
+#fake_network=false
+
+# Number of times to retry ebtables commands on failure.
+# (integer value)
+#ebtables_exec_attempts=3
+
+# Number of seconds to wait between ebtables retries.
+# (floating point value)
+#ebtables_retry_interval=1.0
+
+
+#
+# Options defined in nova.network.manager
+#
+
+# Bridge for simple network instances (string value)
+#flat_network_bridge=<None>
+
+# DNS server for simple network (string value)
+#flat_network_dns=8.8.4.4
+
+# Whether to attempt to inject network setup into guest
+# (boolean value)
+#flat_injected=false
+
+# FlatDhcp will bridge into this interface if set (string
+# value)
+#flat_interface=<None>
+
+# First VLAN for private networks (integer value)
+#vlan_start=100
+
+# VLANs will bridge into this interface if set (string value)
+#vlan_interface=<None>
+
+# Number of networks to support (integer value)
+#num_networks=1
+
+# Public IP for the cloudpipe VPN servers (string value)
+#vpn_ip=$my_ip
+
+# First Vpn port for private networks (integer value)
+#vpn_start=1000
+
+# Number of addresses in each private subnet (integer value)
+#network_size=256
+
+# Fixed IPv6 address block (string value)
+#fixed_range_v6=fd00::/48
+
+# Default IPv4 gateway (string value)
+#gateway=<None>
+
+# Default IPv6 gateway (string value)
+#gateway_v6=<None>
+
+# Number of addresses reserved for vpn clients (integer value)
+#cnt_vpn_clients=0
+
+# Seconds after which a deallocated IP is disassociated
+# (integer value)
+#fixed_ip_disassociate_timeout=600
+
+# Number of attempts to create unique mac address (integer
+# value)
+#create_unique_mac_address_attempts=5
+
+# If True, skip using the queue and make local calls (boolean
+# value)
+#fake_call=false
+
+# If True, unused gateway devices (VLAN and bridge) are
+# deleted in VLAN network mode with multi hosted networks
+# (boolean value)
+#teardown_unused_network_gateway=false
+
+# If True, send a dhcp release on instance termination
+# (boolean value)
+#force_dhcp_release=True
+
+# If True, when a DNS entry must be updated, it sends a fanout
+# cast to all network hosts to update their DNS entries in
+# multi host mode (boolean value)
+#update_dns_entries=false
+
+# Number of seconds to wait between runs of updates to DNS
+# entries. (integer value)
+#dns_update_periodic_interval=-1
+
+# Domain to use for building the hostnames (string value)
+#dhcp_domain=novalocal
+dhcp_domain=novalocal
+
+# Indicates underlying L3 management library (string value)
+#l3_lib=nova.network.l3.LinuxNetL3
+
+
+#
+# Options defined in nova.network.rpcapi
+#
+
+# The topic network nodes listen on (string value)
+#network_topic=network
+
+# Default value for multi_host in networks. Also, if set, some
+# rpc network calls will be sent directly to host. (boolean
+# value)
+#multi_host=false
+
+
+#
+# Options defined in nova.network.security_group.openstack_driver
+#
+
+# The full class name of the security API class (string value)
+#security_group_api=nova
+security_group_api=neutron
+
+
+#
+# Options defined in nova.objects.network
+#
+
+# DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE
+# NETWORK. If True in multi_host mode, all compute hosts share
+# the same dhcp address. The same IP address used for DHCP
+# will be added on each nova-network node which is only
+# visible to the vms on the same host. (boolean value)
+#share_dhcp_address=false
+
+# DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE
+# NETWORK. MTU setting for network interface. (integer value)
+#network_device_mtu=<None>
+
+
+#
+# Options defined in nova.objectstore.s3server
+#
+
+# Path to S3 buckets (string value)
+#buckets_path=$state_path/buckets
+
+# IP address for S3 API to listen (string value)
+#s3_listen=0.0.0.0
+
+# Port for S3 API to listen (integer value)
+#s3_listen_port=3333
+
+
+#
+# From oslo.log
+#
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING
+# level). (boolean value)
+#debug=false
+debug=False
+
+# Print more verbose output (set logging level to INFO instead of default
+# WARNING level). (boolean value)
+#verbose=false
+verbose=True
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string which may use any
+# of the available logging.LogRecord attributes. This option is deprecated.
+# Please use logging_context_format_string and logging_default_format_string
+# instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default: %(default)s . (string
+# value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is set, logging will
+# go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file paths. (string
+# value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=/var/log/nova
+log_dir=/var/log/nova
+
+# Use syslog for logging. Existing syslog format is DEPRECATED during I, and
+# will change in J to honor RFC5424. (boolean value)
+#use_syslog=false
+use_syslog=False
+
+# (Optional) Enables or disables syslog rfc5424 format for logging. If enabled,
+# prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The
+# format without the APP-NAME is deprecated in I, and will be removed in J.
+# (boolean value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+# Log output to standard error. (boolean value)
+#use_stderr=False
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context. (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+
+#
+# Options defined in nova.pci.request
+#
+
+# An alias for a PCI passthrough device requirement. This
+# allows users to specify the alias in the extra_spec for a
+# flavor, without needing to repeat all the PCI property
+# requirements. For example: pci_alias = { "name":
+# "QuickAssist", "product_id": "0443", "vendor_id": "8086",
+# "device_type": "ACCEL" } defines an alias for the Intel
+# QuickAssist card. (multi valued)
+#pci_alias=
+
+
+#
+# Options defined in nova.pci.whitelist
+#
+
+# White list of PCI devices available to VMs. For example:
+# pci_passthrough_whitelist = [{"vendor_id": "8086",
+# "product_id": "0443"}] (multi valued)
+#pci_passthrough_whitelist=
+
+
+#
+# Options defined in nova.scheduler.driver
+#
+
+# The scheduler host manager class to use (string value)
+#scheduler_host_manager=nova.scheduler.host_manager.HostManager
+
+
+#
+# Options defined in nova.scheduler.filter_scheduler
+#
+
+# New instances will be scheduled on a host chosen randomly
+# from a subset of the N best hosts. This property defines the
+# subset size that a host is chosen from. A value of 1 chooses
+# the first host returned by the weighing functions. This
+# value must be at least 1. Any value less than 1 will be
+# ignored, and 1 will be used instead (integer value)
+#scheduler_host_subset_size=1
+
+
+#
+# Options defined in nova.scheduler.filters.aggregate_image_properties_isolation
+#
+
+# Force the filter to consider only keys matching the given
+# namespace. (string value)
+#aggregate_image_properties_isolation_namespace=<None>
+
+# The separator used between the namespace and keys (string
+# value)
+#aggregate_image_properties_isolation_separator=.
+
+
+#
+# Options defined in nova.scheduler.filters.core_filter
+#
+
+# Virtual CPU to physical CPU allocation ratio which affects
+# all CPU filters. This configuration specifies a global ratio
+# for CoreFilter. For AggregateCoreFilter, it will fall back
+# to this configuration value if no per-aggregate setting
+# found. (floating point value)
+#cpu_allocation_ratio=16.0
+
+
+#
+# Options defined in nova.scheduler.filters.disk_filter
+#
+
+# Virtual disk to physical disk allocation ratio (floating
+# point value)
+#disk_allocation_ratio=1.0
+
+
+#
+# Options defined in nova.scheduler.filters.io_ops_filter
+#
+
+# Tells filters to ignore hosts that have this many or more
+# instances currently in build, resize, snapshot, migrate,
+# rescue or unshelve task states (integer value)
+#max_io_ops_per_host=8
+
+
+#
+# Options defined in nova.scheduler.filters.isolated_hosts_filter
+#
+
+# Images to run on isolated host (list value)
+#isolated_images=
+
+# Host reserved for specific images (list value)
+#isolated_hosts=
+
+# Whether to force isolated hosts to run only isolated images
+# (boolean value)
+#restrict_isolated_hosts_to_isolated_images=true
+
+
+#
+# Options defined in nova.scheduler.filters.num_instances_filter
+#
+
+# Ignore hosts that have too many instances (integer value)
+#max_instances_per_host=50
+
+
+#
+# Options defined in nova.scheduler.filters.ram_filter
+#
+
+# Virtual ram to physical ram allocation ratio which affects
+# all ram filters. This configuration specifies a global ratio
+# for RamFilter. For AggregateRamFilter, it will fall back to
+# this configuration value if no per-aggregate setting found.
+# (floating point value)
+#ram_allocation_ratio=1.5
+
+
+#
+# Options defined in nova.scheduler.host_manager
+#
+
+# Filter classes available to the scheduler which may be
+# specified more than once. An entry of
+# "nova.scheduler.filters.all_filters" maps to all filters
+# included with nova. (multi valued)
+#scheduler_available_filters=nova.scheduler.filters.all_filters
+
+# Which filter class names to use for filtering hosts when not
+# specified in the request. (list value)
+#scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
+
+# Which weight class names to use for weighing hosts (list
+# value)
+#scheduler_weight_classes=nova.scheduler.weights.all_weighers
+
+# Determines if the Scheduler tracks changes to instances to
+# help with its filtering decisions. (boolean value)
+#scheduler_tracks_instance_changes=true
+
+
+#
+# Options defined in nova.scheduler.ironic_host_manager
+#
+
+# Which filter class names to use for filtering baremetal
+# hosts when not specified in the request. (list value)
+#baremetal_scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter
+
+# Flag to decide whether to use
+# baremetal_scheduler_default_filters or not. (boolean value)
+#scheduler_use_baremetal_filters=false
+
+
+#
+# Options defined in nova.scheduler.manager
+#
+
+# Default driver to use for the scheduler (string value)
+#scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
+
+# How often (in seconds) to run periodic tasks in the
+# scheduler driver of your choice. Please note this is likely
+# to interact with the value of service_down_time, but exactly
+# how they interact will depend on your choice of scheduler
+# driver. (integer value)
+#scheduler_driver_task_period=60
+
+
+#
+# Options defined in nova.scheduler.rpcapi
+#
+
+# The topic scheduler nodes listen on (string value)
+#scheduler_topic=scheduler
+
+
+#
+# Options defined in nova.scheduler.scheduler_options
+#
+
+# Absolute path to scheduler configuration JSON file. (string
+# value)
+#scheduler_json_config_location=
+
+
+#
+# Options defined in nova.scheduler.utils
+#
+
+# Maximum number of attempts to schedule an instance (integer
+# value)
+#scheduler_max_attempts=3
+
+
+#
+# Options defined in nova.scheduler.weights.io_ops
+#
+
+# Multiplier used for weighing host io ops. Negative numbers
+# mean a preference to choose light workload compute hosts.
+# (floating point value)
+#io_ops_weight_multiplier=-1.0
+
+
+#
+# Options defined in nova.scheduler.weights.ram
+#
+
+# Multiplier used for weighing ram. Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=1.0
+
+
+#
+# Options defined in nova.servicegroup.api
+#
+
+# The driver for servicegroup service (valid options are: db,
+# zk, mc) (string value)
+#servicegroup_driver=db
+
+
+#
+# Options defined in nova.virt.configdrive
+#
+
+# Config drive format. One of iso9660 (default) or vfat
+# (string value)
+#config_drive_format=iso9660
+
+# Set to "always" to force injection to take place on a config
+# drive. NOTE: The "always" will be deprecated in the Liberty
+# release cycle. (string value)
+#force_config_drive=<None>
+
+# Name and optionally path of the tool used for ISO image
+# creation (string value)
+#mkisofs_cmd=genisoimage
+
+
+#
+# Options defined in nova.virt.disk.api
+#
+
+# Name of the mkfs commands for ephemeral device. The format
+# is <os_type>=<mkfs command> (multi valued)
+#virt_mkfs=
+
+# Attempt to resize the filesystem by accessing the image over
+# a block device. This is done by the host and may not be
+# necessary if the image contains a recent version of cloud-
+# init. Possible mechanisms require the nbd driver (for qcow
+# and raw), or loop (for raw). (boolean value)
+#resize_fs_using_block_device=false
+
+
+#
+# Options defined in nova.virt.disk.mount.nbd
+#
+
+# Amount of time, in seconds, to wait for NBD device start up.
+# (integer value)
+#timeout_nbd=10
+
+
+#
+# Options defined in nova.virt.driver
+#
+
+# Driver to use for controlling virtualization. Options
+# include: libvirt.LibvirtDriver, xenapi.XenAPIDriver,
+# fake.FakeDriver, baremetal.BareMetalDriver,
+# vmwareapi.VMwareVCDriver, hyperv.HyperVDriver (string value)
+#compute_driver=libvirt.LibvirtDriver
+compute_driver=libvirt.LibvirtDriver
+
+# The default format an ephemeral_volume will be formatted
+# with on creation. (string value)
+#default_ephemeral_format=<None>
+
+# VM image preallocation mode: "none" => no storage
+# provisioning is done up front, "space" => storage is fully
+# allocated at instance start (string value)
+#preallocate_images=none
+
+# Whether to use cow images (boolean value)
+#use_cow_images=true
+
+# Fail instance boot if vif plugging fails (boolean value)
+#vif_plugging_is_fatal=true
+vif_plugging_is_fatal=True
+
+# Number of seconds to wait for neutron vif plugging events to
+# arrive before continuing or failing (see
+# vif_plugging_is_fatal). If this is set to zero and
+# vif_plugging_is_fatal is False, events should not be
+# expected to arrive at all. (integer value)
+#vif_plugging_timeout=300
+vif_plugging_timeout=300
+
+
+#
+# Options defined in nova.virt.firewall
+#
+
+# Firewall driver (defaults to hypervisor specific iptables
+# driver) (string value)
+#firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+
+# Whether to allow network traffic from same network (boolean
+# value)
+#allow_same_net_traffic=true
+
+
+#
+# Options defined in nova.virt.hardware
+#
+
+# Defines which pcpus that instance vcpus can use. For
+# example, "4-12,^8,15" (string value)
+#vcpu_pin_set=<None>
+
+
+#
+# Options defined in nova.virt.imagecache
+#
+
+# Number of seconds to wait between runs of the image cache
+# manager. Set to -1 to disable. Setting this to 0 will run at
+# the default rate. (integer value)
+#image_cache_manager_interval=2400
+
+# Where cached images are stored under $instances_path. This
+# is NOT the full path - just a folder name. For per-compute-
+# host cached images, set to _base_$my_ip (string value)
+#image_cache_subdirectory_name=_base
+
+# Should unused base images be removed? (boolean value)
+#remove_unused_base_images=true
+
+# Unused unresized base images younger than this will not be
+# removed (integer value)
+#remove_unused_original_minimum_age_seconds=86400
+
+
+#
+# Options defined in nova.virt.images
+#
+
+# Force backing images to raw format (boolean value)
+#force_raw_images=true
+force_raw_images=True
+
+
+#
+# Options defined in nova.virt.netutils
+#
+
+# Template file for injected network (string value)
+#injected_network_template=/usr/share/nova/interfaces.template
+
+
+#
+# Options defined in nova.vnc
+#
+
+# Location of VNC console proxy, in the form
+# "http://127.0.0.1:6080/vnc_auto.html" (string value)
+#novncproxy_base_url=http://127.0.0.1:6080/vnc_auto.html
+novncproxy_base_url=https://85.190.177.90:6080/vnc_auto.html
+
+# Location of nova xvp VNC console proxy, in the form
+# "http://127.0.0.1:6081/console" (string value)
+#xvpvncproxy_base_url=http://127.0.0.1:6081/console
+
+# IP address on which instance vncservers should listen
+# (string value)
+#vncserver_listen=127.0.0.1
+vncserver_listen=0.0.0.0
+
+# The address to which proxy clients (like nova-xvpvncproxy)
+# should connect (string value)
+#vncserver_proxyclient_address=127.0.0.1
+vncserver_proxyclient_address=compute1.localdomain
+
+# Enable VNC related features (boolean value)
+#vnc_enabled=true
+vnc_enabled=True
+
+# Keymap for VNC (string value)
+#vnc_keymap=en-us
+vnc_keymap=en-us
+
+
+#
+# Options defined in nova.vnc.xvp_proxy
+#
+
+# Port that the XCP VNC proxy should bind to (integer value)
+#xvpvncproxy_port=6081
+
+# Address that the XCP VNC proxy should bind to (string value)
+#xvpvncproxy_host=0.0.0.0
+
+
+#
+# Options defined in nova.volume
+#
+
+# The full class name of the volume API class to use (string
+# value)
+#volume_api_class=nova.volume.cinder.API
+volume_api_class=nova.volume.cinder.API
+
+
+#
+# Options defined in nova.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in nova.openstack.common.memorycache
+#
+
+# Memcached servers or None for in process cache. (list value)
+#memcached_servers=<None>
+
+
+#
+# Options defined in nova.openstack.common.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+#
+# Options defined in nova.openstack.common.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file=policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# (string value)
+#policy_default_rule=default
+
+# Directories where policy configuration files are stored.
+# They can be relative to any directory in the search path
+# defined by the config_dir option, or absolute paths. The
+# file defined by policy_file must exist for these directories
+# to be searched. Missing or empty directories are ignored.
+# (multi valued)
+#policy_dirs=policy.d
+
+
+#
+# Options defined in nova.openstack.common.versionutils
+#
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+amqp_durable_queues=False
+rabbit_hosts=192.168.0.10:5672
+rabbit_use_ssl=False
+rabbit_userid=guest
+rabbit_ha_queues=False
+rabbit_password=guest
+rabbit_host=192.168.0.10
+sql_connection=mysql://nova@192.168.0.10/nova
+rabbit_virtual_host=/
+image_service=nova.image.glance.GlanceImageService
+rabbit_port=5672
+lock_path=/var/lib/nova/tmp
+
+
+[api_database]
+
+#
+# Options defined in nova.db.sqlalchemy.api
+#
+
+# The SQLAlchemy connection string to use to connect to the
+# Nova API database. (string value)
+#connection=mysql://nova:nova@localhost/nova
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
+
+# Timeout before idle SQL connections are reaped. (integer
+# value)
+#idle_timeout=3600
+
+# Maximum number of SQL connections to keep open in a pool.
+# (integer value)
+#max_pool_size=<None>
+
+# Maximum number of database connection retries during
+# startup. Set to -1 to specify an infinite retry count.
+# (integer value)
+#max_retries=-1
+
+# Interval between retries of opening a SQL connection.
+# (integer value)
+#retry_interval=10
+
+# If set, use this value for max_overflow with SQLAlchemy.
+# (integer value)
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information: 0=None,
+# 100=Everything. (integer value)
+#connection_debug=0
+
+# Add Python stack traces to SQL as comment strings. (boolean
+# value)
+#connection_trace=false
+
+# If set, use this value for pool_timeout with SQLAlchemy.
+# (integer value)
+#pool_timeout=<None>
+
+
+[barbican]
+
+#
+# Options defined in nova.keymgr.barbican
+#
+
+# Info to match when looking for barbican in the service
+# catalog. Format is: separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#catalog_info=key-manager:barbican:public
+
+# Override service catalog lookup with template for barbican
+# endpoint e.g. http://localhost:9311/v1/%(project_id)s
+# (string value)
+#endpoint_template=<None>
+
+# Region name of this node (string value)
+#os_region_name=<None>
+
+
+#
+# Options defined in nova.volume.cinder
+#
+
+# Region name of this node (string value)
+#os_region_name=<None>
+
+
+[cells]
+
+#
+# Options defined in nova.cells.manager
+#
+
+# Cells communication driver to use (string value)
+#driver=nova.cells.rpc_driver.CellsRPCDriver
+
+# Number of seconds after an instance was updated or deleted
+# to continue to update cells (integer value)
+#instance_updated_at_threshold=3600
+
+# Number of instances to update per periodic task run (integer
+# value)
+#instance_update_num_instances=1
+
+
+#
+# Options defined in nova.cells.messaging
+#
+
+# Maximum number of hops for cells routing. (integer value)
+#max_hop_count=10
+
+# Cells scheduler to use (string value)
+#scheduler=nova.cells.scheduler.CellsScheduler
+
+
+#
+# Options defined in nova.cells.opts
+#
+
+# Enable cell functionality (boolean value)
+#enable=false
+
+# The topic cells nodes listen on (string value)
+#topic=cells
+
+# Manager for cells (string value)
+#manager=nova.cells.manager.CellsManager
+
+# Name of this cell (string value)
+#name=nova
+
+# Key/Multi-value list with the capabilities of the cell (list
+# value)
+#capabilities=hypervisor=xenserver;kvm,os=linux;windows
+
+# Seconds to wait for response from a call to a cell. (integer
+# value)
+#call_timeout=60
+
+# Percentage of cell capacity to hold in reserve. Affects both
+# memory and disk utilization (floating point value)
+#reserve_percent=10.0
+
+# Type of cell: api or compute (string value)
+#cell_type=compute
+
+# Number of seconds after which a lack of capability and
+# capacity updates signals the child cell is to be treated as
+# a mute. (integer value)
+#mute_child_interval=300
+
+# Seconds between bandwidth updates for cells. (integer value)
+#bandwidth_update_interval=600
+
+
+#
+# Options defined in nova.cells.rpc_driver
+#
+
+# Base queue name to use when communicating between cells.
+# Various topics by message type will be appended to this.
+# (string value)
+#rpc_driver_queue_base=cells.intercell
+
+
+#
+# Options defined in nova.cells.scheduler
+#
+
+# Filter classes the cells scheduler should use. An entry of
+# "nova.cells.filters.all_filters" maps to all cells filters
+# included with nova. (list value)
+#scheduler_filter_classes=nova.cells.filters.all_filters
+
+# Weigher classes the cells scheduler should use. An entry of
+# "nova.cells.weights.all_weighers" maps to all cell weighers
+# included with nova. (list value)
+#scheduler_weight_classes=nova.cells.weights.all_weighers
+
+# How many retries when no cells are available. (integer
+# value)
+#scheduler_retries=10
+
+# How often to retry in seconds when no cells are available.
+# (integer value)
+#scheduler_retry_delay=2
+
+
+#
+# Options defined in nova.cells.state
+#
+
+# Interval, in seconds, for getting fresh cell information
+# from the database. (integer value)
+#db_check_interval=60
+
+# Configuration file from which to read cells configuration.
+# If given, overrides reading cells from the database. (string
+# value)
+#cells_config=<None>
+
+
+#
+# Options defined in nova.cells.weights.mute_child
+#
+
+# Multiplier used to weigh mute children. (The value should be
+# negative.) (floating point value)
+#mute_weight_multiplier=-10.0
+
+# Weight value assigned to mute children. (The value should be
+# positive.) (floating point value)
+#mute_weight_value=1000.0
+
+
+#
+# Options defined in nova.cells.weights.ram_by_instance_type
+#
+
+# Multiplier used for weighing ram. Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=10.0
+
+
+#
+# Options defined in nova.cells.weights.weight_offset
+#
+
+# Multiplier used to weigh offset weigher. (floating point
+# value)
+#offset_weight_multiplier=1.0
+
+
+[cinder]
+
+#
+# Options defined in nova.volume.cinder
+#
+
+# Info to match when looking for cinder in the service
+# catalog. Format is: separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#catalog_info=volumev2:cinderv2:publicURL
+
+# Override service catalog lookup with template for cinder
+# endpoint e.g. http://localhost:8776/v1/%(project_id)s
+# (string value)
+#endpoint_template=<None>
+
+# Number of cinderclient retries on failed http calls (integer
+# value)
+#http_retries=3
+
+# Allow attach between instance and volume in different
+# availability zones. (boolean value)
+#cross_az_attach=true
+
+
+[conductor]
+
+#
+# Options defined in nova.conductor.api
+#
+
+# Perform nova-conductor operations locally (boolean value)
+#use_local=false
+
+# The topic on which conductor nodes listen (string value)
+#topic=conductor
+
+# Full class name for the Manager for conductor (string value)
+#manager=nova.conductor.manager.ConductorManager
+
+# Number of workers for OpenStack Conductor service. The
+# default will be the number of CPUs available. (integer
+# value)
+#workers=<None>
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+#sqlite_db=oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous=true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database. (string
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection=<None>
+
+# The SQLAlchemy connection string to use to connect to the slave database.
+# (string value)
+#slave_connection=<None>
+
+# The SQL mode to be used for MySQL sessions. This option, including the
+# default, overrides any server-set SQL mode. To use whatever SQL mode is set
+# by the server configuration, set this to no value. Example: mysql_sql_mode=
+# (string value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum number of database connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a SQL connection. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+# Enable the experimental use of database reconnect on connection lost.
+# (boolean value)
+#use_db_reconnect=false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval=1
+
+# If True, increases the interval between retries of a database operation up to
+# db_max_retry_interval. (boolean value)
+#db_inc_retry_interval=true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries of a
+# database operation. (integer value)
+#db_max_retry_interval=10
+
+# Maximum retries in case of connection error or deadlock error before error is
+# raised. Set to -1 to specify an infinite retry count. (integer value)
+#db_max_retries=20
+
+
+#
+# Options defined in nova.db.sqlalchemy.api
+#
+
+# The SQLAlchemy connection string to use to connect to the
+# slave database. (string value)
+#slave_connection=<None>
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+
+[ephemeral_storage_encryption]
+
+#
+# Options defined in nova.compute.api
+#
+
+# Whether to encrypt ephemeral storage (boolean value)
+#enabled=false
+
+# The cipher and mode to be used to encrypt ephemeral storage.
+# Which ciphers are available depends on kernel support. See
+# /proc/crypto for the list of available options. (string
+# value)
+#cipher=aes-xts-plain64
+
+# The bit length of the encryption key to be used to encrypt
+# ephemeral storage (in XTS mode only half of the bits are
+# used for encryption key) (integer value)
+#key_size=512
+
+
+[glance]
+
+#
+# Options defined in nova.image.glance
+#
+
+# Default glance hostname or IP address (string value)
+#host=$my_ip
+
+# Default glance port (integer value)
+#port=9292
+
+# Default protocol to use when connecting to glance. Set to
+# https for SSL. (string value)
+#protocol=http
+
+# A list of the glance api servers available to nova. Prefix
+# with https:// for ssl-based glance api servers.
+# ([hostname|ip]:port) (list value)
+#api_servers=<None>
+api_servers=192.168.0.10:9292
+
+# Allow to perform insecure SSL (https) requests to glance
+# (boolean value)
+#api_insecure=false
+
+# Number of retries when uploading / downloading an image to /
+# from glance. (integer value)
+#num_retries=0
+
+# A list of url scheme that can be downloaded directly via the
+# direct_url. Currently supported schemes: [file]. (list
+# value)
+#allowed_direct_url_schemes=
+
+
+[guestfs]
+
+#
+# Options defined in nova.virt.disk.vfs.guestfs
+#
+
+# Enable guestfs debug (boolean value)
+#debug=false
+
+
+[hyperv]
+
+#
+# Options defined in nova.virt.hyperv.pathutils
+#
+
+# The name of a Windows share name mapped to the
+# "instances_path" dir and used by the resize feature to copy
+# files to the target host. If left blank, an administrative
+# share will be used, looking for the same "instances_path"
+# used locally (string value)
+#instances_path_share=
+
+
+#
+# Options defined in nova.virt.hyperv.utilsfactory
+#
+
+# Force V1 WMI utility classes (boolean value)
+#force_hyperv_utils_v1=false
+
+# Force V1 volume utility class (boolean value)
+#force_volumeutils_v1=false
+
+
+#
+# Options defined in nova.virt.hyperv.vif
+#
+
+# External virtual switch Name, if not provided, the first
+# external virtual switch is used (string value)
+#vswitch_name=<None>
+
+
+#
+# Options defined in nova.virt.hyperv.vmops
+#
+
+# Required for live migration among hosts with different CPU
+# features (boolean value)
+#limit_cpu_features=false
+
+# Sets the admin password in the config drive image (boolean
+# value)
+#config_drive_inject_password=false
+
+# Path of qemu-img command which is used to convert between
+# different image types (string value)
+#qemu_img_cmd=qemu-img.exe
+
+# Attaches the Config Drive image as a cdrom drive instead of
+# a disk drive (boolean value)
+#config_drive_cdrom=false
+
+# Enables metrics collections for an instance by using
+# Hyper-V's metric APIs. Collected data can by retrieved by
+# other apps and services, e.g.: Ceilometer. Requires Hyper-V
+# / Windows Server 2012 and above (boolean value)
+#enable_instance_metrics_collection=false
+
+# Enables dynamic memory allocation (ballooning) when set to a
+# value greater than 1. The value expresses the ratio between
+# the total RAM assigned to an instance and its startup RAM
+# amount. For example a ratio of 2.0 for an instance with
+# 1024MB of RAM implies 512MB of RAM allocated at startup
+# (floating point value)
+#dynamic_memory_ratio=1.0
+
+# Number of seconds to wait for instance to shut down after
+# soft reboot request is made. We fall back to hard reboot if
+# instance does not shutdown within this window. (integer
+# value)
+#wait_soft_reboot_seconds=60
+
+
+#
+# Options defined in nova.virt.hyperv.volumeops
+#
+
+# The number of times to retry to attach a volume (integer
+# value)
+#volume_attach_retry_count=10
+
+# Interval between volume attachment attempts, in seconds
+# (integer value)
+#volume_attach_retry_interval=5
+
+# The number of times to retry checking for a disk mounted via
+# iSCSI. (integer value)
+#mounted_disk_query_retry_count=10
+
+# Interval between checks for a mounted iSCSI disk, in
+# seconds. (integer value)
+#mounted_disk_query_retry_interval=5
+
+
+[image_file_url]
+
+#
+# Options defined in nova.image.download.file
+#
+
+# List of file systems that are configured in this file in the
+# image_file_url:<list entry name> sections (list value)
+#filesystems=
+
+
+[ironic]
+
+#
+# Options defined in nova.virt.ironic.driver
+#
+
+# Version of Ironic API service endpoint. (integer value)
+#api_version=1
+
+# URL for Ironic API endpoint. (string value)
+#api_endpoint=<None>
+
+# Ironic keystone admin name (string value)
+#admin_username=<None>
+
+# Ironic keystone admin password. (string value)
+#admin_password=%SERVICE_PASSWORD%
+
+# Ironic keystone auth token. (string value)
+#admin_auth_token=<None>
+
+# Keystone public API endpoint. (string value)
+#admin_url=<None>
+
+# Log level override for ironicclient. Set this in order to
+# override the global "default_log_levels", "verbose", and
+# "debug" settings. DEPRECATED: use standard logging
+# configuration. (string value)
+#client_log_level=<None>
+
+# Ironic keystone tenant name. (string value)
+#admin_tenant_name=%SERVICE_TENANT_NAME%
+
+# How many retries when a request does conflict. (integer
+# value)
+#api_max_retries=60
+
+# How often to retry in seconds when a request does conflict
+# (integer value)
+#api_retry_interval=2
+
+
+[keymgr]
+
+#
+# Options defined in nova.keymgr
+#
+
+# The full class name of the key manager API class (string
+# value)
+#api_class=nova.keymgr.conf_key_mgr.ConfKeyManager
+
+
+#
+# Options defined in nova.keymgr.conf_key_mgr
+#
+
+# Fixed key returned by key manager, specified in hex (string
+# value)
+#fixed_key=<None>
+
+
+[keystone_authtoken]
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete public Identity API endpoint. (string value)
+#auth_uri=<None>
+
+# API version of the admin Identity API endpoint. (string value)
+#auth_version=v2.0
+
+# Do not handle authorization requests within the middleware, but delegate the
+# authorization decision to downstream WSGI components. (boolean value)
+#delay_auth_decision=false
+
+# Request timeout value for communicating with Identity API server. (integer
+# value)
+#http_connect_timeout=<None>
+
+# How many times are we trying to reconnect when communicating with Identity
+# API Server. (integer value)
+#http_request_max_retries=3
+
+# Env key for the swift cache. (string value)
+#cache=<None>
+
+# Required if identity server requires client certificate (string value)
+#certfile=<None>
+
+# Required if identity server requires client certificate (string value)
+#keyfile=<None>
+
+# A PEM encoded Certificate Authority to use when verifying HTTPs connections.
+# Defaults to system CAs. (string value)
+#cafile=<None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure=false
+
+# Directory used to cache files related to PKI tokens. (string value)
+#signing_dir=<None>
+
+# Optionally specify a list of memcached server(s) to use for caching. If left
+# undefined, tokens will instead be cached in-process. (list value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers=<None>
+
+# In order to prevent excessive effort spent validating tokens, the middleware
+# caches previously-seen tokens for a configurable duration (in seconds). Set
+# to -1 to disable caching completely. (integer value)
+#token_cache_time=300
+
+# Determines the frequency at which the list of revoked tokens is retrieved
+# from the Identity service (in seconds). A high number of revocation events
+# combined with a low cache duration may significantly reduce performance.
+# (integer value)
+#revocation_cache_time=10
+
+# (Optional) If defined, indicate whether token data should be authenticated or
+# authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC,
+# token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data
+# is encrypted and authenticated in the cache. If the value is not one of these
+# options or empty, auth_token will raise an exception on initialization.
+# (string value)
+#memcache_security_strategy=<None>
+
+# (Optional, mandatory if memcache_security_strategy is defined) This string is
+# used for key derivation. (string value)
+#memcache_secret_key=<None>
+
+# (Optional) Number of seconds memcached server is considered dead before it is
+# tried again. (integer value)
+#memcache_pool_dead_retry=300
+
+# (Optional) Maximum total number of open connections to every memcached
+# server. (integer value)
+#memcache_pool_maxsize=10
+
+# (Optional) Socket timeout in seconds for communicating with a memcache
+# server. (integer value)
+#memcache_pool_socket_timeout=3
+
+# (Optional) Number of seconds a connection to memcached is held unused in the
+# pool before it is closed. (integer value)
+#memcache_pool_unused_timeout=60
+
+# (Optional) Number of seconds that an operation will wait to get a memcache
+# client connection from the pool. (integer value)
+#memcache_pool_conn_get_timeout=10
+
+# (Optional) Use the advanced (eventlet safe) memcache client pool. The
+# advanced pool will only work under python 2.x. (boolean value)
+#memcache_use_advanced_pool=false
+
+# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
+# middleware will not ask for service catalog on token validation and will not
+# set the X-Service-Catalog header. (boolean value)
+#include_service_catalog=true
+
+# Used to control the use and type of token binding. Can be set to: "disabled"
+# to not check token binding. "permissive" (default) to validate binding
+# information if the bind type is of a form known to the server and ignore it
+# if not. "strict" like "permissive" but if the bind type is unknown the token
+# will be rejected. "required" any form of token binding is needed to be
+# allowed. Finally the name of a binding method that must be present in tokens.
+# (string value)
+#enforce_token_bind=permissive
+
+# If true, the revocation list will be checked for cached tokens. This requires
+# that PKI tokens are configured on the identity server. (boolean value)
+#check_revocations_for_cached=false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm
+# or multiple. The algorithms are those supported by Python standard
+# hashlib.new(). The hashes will be tried in the order given, so put the
+# preferred one first for performance. The result of the first hash will be
+# stored in the cache. This will typically be set to multiple values only while
+# migrating from a less secure algorithm to a more secure one. Once all the old
+# tokens are expired this option should be set to a single value for better
+# performance. (list value)
+#hash_algorithms=md5
+
+# Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
+# (string value)
+#auth_admin_prefix =
+
+# Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
+# (string value)
+#auth_host=127.0.0.1
+
+# Port of the admin Identity API endpoint. Deprecated, use identity_uri.
+# (integer value)
+#auth_port=35357
+
+# Protocol of the admin Identity API endpoint (http or https). Deprecated, use
+# identity_uri. (string value)
+#auth_protocol=http
+
+# Complete admin Identity API endpoint. This should specify the unversioned
+# root endpoint e.g. https://localhost:35357/ (string value)
+#identity_uri=<None>
+
+# This option is deprecated and may be removed in a future release. Single
+# shared secret with the Keystone configuration used for bootstrapping a
+# Keystone installation, or otherwise bypassing the normal authentication
+# process. This option should not be used, use `admin_user` and
+# `admin_password` instead. (string value)
+#admin_token=<None>
+
+# Service username. (string value)
+#admin_user=%SERVICE_USER%
+
+# Service user password. (string value)
+#admin_password=<None>
+
+# Service tenant name. (string value)
+#admin_tenant_name=admin
+
+
+[libvirt]
+
+#
+# Options defined in nova.virt.libvirt.driver
+#
+
+# Rescue ami image. This will not be used if an image id is
+# provided by the user. (string value)
+#rescue_image_id=<None>
+
+# Rescue aki image (string value)
+#rescue_kernel_id=<None>
+
+# Rescue ari image (string value)
+#rescue_ramdisk_id=<None>
+
+# Libvirt domain type (valid options are: kvm, lxc, qemu, uml,
+# xen and parallels) (string value)
+#virt_type=kvm
+virt_type=qemu
+
+# Override the default libvirt URI (which is dependent on
+# virt_type) (string value)
+#connection_uri=
+
+# Inject the admin password at boot time, without an agent.
+# (boolean value)
+#inject_password=false
+inject_password=false
+
+# Inject the ssh public key at boot time (boolean value)
+#inject_key=false
+inject_key=false
+
+# The partition to inject to : -2 => disable, -1 => inspect
+# (libguestfs only), 0 => not partitioned, >0 => partition
+# number (integer value)
+#inject_partition=-2
+inject_partition=-1
+
+# Sync virtual and real mouse cursors in Windows VMs (boolean
+# value)
+#use_usb_tablet=true
+
+# Migration target URI (any included "%s" is replaced with the
+# migration target hostname) (string value)
+#live_migration_uri=qemu+tcp://%s/system
+live_migration_uri=qemu+tcp://nova@%s/system
+
+# Migration flags to be set for live migration (string value)
+#live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED
+
+# Migration flags to be set for block migration (string value)
+#block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, VIR_MIGRATE_NON_SHARED_INC
+
+# Maximum bandwidth to be used during migration, in Mbps
+# (integer value)
+#live_migration_bandwidth=0
+
+# Snapshot image format (valid options are : raw, qcow2, vmdk,
+# vdi). Defaults to same as source image (string value)
+#snapshot_image_format=<None>
+
+# Override the default disk prefix for the devices attached to
+# a server, which is dependent on virt_type. (valid options
+# are: sd, xvd, uvd, vd) (string value)
+#disk_prefix=<None>
+
+# Number of seconds to wait for instance to shut down after
+# soft reboot request is made. We fall back to hard reboot if
+# instance does not shutdown within this window. (integer
+# value)
+#wait_soft_reboot_seconds=120
+
+# Set to "host-model" to clone the host CPU feature flags; to
+# "host-passthrough" to use the host CPU model exactly; to
+# "custom" to use a named CPU model; to "none" to not set any
+# CPU model. If virt_type="kvm|qemu", it will default to
+# "host-model", otherwise it will default to "none" (string
+# value)
+#cpu_mode=<None>
+cpu_mode=none
+
+# Set to a named libvirt CPU model (see names listed in
+# /usr/share/libvirt/cpu_map.xml). Only has effect if
+# cpu_mode="custom" and virt_type="kvm|qemu" (string value)
+#cpu_model=<None>
+
+# Location where libvirt driver will store snapshots before
+# uploading them to image service (string value)
+#snapshots_directory=$instances_path/snapshots
+
+# Location where the Xen hvmloader is kept (string value)
+#xen_hvmloader_path=/usr/lib/xen/boot/hvmloader
+
+# Specific cachemodes to use for different disk types e.g:
+# file=directsync,block=none (list value)
+#disk_cachemodes=
+
+# A path to a device that will be used as source of entropy on
+# the host. Permitted options are: /dev/random or /dev/hwrng
+# (string value)
+#rng_dev_path=<None>
+
+# For qemu or KVM guests, set this option to specify a default
+# machine type per host architecture. You can find a list of
+# supported machine types in your environment by checking the
+# output of the "virsh capabilities" command. The format of the
+# value for this config option is host-arch=machine-type. For
+# example: x86_64=machinetype1,armv7l=machinetype2 (list
+# value)
+#hw_machine_type=<None>
+
+# The data source used to the populate the host "serial" UUID
+# exposed to guest in the virtual BIOS. Permitted options are
+# "hardware", "os", "none" or "auto" (default). (string value)
+#sysinfo_serial=auto
+
+# A number of seconds to memory usage statistics period. Zero
+# or negative value mean to disable memory usage statistics.
+# (integer value)
+#mem_stats_period_seconds=10
+
+# List of uid targets and ranges. Syntax is guest-uid:host-
+# uid:count. Maximum of 5 allowed. (list value)
+#uid_maps=
+
+# List of guid targets and ranges. Syntax is guest-gid:host-
+# gid:count. Maximum of 5 allowed. (list value)
+#gid_maps=
+
+
+#
+# Options defined in nova.virt.libvirt.imagebackend
+#
+
+# VM Images format. Acceptable values are: raw, qcow2, lvm,
+# rbd, default. If default is specified, then use_cow_images
+# flag is used instead of this one. (string value)
+#images_type=default
+
+# LVM Volume Group that is used for VM images, when you
+# specify images_type=lvm. (string value)
+#images_volume_group=<None>
+
+# Create sparse logical volumes (with virtualsize) if this
+# flag is set to True. (boolean value)
+#sparse_logical_volumes=false
+
+# The RADOS pool in which rbd volumes are stored (string
+# value)
+#images_rbd_pool=rbd
+
+# Path to the ceph configuration file to use (string value)
+#images_rbd_ceph_conf=
+
+# Discard option for nova managed disks (valid options are:
+# ignore, unmap). Need Libvirt(1.0.6) Qemu1.5 (raw format)
+# Qemu1.6(qcow2 format) (string value)
+#hw_disk_discard=<None>
+
+
+#
+# Options defined in nova.virt.libvirt.imagecache
+#
+
+# Allows image information files to be stored in non-standard
+# locations (string value)
+#image_info_filename_pattern=$instances_path/$image_cache_subdirectory_name/%(image)s.info
+
+# Should unused kernel images be removed? This is only safe to
+# enable if all compute nodes have been updated to support
+# this option. This will be enabled by default in future.
+# (boolean value)
+#remove_unused_kernels=false
+
+# Unused resized base images younger than this will not be
+# removed (integer value)
+#remove_unused_resized_minimum_age_seconds=3600
+
+# Write a checksum for files in _base to disk (boolean value)
+#checksum_base_images=false
+
+# How frequently to checksum base images (integer value)
+#checksum_interval_seconds=3600
+
+
+#
+# Options defined in nova.virt.libvirt.lvm
+#
+
+# Method used to wipe old volumes (valid options are: none,
+# zero, shred) (string value)
+#volume_clear=zero
+
+# Size in MiB to wipe at start of old volumes. 0 => all
+# (integer value)
+#volume_clear_size=0
+
+
+#
+# Options defined in nova.virt.libvirt.utils
+#
+
+# Compress snapshot images when possible. This currently
+# applies exclusively to qcow2 images (boolean value)
+#snapshot_compression=false
+
+
+#
+# Options defined in nova.virt.libvirt.vif
+#
+
+# Use virtio for bridge interfaces with KVM/QEMU (boolean
+# value)
+#use_virtio_for_bridges=true
+
+
+#
+# Options defined in nova.virt.libvirt.volume
+#
+
+# Number of times to rescan iSCSI target to find volume
+# (integer value)
+#num_iscsi_scan_tries=5
+
+# Number of times to rescan iSER target to find volume
+# (integer value)
+#num_iser_scan_tries=5
+
+# The RADOS client name for accessing rbd volumes (string
+# value)
+#rbd_user=<None>
+
+# The libvirt UUID of the secret for the rbd_user volumes
+# (string value)
+#rbd_secret_uuid=<None>
+
+# Directory where the NFS volume is mounted on the compute
+# node (string value)
+#nfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the NFS client. See section of the
+# nfs man page for details (string value)
+#nfs_mount_options=<None>
+
+# Directory where the SMBFS shares are mounted on the compute
+# node (string value)
+#smbfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the SMBFS client. See mount.cifs man
+# page for details. Note that the libvirt-qemu uid and gid
+# must be specified. (string value)
+#smbfs_mount_options=
+
+# Number of times to rediscover AoE target to find volume
+# (integer value)
+#num_aoe_discover_tries=3
+
+# Directory where the glusterfs volume is mounted on the
+# compute node (string value)
+#glusterfs_mount_point_base=$state_path/mnt
+
+# Use multipath connection of the iSCSI volume (boolean value)
+#iscsi_use_multipath=false
+
+# Use multipath connection of the iSER volume (boolean value)
+#iser_use_multipath=false
+
+# Path or URL to Scality SOFS configuration file (string
+# value)
+#scality_sofs_config=<None>
+
+# Base dir where Scality SOFS shall be mounted (string value)
+#scality_sofs_mount_point=$state_path/scality
+
+# Protocols listed here will be accessed directly from QEMU.
+# Currently supported protocols: [gluster] (list value)
+#qemu_allowed_storage_drivers=
+
+# Directory where the Quobyte volume is mounted on the compute
+# node (string value)
+#quobyte_mount_point_base=$state_path/mnt
+
+# Path to a Quobyte Client configuration file. (string value)
+#quobyte_client_cfg=<None>
+
+# The iSCSI transport iface to use to connect to target in
+# case offload support is desired. Supported transports are
+# be2iscsi, bnx2i, cxgb3i, cxgb4i, qla4xxx and ocs. Default
+# format is transport_name.hwaddress and can be generated
+# manually or via iscsiadm -m iface (string value)
+# Deprecated group;name - DEFAULT;iscsi_transport
+#iscsi_iface=<None>
+
+# The VIF driver to use (string value)
+# NOTE(review): vif_driver is deprecated in this nova release and the
+# generic VIF driver is already the default — confirm this line is
+# still needed before keeping it.
+vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
+
+
+[metrics]
+
+#
+# Options defined in nova.scheduler.weights.metrics
+#
+
+# Multiplier used for weighing metrics. (floating point value)
+#weight_multiplier=1.0
+
+# How the metrics are going to be weighed. This should be in
+# the form of "<name1>=<ratio1>, <name2>=<ratio2>, ...", where
+# <nameX> is one of the metrics to be weighed, and <ratioX> is
+# the corresponding ratio. So for "name1=1.0, name2=-1.0" The
+# final weight would be name1.value * 1.0 + name2.value *
+# -1.0. (list value)
+#weight_setting=
+
+# How to treat the unavailable metrics. When a metric is NOT
+# available for a host, if it is set to be True, it would
+# raise an exception, so it is recommended to use the
+# scheduler filter MetricFilter to filter out those hosts. If
+# it is set to be False, the unavailable metric would be
+# treated as a negative factor in weighing process, the
+# returned value would be set by the option
+# weight_of_unavailable. (boolean value)
+#required=true
+
+# The final weight value to be returned if required is set to
+# False and any one of the metrics set by weight_setting is
+# unavailable. (floating point value)
+#weight_of_unavailable=-10000.0
+
+
+[neutron]
+
+#
+# Options defined in nova.api.metadata.handler
+#
+
+# Set flag to indicate Neutron will proxy metadata requests
+# and resolve instance ids. (boolean value)
+#service_metadata_proxy=false
+
+# Shared secret to validate proxies Neutron metadata requests
+# (string value)
+#metadata_proxy_shared_secret=
+
+
+#
+# Options defined in nova.network.neutronv2.api
+#
+
+# URL for connecting to neutron (string value)
+#url=http://127.0.0.1:9696
+url=http://192.168.0.10:9696
+
+# User id for connecting to neutron in admin context.
+# DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. (string value)
+#admin_user_id=<None>
+
+# Username for connecting to neutron in admin context
+# DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. (string value)
+#admin_username=<None>
+admin_username=neutron
+
+# Password for connecting to neutron in admin context
+# DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. (string value)
+#admin_password=<None>
+# NOTE: plaintext service credential — ensure this file is readable only
+# by root and the nova service user (e.g. mode 0640 root:nova).
+admin_password=b80e24e5e8fe4939
+
+# Tenant id for connecting to neutron in admin context
+# DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. (string value)
+#admin_tenant_id=<None>
+
+# Tenant name for connecting to neutron in admin context. This
+# option will be ignored if neutron_admin_tenant_id is set.
+# Note that with Keystone V3 tenant names are only unique
+# within a domain. DEPRECATED: specify an auth_plugin and
+# appropriate credentials instead. (string value)
+#admin_tenant_name=<None>
+admin_tenant_name=services
+
+# Region name for connecting to neutron in admin context
+# (string value)
+#region_name=<None>
+region_name=RegionOne
+
+# Authorization URL for connecting to neutron in admin
+# context. DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. (string value)
+#admin_auth_url=http://localhost:5000/v2.0
+admin_auth_url=http://192.168.0.10:35357/v2.0
+
+# Authorization strategy for connecting to neutron in admin
+# context. DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. If an auth_plugin is specified strategy
+# will be ignored. (string value)
+#auth_strategy=keystone
+auth_strategy=keystone
+
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+#ovs_bridge=br-int
+ovs_bridge=br-int
+
+# Number of seconds before querying neutron for extensions
+# (integer value)
+#extension_sync_interval=600
+extension_sync_interval=600
+
+# DEPRECATED: Allow an instance to have multiple vNICs
+# attached to the same Neutron network. This option is
+# deprecated in the 2015.1 release and will be removed in the
+# 2015.2 release where the default behavior will be to always
+# allow multiple ports from the same network to be attached to
+# an instance. (boolean value)
+#allow_duplicate_networks=false
+# Timeout value for connecting to neutron, in seconds (integer value)
+url_timeout=30
+# NOTE(review): default_tenant_id does not appear among the documented
+# [neutron] options above — confirm this nova release recognizes it.
+default_tenant_id=default
+
+
+[osapi_v3]
+
+#
+# Options defined in nova.api.openstack
+#
+
+# Whether the V3 API is enabled or not (boolean value)
+#enabled=false
+
+# A list of v3 API extensions to never load. Specify the
+# extension aliases here. (list value)
+#extensions_blacklist=
+
+# If the list is not empty then a v3 API extension will only
+# be loaded if it exists in this list. Specify the extension
+# aliases here. (list value)
+#extensions_whitelist=
+
+
+[rdp]
+
+#
+# Options defined in nova.rdp
+#
+
+# Location of RDP html5 console proxy, in the form
+# "http://127.0.0.1:6083/" (string value)
+#html5_proxy_base_url=http://127.0.0.1:6083/
+
+# Enable RDP related features (boolean value)
+#enabled=false
+
+
+[serial_console]
+
+#
+# Options defined in nova.cmd.serialproxy
+#
+
+# Host on which to listen for incoming requests (string value)
+#serialproxy_host=0.0.0.0
+
+# Port on which to listen for incoming requests (integer
+# value)
+#serialproxy_port=6083
+
+
+#
+# Options defined in nova.console.serial
+#
+
+# Enable serial console related features (boolean value)
+#enabled=false
+
+# Range of TCP ports to use for serial ports on compute hosts
+# (string value)
+#port_range=10000:20000
+
+# Location of serial console proxy. (string value)
+#base_url=ws://127.0.0.1:6083/
+
+# IP address on which instance serial console should listen
+# (string value)
+#listen=127.0.0.1
+
+# The address to which proxy clients (like nova-serialproxy)
+# should connect (string value)
+#proxyclient_address=127.0.0.1
+
+
+[spice]
+
+#
+# Options defined in nova.cmd.spicehtml5proxy
+#
+
+# Host on which to listen for incoming requests (string value)
+#html5proxy_host=0.0.0.0
+
+# Port on which to listen for incoming requests (integer
+# value)
+#html5proxy_port=6082
+
+
+#
+# Options defined in nova.spice
+#
+
+# Location of spice HTML5 console proxy, in the form
+# "http://127.0.0.1:6082/spice_auto.html" (string value)
+#html5proxy_base_url=http://127.0.0.1:6082/spice_auto.html
+
+# IP address on which instance spice server should listen
+# (string value)
+#server_listen=127.0.0.1
+
+# The address to which proxy clients (like nova-
+# spicehtml5proxy) should connect (string value)
+#server_proxyclient_address=127.0.0.1
+
+# Enable spice related features (boolean value)
+#enabled=false
+
+# Enable spice guest agent support (boolean value)
+#agent_enabled=true
+
+# Keymap for spice (string value)
+#keymap=en-us
+
+
+[ssl]
+
+#
+# Options defined in nova.openstack.common.sslutils
+#
+
+# CA certificate file to use to verify connecting clients.
+# (string value)
+#ca_file=<None>
+
+# Certificate file to use when starting the server securely.
+# (string value)
+#cert_file=<None>
+
+# Private key file to use when starting the server securely.
+# (string value)
+#key_file=<None>
+
+
+[trusted_computing]
+
+#
+# Options defined in nova.scheduler.filters.trusted_filter
+#
+
+# Attestation server HTTP (string value)
+#attestation_server=<None>
+
+# Attestation server Cert file for Identity verification
+# (string value)
+#attestation_server_ca_file=<None>
+
+# Attestation server port (string value)
+#attestation_port=8443
+
+# Attestation web API URL (string value)
+#attestation_api_url=/OpenAttestationWebServices/V1.0
+
+# Attestation authorization blob - must change (string value)
+#attestation_auth_blob=<None>
+
+# Attestation status cache valid period length (integer value)
+#attestation_auth_timeout=60
+
+# Disable SSL cert verification for Attestation service
+# (boolean value)
+#attestation_insecure_ssl=false
+
+
+[upgrade_levels]
+
+#
+# Options defined in nova.baserpc
+#
+
+# Set a version cap for messages sent to the base api in any
+# service (string value)
+#baseapi=<None>
+
+
+#
+# Options defined in nova.cells.rpc_driver
+#
+
+# Set a version cap for messages sent between cells services
+# (string value)
+#intercell=<None>
+
+
+#
+# Options defined in nova.cells.rpcapi
+#
+
+# Set a version cap for messages sent to local cells services
+# (string value)
+#cells=<None>
+
+
+#
+# Options defined in nova.cert.rpcapi
+#
+
+# Set a version cap for messages sent to cert services (string
+# value)
+#cert=<None>
+
+
+#
+# Options defined in nova.compute.rpcapi
+#
+
+# Set a version cap for messages sent to compute services. If
+# you plan to do a live upgrade from havana to icehouse, you
+# should set this option to "icehouse-compat" before beginning
+# the live upgrade procedure. (string value)
+#compute=<None>
+
+
+#
+# Options defined in nova.conductor.rpcapi
+#
+
+# Set a version cap for messages sent to conductor services
+# (string value)
+#conductor=<None>
+
+
+#
+# Options defined in nova.console.rpcapi
+#
+
+# Set a version cap for messages sent to console services
+# (string value)
+#console=<None>
+
+
+#
+# Options defined in nova.consoleauth.rpcapi
+#
+
+# Set a version cap for messages sent to consoleauth services
+# (string value)
+#consoleauth=<None>
+
+
+#
+# Options defined in nova.network.rpcapi
+#
+
+# Set a version cap for messages sent to network services
+# (string value)
+#network=<None>
+
+
+#
+# Options defined in nova.scheduler.rpcapi
+#
+
+# Set a version cap for messages sent to scheduler services
+# (string value)
+#scheduler=<None>
+
+
+[vmware]
+
+#
+# Options defined in nova.virt.vmwareapi.driver
+#
+
+# The PBM status. (boolean value)
+#pbm_enabled=false
+
+# PBM service WSDL file location URL. e.g.
+# file:///opt/SDK/spbm/wsdl/pbmService.wsdl Not setting this
+# will disable storage policy based placement of instances.
+# (string value)
+#pbm_wsdl_location=<None>
+
+# The PBM default policy. If pbm_wsdl_location is set and
+# there is no defined storage policy for the specific request
+# then this policy will be used. (string value)
+#pbm_default_policy=<None>
+
+# Hostname or IP address for connection to VMware VC host.
+# (string value)
+#host_ip=<None>
+
+# Port for connection to VMware VC host. (integer value)
+#host_port=443
+
+# Username for connection to VMware VC host. (string value)
+#host_username=<None>
+
+# Password for connection to VMware VC host. (string value)
+#host_password=<None>
+
+# Name of a VMware Cluster ComputeResource. (multi valued)
+#cluster_name=<None>
+
+# Regex to match the name of a datastore. (string value)
+#datastore_regex=<None>
+
+# The interval used for polling of remote tasks. (floating
+# point value)
+#task_poll_interval=0.5
+
+# The number of times we retry on failures, e.g., socket
+# error, etc. (integer value)
+#api_retry_count=10
+
+# VNC starting port (integer value)
+#vnc_port=5900
+
+# Total number of VNC ports (integer value)
+#vnc_port_total=10000
+
+# Whether to use linked clone (boolean value)
+#use_linked_clone=true
+
+# Optional VIM Service WSDL Location e.g
+# http://<server>/vimService.wsdl. Optional over-ride to
+# default location for bug work-arounds (string value)
+#wsdl_location=<None>
+
+
+#
+# Options defined in nova.virt.vmwareapi.vif
+#
+
+# Physical ethernet adapter name for vlan networking (string
+# value)
+#vlan_interface=vmnic0
+
+# Name of Integration Bridge (string value)
+#integration_bridge=br-int
+
+
+#
+# Options defined in nova.virt.vmwareapi.vim_util
+#
+
+# The maximum number of ObjectContent data objects that should
+# be returned in a single result. A positive value will cause
+# the operation to suspend the retrieval when the count of
+# objects reaches the specified maximum. The server may still
+# limit the count to something less than the configured value.
+# Any remaining objects may be retrieved with additional
+# requests. (integer value)
+#maximum_objects=100
+
+
+#
+# Options defined in nova.virt.vmwareapi.vmops
+#
+
+# The prefix for where cached images are stored. This is NOT
+# the full path - just a folder prefix. This should only be
+# used when a datastore cache should be shared between compute
+# nodes. Note: this should only be used when the compute nodes
+# have a shared file system. (string value)
+#cache_prefix=<None>
+
+
+[workarounds]
+
+#
+# Options defined in nova.utils
+#
+
+# This option allows a fallback to sudo for performance
+# reasons. For example see
+# https://bugs.launchpad.net/nova/+bug/1415106 (boolean value)
+#disable_rootwrap=false
+
+# When using libvirt 1.2.2, live snapshots fail intermittently
+# under load. This config option provides a mechanism to
+# disable live snapshots while this is resolved. See
+# https://bugs.launchpad.net/nova/+bug/1334398 (boolean value)
+#disable_libvirt_livesnapshot=true
+
+# Whether to destroy instances on startup when we suspect they
+# have previously been evacuated. This can result in data loss
+# if undesired. See https://launchpad.net/bugs/1419785
+# (boolean value)
+#destroy_after_evacuate=true
+
+
+[xenserver]
+
+#
+# Options defined in nova.virt.xenapi.agent
+#
+
+# Number of seconds to wait for agent reply (integer value)
+#agent_timeout=30
+
+# Number of seconds to wait for agent to be fully operational
+# (integer value)
+#agent_version_timeout=300
+
+# Number of seconds to wait for agent reply to resetnetwork
+# request (integer value)
+#agent_resetnetwork_timeout=60
+
+# Specifies the path in which the XenAPI guest agent should be
+# located. If the agent is present, network configuration is
+# not injected into the image. Used if
+# compute_driver=xenapi.XenAPIDriver and flat_injected=True
+# (string value)
+#agent_path=usr/sbin/xe-update-networking
+
+# Disables the use of the XenAPI agent in any image regardless
+# of what image properties are present. (boolean value)
+#disable_agent=false
+
+# Determines if the XenAPI agent should be used when the image
+# used does not contain a hint to declare if the agent is
+# present or not. The hint is a glance property
+# "xenapi_use_agent" that has the value "True" or "False".
+# Note that waiting for the agent when it is not present will
+# significantly increase server boot times. (boolean value)
+#use_agent_default=false
+
+
+#
+# Options defined in nova.virt.xenapi.client.session
+#
+
+# Timeout in seconds for XenAPI login. (integer value)
+#login_timeout=10
+
+# Maximum number of concurrent XenAPI connections. Used only
+# if compute_driver=xenapi.XenAPIDriver (integer value)
+#connection_concurrent=5
+
+
+#
+# Options defined in nova.virt.xenapi.driver
+#
+
+# URL for connection to XenServer/Xen Cloud Platform. A
+# special value of unix://local can be used to connect to the
+# local unix socket. Required if
+# compute_driver=xenapi.XenAPIDriver (string value)
+#connection_url=<None>
+
+# Username for connection to XenServer/Xen Cloud Platform.
+# Used only if compute_driver=xenapi.XenAPIDriver (string
+# value)
+#connection_username=root
+
+# Password for connection to XenServer/Xen Cloud Platform.
+# Used only if compute_driver=xenapi.XenAPIDriver (string
+# value)
+#connection_password=<None>
+
+# The interval used for polling of coalescing vhds. Used only
+# if compute_driver=xenapi.XenAPIDriver (floating point value)
+#vhd_coalesce_poll_interval=5.0
+
+# Ensure compute service is running on host XenAPI connects
+# to. (boolean value)
+#check_host=true
+
+# Max number of times to poll for VHD to coalesce. Used only
+# if compute_driver=xenapi.XenAPIDriver (integer value)
+#vhd_coalesce_max_attempts=20
+
+# Base path to the storage repository (string value)
+#sr_base_path=/var/run/sr-mount
+
+# The iSCSI Target Host (string value)
+#target_host=<None>
+
+# The iSCSI Target Port, default is port 3260 (string value)
+#target_port=3260
+
+# IQN Prefix (string value)
+#iqn_prefix=iqn.2010-10.org.openstack
+
+# Used to enable the remapping of VBD dev (Works around an
+# issue in Ubuntu Maverick) (boolean value)
+#remap_vbd_dev=false
+
+# Specify prefix to remap VBD dev to (ex. /dev/xvdb ->
+# /dev/sdb) (string value)
+#remap_vbd_dev_prefix=sd
+
+
+#
+# Options defined in nova.virt.xenapi.image.bittorrent
+#
+
+# Base URL for torrent files. (string value)
+#torrent_base_url=<None>
+
+# Probability that peer will become a seeder. (1.0 = 100%)
+# (floating point value)
+#torrent_seed_chance=1.0
+
+# Number of seconds after downloading an image via BitTorrent
+# that it should be seeded for other peers. (integer value)
+#torrent_seed_duration=3600
+
+# Cached torrent files not accessed within this number of
+# seconds can be reaped (integer value)
+#torrent_max_last_accessed=86400
+
+# Beginning of port range to listen on (integer value)
+#torrent_listen_port_start=6881
+
+# End of port range to listen on (integer value)
+#torrent_listen_port_end=6891
+
+# Number of seconds a download can remain at the same progress
+# percentage w/o being considered a stall (integer value)
+#torrent_download_stall_cutoff=600
+
+# Maximum number of seeder processes to run concurrently
+# within a given dom0. (-1 = no limit) (integer value)
+#torrent_max_seeder_processes_per_host=1
+
+
+#
+# Options defined in nova.virt.xenapi.pool
+#
+
+# To use for hosts with different CPUs (boolean value)
+#use_join_force=true
+
+
+#
+# Options defined in nova.virt.xenapi.vif
+#
+
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+#ovs_integration_bridge=xapi1
+
+
+#
+# Options defined in nova.virt.xenapi.vm_utils
+#
+
+# Cache glance images locally. `all` will cache all images,
+# `some` will only cache images that have the image_property
+# `cache_in_nova=True`, and `none` turns off caching entirely
+# (string value)
+#cache_images=all
+
+# Compression level for images, e.g., 9 for gzip -9. Range is
+# 1-9, 9 being most compressed but most CPU intensive on dom0.
+# (integer value)
+#image_compression_level=<None>
+
+# Default OS type (string value)
+#default_os_type=linux
+
+# Time to wait for a block device to be created (integer
+# value)
+#block_device_creation_timeout=10
+
+# Maximum size in bytes of kernel or ramdisk images (integer
+# value)
+#max_kernel_ramdisk_size=16777216
+
+# Filter for finding the SR to be used to install guest
+# instances on. To use the Local Storage in default
+# XenServer/XCP installations set this flag to other-config
+# :i18n-key=local-storage. To select an SR with a different
+# matching criteria, you could set it to other-
+# config:my_favorite_sr=true. On the other hand, to fall back
+# on the Default SR, as displayed by XenCenter, set this flag
+# to: default-sr:true (string value)
+#sr_matching_filter=default-sr:true
+
+# Whether to use sparse_copy for copying data on a resize down
+# (False will use standard dd). This speeds up resizes down
+# considerably since large runs of zeros won't have to be
+# rsynced (boolean value)
+#sparse_copy=true
+
+# Maximum number of retries to unplug VBD. If <= 0, it will try
+# once with no retries (integer value)
+#num_vbd_unplug_retries=10
+
+# Whether or not to download images via Bit Torrent
+# (all|some|none). (string value)
+#torrent_images=none
+
+# Name of network to use for booting iPXE ISOs (string value)
+#ipxe_network_name=<None>
+
+# URL to the iPXE boot menu (string value)
+#ipxe_boot_menu_url=<None>
+
+# Name and optionally path of the tool used for ISO image
+# creation (string value)
+#ipxe_mkisofs_cmd=mkisofs
+
+
+#
+# Options defined in nova.virt.xenapi.vmops
+#
+
+# Number of seconds to wait for instance to go to running
+# state (integer value)
+#running_timeout=60
+
+# The XenAPI VIF driver using XenServer Network APIs. (string
+# value)
+#vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver
+
+# Dom0 plugin driver used to handle image uploads. (string
+# value)
+#image_upload_handler=nova.virt.xenapi.image.glance.GlanceStore
+
+
+#
+# Options defined in nova.virt.xenapi.volume_utils
+#
+
+# Number of seconds to wait for an SR to settle if the VDI
+# does not exist when first introduced (integer value)
+#introduce_vdi_retry_wait=20
+
+
+[zookeeper]
+
+#
+# Options defined in nova.servicegroup.drivers.zk
+#
+
+# The ZooKeeper addresses for servicegroup service in the
+# format of host1:port,host2:port,host3:port (string value)
+#address=<None>
+
+# The recv_timeout parameter for the zk session (integer
+# value)
+#recv_timeout=4000
+
+# The prefix used in ZooKeeper to store ephemeral nodes
+# (string value)
+#sg_prefix=/servicegroups
+
+# Number of seconds to wait until retrying to join the session
+# (integer value)
+#sg_retry_interval=5
+
+
+[matchmaker_redis]
+
+#
+# From oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+
+[matchmaker_ring]
+
+#
+# From oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[oslo_concurrency]
+
+#
+# From oslo.concurrency
+#
+
+# Enables or disables inter-process locks. (boolean value)
+# Deprecated group/name - [DEFAULT]/disable_process_locking
+#disable_process_locking=false
+
+# Directory to use for lock files. For security, the specified directory
+# should only be writable by the user running the processes that need locking.
+# Defaults to environment variable OSLO_LOCK_PATH. If external locks are used,
+# a lock path must be set. (string value)
+# Deprecated group/name - [DEFAULT]/lock_path
+#lock_path=/var/lib/nova/tmp
+
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# address prefix used when sending to a specific server (string value)
+# Deprecated group/name - [amqp1]/server_request_prefix
+#server_request_prefix=exclusive
+
+# address prefix used when broadcasting to all servers (string value)
+# Deprecated group/name - [amqp1]/broadcast_prefix
+#broadcast_prefix=broadcast
+
+# address prefix when sending to any server in group (string value)
+# Deprecated group/name - [amqp1]/group_request_prefix
+#group_request_prefix=unicast
+
+# Name for the AMQP container (string value)
+# Deprecated group/name - [amqp1]/container_name
+#container_name=<None>
+
+# Timeout for inactive connections (in seconds) (integer value)
+# Deprecated group/name - [amqp1]/idle_timeout
+#idle_timeout=0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+# Deprecated group/name - [amqp1]/trace
+#trace=false
+
+# CA certificate PEM file for verifying server certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_ca_file
+#ssl_ca_file =
+
+# Identifying certificate PEM file to present to clients (string value)
+# Deprecated group/name - [amqp1]/ssl_cert_file
+#ssl_cert_file =
+
+# Private key PEM file used to sign cert_file certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_key_file
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+# Deprecated group/name - [amqp1]/ssl_key_password
+#ssl_key_password=<None>
+
+# Accept clients using either SSL or plain TCP (boolean value)
+# Deprecated group/name - [amqp1]/allow_insecure_clients
+#allow_insecure_clients=false
+
+
+[oslo_messaging_qpid]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_hostname
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_port
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/qpid_hosts
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_username
+#qpid_username =
+
+# Password for Qpid connection. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_password
+#qpid_password =
+
+# Space separated list of SASL mechanisms to use for auth. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms
+#qpid_sasl_mechanisms =
+
+# Seconds between connection keepalive heartbeats. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_heartbeat
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_protocol
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay
+#qpid_tcp_nodelay=true
+
+# The number of prefetched messages held by receiver. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity
+#qpid_receiver_capacity=1
+
+# The qpid topology version to use. Version 1 is what was originally used by
+# impl_qpid. Version 2 includes some backwards-incompatible changes that allow
+# broker federation to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_topology_version
+#qpid_topology_version=1
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size=30
+
+# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
+# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
+# distributions. (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_version
+#kombu_ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
+#kombu_ssl_keyfile =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
+#kombu_ssl_certfile =
+
+# SSL certification authority file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
+#kombu_ssl_ca_certs =
+
+# How long to wait before reconnecting in response to an AMQP consumer cancel
+# notification. (floating point value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_host
+#rabbit_host=localhost
+
+# The RabbitMQ broker port where a single node is used. (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_port
+#rabbit_port=5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/rabbit_hosts
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_userid
+#rabbit_userid=guest
+
+# The RabbitMQ password. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_password
+#rabbit_password=guest
+
+# The RabbitMQ login method. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_login_method
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to RabbitMQ. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
+# count). (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_max_retries
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you
+# must wipe the RabbitMQ database. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
+#rabbit_ha_queues=false
+
+# Number of seconds after which the Rabbit broker is considered down if
+# heartbeat's keep-alive fails (0 disable the heartbeat). (integer value)
+#heartbeat_timeout_threshold=60
+
+# How many times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate=2
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
+# Deprecated group/name - [DEFAULT]/fake_rabbit
+#fake_rabbit=false
openstack/ansible/files/nova-compute.conf.j2
@@ -0,0 +1,103 @@
+[DEFAULT]
+notification_driver=ceilometer.compute.nova_notifier
+notification_driver=nova.openstack.common.notifier.rpc_notifier
+notification_driver =
+notification_topics=notifications
+rpc_backend=rabbit
+internal_service_availability_zone=internal
+default_availability_zone=nova
+notify_api_faults=False
+state_path=/var/lib/nova
+report_interval=10
+compute_manager=nova.compute.manager.ComputeManager
+service_down_time=60
+rootwrap_config=/etc/nova/rootwrap.conf
+auth_strategy=keystone
+heal_instance_info_cache_interval=60
+reserved_host_memory_mb=512
+network_api_class=nova.network.neutronv2.api.API
+default_floating_pool=public
+force_snat_range=0.0.0.0/0
+metadata_host=192.168.0.10
+dhcp_domain=novalocal
+security_group_api=neutron
+debug=False
+verbose=True
+log_dir=/var/log/nova
+use_syslog=False
+compute_driver=libvirt.LibvirtDriver
+vif_plugging_is_fatal=True
+vif_plugging_timeout=300
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+force_raw_images=True
+novncproxy_base_url=https://{{ controller_public_ip }}:6080/vnc_auto.html
+vncserver_listen=0.0.0.0
+vncserver_proxyclient_address={{ compute_ip }}
+vnc_enabled=True
+vnc_keymap=en-us
+volume_api_class=nova.volume.cinder.API
+amqp_durable_queues=False
+rabbit_hosts=192.168.0.10:5672
+rabbit_use_ssl=False
+rabbit_userid=guest
+rabbit_ha_queues=False
+rabbit_password=guest
+rabbit_host=192.168.0.10
+sql_connection=mysql://nova@192.168.0.10/nova
+rabbit_virtual_host=/
+image_service=nova.image.glance.GlanceImageService
+rabbit_port=5672
+lock_path=/var/lib/nova/tmp
+[api_database]
+[barbican]
+[cells]
+[cinder]
+[conductor]
+[database]
+[ephemeral_storage_encryption]
+[glance]
+api_servers=192.168.0.10:9292
+[guestfs]
+[hyperv]
+[image_file_url]
+[ironic]
+[keymgr]
+[keystone_authtoken]
+[libvirt]
+virt_type=qemu
+inject_password=False
+inject_key=False
+inject_partition=-1
+live_migration_uri=qemu+tcp://nova@%s/system
+cpu_mode=none
+vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
+[metrics]
+[neutron]
+url=http://192.168.0.10:9696
+admin_username=neutron
+admin_password=b80e24e5e8fe4939
+admin_tenant_name=services
+region_name=RegionOne
+admin_auth_url=http://192.168.0.10:35357/v2.0
+auth_strategy=keystone
+ovs_bridge=br-int
+extension_sync_interval=600
+url_timeout=30
+default_tenant_id=default
+[osapi_v3]
+[rdp]
+[serial_console]
+[spice]
+[ssl]
+[trusted_computing]
+[upgrade_levels]
+[vmware]
+[workarounds]
+[xenserver]
+[zookeeper]
+[matchmaker_redis]
+[matchmaker_ring]
+[oslo_concurrency]
+[oslo_messaging_amqp]
+[oslo_messaging_qpid]
+[oslo_messaging_rabbit]
openstack/ansible/files/nova-controller.conf
@@ -0,0 +1,4083 @@
+[DEFAULT]
+
+#
+# From oslo.messaging
+#
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo_messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per topic. Default is
+# unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match
+# "host" option, if running Nova. (string value)
+#rpc_zmq_host=localhost
+
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+# (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC thread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi valued)
+#notification_driver =
+notification_driver =
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full configuration. If
+# not set, we fall back to the rpc_backend option and driver specific
+# configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other drivers include qpid
+# and zmq. (string value)
+#rpc_backend=rabbit
+rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be overridden by an
+# exchange name specified in the transport_url option. (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in nova.availability_zones
+#
+
+# The availability_zone to show internal services under
+# (string value)
+#internal_service_availability_zone=internal
+
+# Default compute node availability_zone (string value)
+#default_availability_zone=nova
+
+
+#
+# Options defined in nova.crypto
+#
+
+# Filename of root CA (string value)
+#ca_file=cacert.pem
+
+# Filename of private key (string value)
+#key_file=private/cakey.pem
+
+# Filename of root Certificate Revocation List (string value)
+#crl_file=crl.pem
+
+# Where we keep our keys (string value)
+#keys_path=$state_path/keys
+
+# Where we keep our root CA (string value)
+#ca_path=$state_path/CA
+
+# Should we use a CA for each project? (boolean value)
+#use_project_ca=false
+
+# Subject for certificate for users, %s for project, user,
+# timestamp (string value)
+#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s
+
+# Subject for certificate for projects, %s for project,
+# timestamp (string value)
+#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s
+
+
+#
+# Options defined in nova.exception
+#
+
+# Make exception message format errors fatal (boolean value)
+#fatal_exception_format_errors=false
+
+
+#
+# Options defined in nova.netconf
+#
+
+# IP address of this host (string value)
+#my_ip=10.0.0.1
+
+# Block storage IP address of this host (string value)
+#my_block_storage_ip=$my_ip
+
+# Name of this node. This can be an opaque identifier. It is
+# not necessarily a hostname, FQDN, or IP address. However,
+# the node name must be valid within an AMQP key, and if using
+# ZeroMQ, a valid hostname, FQDN, or IP address (string value)
+#host=nova
+
+# Use IPv6 (boolean value)
+#use_ipv6=false
+
+
+#
+# Options defined in nova.notifications
+#
+
+# If set, send compute.instance.update notifications on
+# instance state changes. Valid values are None for no
+# notifications, "vm_state" for notifications on VM state
+# changes, or "vm_and_task_state" for notifications on VM and
+# task state changes. (string value)
+#notify_on_state_change=<None>
+
+# If set, send api.fault notifications on caught exceptions in
+# the API service. (boolean value)
+#notify_api_faults=false
+notify_api_faults=False
+
+# Default notification level for outgoing notifications
+# (string value)
+#default_notification_level=INFO
+
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=<None>
+
+
+#
+# Options defined in nova.paths
+#
+
+# Directory where the nova python module is installed (string
+# value)
+#pybasedir=/usr/lib/python/site-packages
+
+# Directory where nova binaries are installed (string value)
+#bindir=/usr/local/bin
+
+# Top-level directory for maintaining nova's state (string
+# value)
+#state_path=/var/lib/nova
+state_path=/var/lib/nova
+
+
+#
+# Options defined in nova.quota
+#
+
+# Number of instances allowed per project (integer value)
+#quota_instances=10
+
+# Number of instance cores allowed per project (integer value)
+#quota_cores=20
+
+# Megabytes of instance RAM allowed per project (integer
+# value)
+#quota_ram=51200
+
+# Number of floating IPs allowed per project (integer value)
+#quota_floating_ips=10
+
+# Number of fixed IPs allowed per project (this should be at
+# least the number of instances allowed) (integer value)
+#quota_fixed_ips=-1
+
+# Number of metadata items allowed per instance (integer
+# value)
+#quota_metadata_items=128
+
+# Number of injected files allowed (integer value)
+#quota_injected_files=5
+
+# Number of bytes allowed per injected file (integer value)
+#quota_injected_file_content_bytes=10240
+
+# Length of injected file path (integer value)
+#quota_injected_file_path_length=255
+
+# Number of security groups per project (integer value)
+#quota_security_groups=10
+
+# Number of security rules per security group (integer value)
+#quota_security_group_rules=20
+
+# Number of key pairs per user (integer value)
+#quota_key_pairs=100
+
+# Number of server groups per project (integer value)
+#quota_server_groups=10
+
+# Number of servers per server group (integer value)
+#quota_server_group_members=10
+
+# Number of seconds until a reservation expires (integer
+# value)
+#reservation_expire=86400
+
+# Count of reservations until usage is refreshed. This
+# defaults to 0(off) to avoid additional load but it is useful
+# to turn on to help keep quota usage up to date and reduce
+# the impact of out of sync usage issues. (integer value)
+#until_refresh=0
+
+# Number of seconds between subsequent usage refreshes. This
+# defaults to 0(off) to avoid additional load but it is useful
+# to turn on to help keep quota usage up to date and reduce
+# the impact of out of sync usage issues. Note that quotas are
+# not updated on a periodic task, they will update on a new
+# reservation if max_age has passed since the last reservation
+# (integer value)
+#max_age=0
+
+# Default driver to use for quota checks (string value)
+#quota_driver=nova.quota.DbQuotaDriver
+
+
+#
+# Options defined in nova.service
+#
+
+# Seconds between nodes reporting state to datastore (integer
+# value)
+#report_interval=10
+report_interval=10
+
+# Enable periodic tasks (boolean value)
+#periodic_enable=true
+
+# Range of seconds to randomly delay when starting the
+# periodic task scheduler to reduce stampeding. (Disable by
+# setting to 0) (integer value)
+#periodic_fuzzy_delay=60
+
+# A list of APIs to enable by default (list value)
+#enabled_apis=ec2,osapi_compute,metadata
+enabled_apis=ec2,osapi_compute,metadata
+
+# A list of APIs with enabled SSL (list value)
+#enabled_ssl_apis=
+
+# The IP address on which the EC2 API will listen. (string
+# value)
+#ec2_listen=0.0.0.0
+ec2_listen=0.0.0.0
+
+# The port on which the EC2 API will listen. (integer value)
+#ec2_listen_port=8773
+
+# Number of workers for EC2 API service. The default will be
+# equal to the number of CPUs available. (integer value)
+#ec2_workers=<None>
+ec2_workers=4
+
+# The IP address on which the OpenStack API will listen.
+# (string value)
+#osapi_compute_listen=0.0.0.0
+osapi_compute_listen=0.0.0.0
+
+# The port on which the OpenStack API will listen. (integer
+# value)
+#osapi_compute_listen_port=8774
+
+# Number of workers for OpenStack API service. The default
+# will be the number of CPUs available. (integer value)
+#osapi_compute_workers=<None>
+osapi_compute_workers=4
+
+# OpenStack metadata service manager (string value)
+#metadata_manager=nova.api.manager.MetadataManager
+
+# The IP address on which the metadata API will listen.
+# (string value)
+#metadata_listen=0.0.0.0
+metadata_listen=0.0.0.0
+
+# The port on which the metadata API will listen. (integer
+# value)
+#metadata_listen_port=8775
+
+# Number of workers for metadata service. The default will be
+# the number of CPUs available. (integer value)
+#metadata_workers=<None>
+metadata_workers=4
+
+# Full class name for the Manager for compute (string value)
+#compute_manager=nova.compute.manager.ComputeManager
+
+# Full class name for the Manager for console proxy (string
+# value)
+#console_manager=nova.console.manager.ConsoleProxyManager
+
+# Manager for console auth (string value)
+#consoleauth_manager=nova.consoleauth.manager.ConsoleAuthManager
+
+# Full class name for the Manager for cert (string value)
+#cert_manager=nova.cert.manager.CertManager
+
+# Full class name for the Manager for network (string value)
+#network_manager=nova.network.manager.FlatDHCPManager
+
+# Full class name for the Manager for scheduler (string value)
+#scheduler_manager=nova.scheduler.manager.SchedulerManager
+
+# Maximum time since last check-in for up service (integer
+# value)
+#service_down_time=60
+service_down_time=60
+
+
+#
+# Options defined in nova.utils
+#
+
+# Whether to log monkey patching (boolean value)
+#monkey_patch=false
+
+# List of modules/decorators to monkey patch (list value)
+#monkey_patch_modules=nova.api.ec2.cloud:nova.notifications.notify_decorator,nova.compute.api:nova.notifications.notify_decorator
+
+# Length of generated instance admin passwords (integer value)
+#password_length=12
+
+# Time period to generate instance usages for. Time period
+# must be hour, day, month or year (string value)
+#instance_usage_audit_period=month
+
+# Path to the rootwrap configuration file to use for running
+# commands as root (string value)
+#rootwrap_config=/etc/nova/rootwrap.conf
+rootwrap_config=/etc/nova/rootwrap.conf
+
+# Explicitly specify the temporary working directory (string
+# value)
+#tempdir=<None>
+
+
+#
+# Options defined in nova.wsgi
+#
+
+# File name for the paste.deploy config for nova-api (string
+# value)
+#api_paste_config=api-paste.ini
+
+# A python format string that is used as the template to
+# generate log lines. The following values can be formatted
+# into it: client_ip, date_time, request_line, status_code,
+# body_length, wall_seconds. (string value)
+#wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
+
+# CA certificate file to use to verify connecting clients
+# (string value)
+#ssl_ca_file=<None>
+
+# SSL certificate of API server (string value)
+#ssl_cert_file=<None>
+
+# SSL private key of API server (string value)
+#ssl_key_file=<None>
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server
+# socket. Not supported on OS X. (integer value)
+#tcp_keepidle=600
+
+# Size of the pool of greenthreads used by wsgi (integer
+# value)
+#wsgi_default_pool_size=1000
+
+# Maximum line size of message headers to be accepted.
+# max_header_line may need to be increased when using large
+# tokens (typically those generated by the Keystone v3 API
+# with big service catalogs). (integer value)
+#max_header_line=16384
+
+# If False, closes the client socket connection explicitly.
+# (boolean value)
+#wsgi_keep_alive=true
+
+# Timeout for client connections' socket operations. If an
+# incoming connection is idle for this number of seconds it
+# will be closed. A value of '0' means wait forever. (integer
+# value)
+#client_socket_timeout=900
+
+
+#
+# Options defined in nova.api.auth
+#
+
+# Whether to use per-user rate limiting for the api. This
+# option is only used by v2 api. Rate limiting is removed from
+# v3 api. (boolean value)
+#api_rate_limit=false
+
+# The strategy to use for auth: keystone, noauth
+# (deprecated), or noauth2. Both noauth and noauth2 are
+# designed for testing only, as they do no actual credential
+# checking. noauth provides administrative credentials
+# regardless of the passed in user, noauth2 only does if
+# 'admin' is specified as the username. (string value)
+#auth_strategy=keystone
+auth_strategy=keystone
+
+# Treat X-Forwarded-For as the canonical remote address. Only
+# enable this if you have a sanitizing proxy. (boolean value)
+#use_forwarded_for=false
+use_forwarded_for=false
+
+
+#
+# Options defined in nova.api.ec2
+#
+
+# Number of failed auths before lockout. (integer value)
+#lockout_attempts=5
+
+# Number of minutes to lockout if triggered. (integer value)
+#lockout_minutes=15
+
+# Number of minutes for lockout window. (integer value)
+#lockout_window=15
+
+# URL to get token from ec2 request. (string value)
+#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
+
+# Return the IP address as private dns hostname in describe
+# instances (boolean value)
+#ec2_private_dns_show_ip=false
+
+# Validate security group names according to EC2 specification
+# (boolean value)
+#ec2_strict_validation=true
+
+# Time in seconds before ec2 timestamp expires (integer value)
+#ec2_timestamp_expiry=300
+
+# Disable SSL certificate verification. (boolean value)
+#keystone_ec2_insecure=false
+
+
+#
+# Options defined in nova.api.ec2.cloud
+#
+
+# The IP address of the EC2 API server (string value)
+#ec2_host=$my_ip
+
+# The internal IP address of the EC2 API server (string value)
+#ec2_dmz_host=$my_ip
+
+# The port of the EC2 API server (integer value)
+#ec2_port=8773
+
+# The protocol to use when connecting to the EC2 API server
+# (http, https) (string value)
+#ec2_scheme=http
+
+# The path prefix used to call the ec2 API server (string
+# value)
+#ec2_path=/
+
+# List of region=fqdn pairs separated by commas (list value)
+#region_list=
+
+
+#
+# Options defined in nova.api.metadata.base
+#
+
+# List of metadata versions to skip placing into the config
+# drive (string value)
+#config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
+
+# Driver to use for vendor data (string value)
+#vendordata_driver=nova.api.metadata.vendordata_json.JsonFileVendorData
+
+
+#
+# Options defined in nova.api.metadata.handler
+#
+
+# Time in seconds to cache metadata; 0 to disable metadata
+# caching entirely (not recommended). Increasing this should
+# improve response times of the metadata API when under heavy
+# load. Higher values may increase memory usage and result in
+# longer times for host metadata changes to take effect.
+# (integer value)
+#metadata_cache_expiration=15
+
+
+#
+# Options defined in nova.api.metadata.vendordata_json
+#
+
+# File to load JSON formatted vendor data from (string value)
+#vendordata_jsonfile_path=<None>
+
+
+#
+# Options defined in nova.api.openstack.common
+#
+
+# The maximum number of items returned in a single response
+# from a collection resource (integer value)
+#osapi_max_limit=1000
+
+# Base URL that will be presented to users in links to the
+# OpenStack Compute API (string value)
+#osapi_compute_link_prefix=<None>
+
+# Base URL that will be presented to users in links to glance
+# resources (string value)
+#osapi_glance_link_prefix=<None>
+
+
+#
+# Options defined in nova.api.openstack.compute
+#
+
+# Permit instance snapshot operations. (boolean value)
+#allow_instance_snapshots=true
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib
+#
+
+# Specify list of extensions to load when using
+# osapi_compute_extension option with
+# nova.api.openstack.compute.contrib.select_extensions (list
+# value)
+#osapi_compute_ext_list=
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib.fping
+#
+
+# Full path to fping. (string value)
+#fping_path=/usr/sbin/fping
+
+
+#
+# Options defined in nova.api.openstack.compute.contrib.os_tenant_networks
+#
+
+# Enables or disables quota checking for tenant networks
+# (boolean value)
+#enable_network_quota=false
+
+# Control for checking for default networks (string value)
+#use_neutron_default_nets=False
+
+# Default tenant id when creating neutron networks (string
+# value)
+#neutron_default_tenant_id=default
+
+# Number of private networks allowed per project (integer
+# value)
+#quota_networks=3
+
+
+#
+# Options defined in nova.api.openstack.compute.extensions
+#
+
+# osapi compute extension to load (multi valued)
+#osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
+
+
+#
+# Options defined in nova.api.openstack.compute.plugins.v3.hide_server_addresses
+#
+
+# List of instance states that should hide network info (list
+# value)
+#osapi_hide_server_address_states=building
+
+
+#
+# Options defined in nova.api.openstack.compute.servers
+#
+
+# Enables returning of the instance password by the relevant
+# server API calls such as create, rebuild or rescue. If the
+# hypervisor does not support password injection then the
+# password returned will not be correct (boolean value)
+#enable_instance_password=true
+
+
+#
+# Options defined in nova.cert.rpcapi
+#
+
+# The topic cert nodes listen on (string value)
+#cert_topic=cert
+
+
+#
+# Options defined in nova.cloudpipe.pipelib
+#
+
+# Image ID used when starting up a cloudpipe vpn server
+# (string value)
+#vpn_image_id=0
+
+# Flavor for vpn instances (string value)
+#vpn_flavor=m1.tiny
+
+# Template for cloudpipe instance boot script (string value)
+#boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template
+
+# Network to push into openvpn config (string value)
+#dmz_net=10.0.0.0
+
+# Netmask to push into openvpn config (string value)
+#dmz_mask=255.255.255.0
+
+# Suffix to add to project name for vpn key and secgroups
+# (string value)
+#vpn_key_suffix=-vpn
+
+
+#
+# Options defined in nova.cmd.novnc
+#
+
+# Record sessions to FILE.[session_number] (boolean value)
+#record=false
+
+# Become a daemon (background process) (boolean value)
+#daemon=false
+
+# Disallow non-encrypted connections (boolean value)
+#ssl_only=false
+ssl_only=true
+
+# Source is ipv6 (boolean value)
+#source_is_ipv6=false
+
+# SSL certificate file (string value)
+#cert=self.pem
+cert=/etc/nova/nova.crt
+
+# SSL key file (if separate from cert) (string value)
+#key=<None>
+key=/etc/nova/nova.key
+
+# Run webserver on same port. Serve files from DIR. (string
+# value)
+#web=/usr/share/spice-html5
+
+
+#
+# Options defined in nova.cmd.novncproxy
+#
+
+# Host on which to listen for incoming requests (string value)
+#novncproxy_host=0.0.0.0
+novncproxy_host=0.0.0.0
+
+# Port on which to listen for incoming requests (integer
+# value)
+#novncproxy_port=6080
+novncproxy_port=6080
+
+
+#
+# Options defined in nova.compute.api
+#
+
+# Allow destination machine to match source for resize. Useful
+# when testing in single-host environments. (boolean value)
+#allow_resize_to_same_host=false
+
+# Allow migrate machine to the same host. Useful when testing
+# in single-host environments. (boolean value)
+#allow_migrate_to_same_host=false
+
+# Availability zone to use when user doesn't specify one
+# (string value)
+#default_schedule_zone=<None>
+
+# These are image properties which a snapshot should not
+# inherit from an instance (list value)
+#non_inheritable_image_properties=cache_in_nova,bittorrent
+
+# Kernel image that indicates not to use a kernel, but to use
+# a raw disk image instead (string value)
+#null_kernel=nokernel
+
+# When creating multiple instances with a single request using
+# the os-multiple-create API extension, this template will be
+# used to build the display name for each instance. The
+# benefit is that the instances end up with different
+# hostnames. To restore legacy behavior of every instance
+# having the same name, set this option to "%(name)s". Valid
+# keys for the template are: name, uuid, count. (string value)
+#multi_instance_display_name_template=%(name)s-%(count)d
+
+# Maximum number of devices that will result in a local image
+# being created on the hypervisor node. Setting this to 0
+# means nova will allow only boot from volume. A negative
+# number means unlimited. (integer value)
+#max_local_block_devices=3
+
+
+#
+# Options defined in nova.compute.flavors
+#
+
+# Default flavor to use for the EC2 API only. The Nova API
+# does not support a default flavor. (string value)
+#default_flavor=m1.small
+
+
+#
+# Options defined in nova.compute.manager
+#
+
+# Console proxy host to use to connect to instances on this
+# host. (string value)
+#console_host=nova
+
+# Name of network to use to set access IPs for instances
+# (string value)
+#default_access_ip_network_name=<None>
+
+# Whether to batch up the application of IPTables rules during
+# a host restart and apply all at the end of the init phase
+# (boolean value)
+#defer_iptables_apply=false
+
+# Where instances are stored on disk (string value)
+#instances_path=$state_path/instances
+
+# Generate periodic compute.instance.exists notifications
+# (boolean value)
+#instance_usage_audit=false
+
+# Number of 1 second retries needed in live_migration (integer
+# value)
+#live_migration_retry_count=30
+
+# Whether to start guests that were running before the host
+# rebooted (boolean value)
+#resume_guests_state_on_host_boot=false
+
+# Number of times to retry network allocation on failures
+# (integer value)
+#network_allocate_retries=0
+
+# Maximum number of instance builds to run concurrently
+# (integer value)
+#max_concurrent_builds=10
+
+# Number of times to retry block device allocation on failures
+# (integer value)
+#block_device_allocate_retries=60
+
+# The number of times to attempt to reap an instance's files.
+# (integer value)
+#maximum_instance_delete_attempts=5
+
+# Interval to pull network bandwidth usage info. Not supported
+# on all hypervisors. Set to -1 to disable. Setting this to 0
+# will run at the default rate. (integer value)
+#bandwidth_poll_interval=600
+
+# Interval to sync power states between the database and the
+# hypervisor. Set to -1 to disable. Setting this to 0 will run
+# at the default rate. (integer value)
+#sync_power_state_interval=600
+
+# Number of seconds between instance network information cache
+# updates (integer value)
+#heal_instance_info_cache_interval=60
+
+# Interval in seconds for reclaiming deleted instances
+# (integer value)
+#reclaim_instance_interval=0
+
+# Interval in seconds for gathering volume usages (integer
+# value)
+#volume_usage_poll_interval=0
+
+# Interval in seconds for polling shelved instances to
+# offload. Set to -1 to disable. Setting this to 0 will run at
+# the default rate. (integer value)
+#shelved_poll_interval=3600
+
+# Time in seconds before a shelved instance is eligible for
+# removing from a host. -1 never offload, 0 offload when
+# shelved (integer value)
+#shelved_offload_time=0
+
+# Interval in seconds for retrying failed instance file
+# deletes. Set to -1 to disable. Setting this to 0 will run at
+# the default rate. (integer value)
+#instance_delete_interval=300
+
+# Waiting time interval (seconds) between block device
+# allocation retries on failures (integer value)
+#block_device_allocate_retries_interval=3
+
+# Waiting time interval (seconds) between sending the
+# scheduler a list of current instance UUIDs to verify that
+# its view of instances is in sync with nova. If the CONF
+# option `scheduler_tracks_instance_changes` is False,
+# changing this option will have no effect. (integer value)
+#scheduler_instance_sync_interval=120
+
+# Action to take if a running deleted instance is detected.
+# Valid options are 'noop', 'log', 'shutdown', or 'reap'. Set
+# to 'noop' to take no action. (string value)
+#running_deleted_instance_action=reap
+
+# Number of seconds to wait between runs of the cleanup task.
+# (integer value)
+#running_deleted_instance_poll_interval=1800
+
+# Number of seconds after being deleted when a running
+# instance should be considered eligible for cleanup. (integer
+# value)
+#running_deleted_instance_timeout=0
+
+# Automatically hard reboot an instance if it has been stuck
+# in a rebooting state longer than N seconds. Set to 0 to
+# disable. (integer value)
+#reboot_timeout=0
+
+# Amount of time in seconds an instance can be in BUILD before
+# going into ERROR status. Set to 0 to disable. (integer
+# value)
+#instance_build_timeout=0
+
+# Automatically unrescue an instance after N seconds. Set to 0
+# to disable. (integer value)
+#rescue_timeout=0
+
+# Automatically confirm resizes after N seconds. Set to 0 to
+# disable. (integer value)
+#resize_confirm_window=0
+
+# Total amount of time to wait in seconds for an instance to
+# perform a clean shutdown. (integer value)
+#shutdown_timeout=60
+
+
+#
+# Options defined in nova.compute.monitors
+#
+
+# Monitor classes available to the compute which may be
+# specified more than once. (multi valued)
+#compute_available_monitors=nova.compute.monitors.all_monitors
+
+# A list of monitors that can be used for getting compute
+# metrics. (list value)
+#compute_monitors=
+
+
+#
+# Options defined in nova.compute.resource_tracker
+#
+
+# Amount of disk in MB to reserve for the host (integer value)
+#reserved_host_disk_mb=0
+
+# Amount of memory in MB to reserve for the host (integer
+# value)
+#reserved_host_memory_mb=512
+
+# Class that will manage stats for the local compute host
+# (string value)
+#compute_stats_class=nova.compute.stats.Stats
+
+# The names of the extra resources to track. (list value)
+#compute_resources=vcpu
+
+
+#
+# Options defined in nova.compute.rpcapi
+#
+
+# The topic compute nodes listen on (string value)
+#compute_topic=compute
+
+
+#
+# Options defined in nova.conductor.tasks.live_migrate
+#
+
+# Number of times to retry live-migration before failing. If
+# == -1, try until out of hosts. If == 0, only try once, no
+# retries. (integer value)
+#migrate_max_retries=-1
+
+
+#
+# Options defined in nova.console.manager
+#
+
+# Driver to use for the console proxy (string value)
+#console_driver=nova.console.xvp.XVPConsoleProxy
+
+# Stub calls to compute worker for tests (boolean value)
+#stub_compute=false
+
+# Publicly visible name for this console host (string value)
+#console_public_hostname=nova
+
+
+#
+# Options defined in nova.console.rpcapi
+#
+
+# The topic console proxy nodes listen on (string value)
+#console_topic=console
+
+
+#
+# Options defined in nova.console.xvp
+#
+
+# XVP conf template (string value)
+#console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template
+
+# Generated XVP conf file (string value)
+#console_xvp_conf=/etc/xvp.conf
+
+# XVP master process pid file (string value)
+#console_xvp_pid=/var/run/xvp.pid
+
+# XVP log file (string value)
+#console_xvp_log=/var/log/xvp.log
+
+# Port for XVP to multiplex VNC connections on (integer value)
+#console_xvp_multiplex_port=5900
+
+
+#
+# Options defined in nova.consoleauth
+#
+
+# The topic console auth proxy nodes listen on (string value)
+#consoleauth_topic=consoleauth
+
+
+#
+# Options defined in nova.consoleauth.manager
+#
+
+# How many seconds before deleting tokens (integer value)
+#console_token_ttl=600
+
+
+#
+# Options defined in nova.db.api
+#
+
+# Services to be added to the available pool on create
+# (boolean value)
+#enable_new_services=true
+
+# Template string to be used to generate instance names
+# (string value)
+#instance_name_template=instance-%08x
+
+# Template string to be used to generate snapshot names
+# (string value)
+#snapshot_name_template=snapshot-%s
+
+
+#
+# Options defined in nova.db.base
+#
+
+# The driver to use for database access (string value)
+#db_driver=nova.db
+
+
+#
+# Options defined in nova.db.sqlalchemy.api
+#
+
+# When set, compute API will consider duplicate hostnames
+# invalid within the specified scope, regardless of case.
+# Should be empty, "project" or "global". (string value)
+#osapi_compute_unique_server_name_scope=
+
+
+#
+# Options defined in nova.image.s3
+#
+
+# Parent directory for tempdir used for image decryption
+# (string value)
+#image_decryption_dir=/tmp
+
+# Hostname or IP for OpenStack to use when accessing the S3
+# api (string value)
+#s3_host=$my_ip
+
+# Port used when accessing the S3 api (integer value)
+#s3_port=3333
+
+# Access key to use for S3 server for images (string value)
+#s3_access_key=notchecked
+
+# Secret key to use for S3 server for images (string value)
+#s3_secret_key=notchecked
+
+# Whether to use SSL when talking to S3 (boolean value)
+#s3_use_ssl=false
+
+# Whether to affix the tenant id to the access key when
+# downloading from S3 (boolean value)
+#s3_affix_tenant=false
+
+
+#
+# Options defined in nova.ipv6.api
+#
+
+# Backend to use for IPv6 generation (string value)
+#ipv6_backend=rfc2462
+
+
+#
+# Options defined in nova.network
+#
+
+# The full class name of the network API class to use (string
+# value)
+#network_api_class=nova.network.api.API
+network_api_class=nova.network.neutronv2.api.API
+
+
+#
+# Options defined in nova.network.driver
+#
+
+# Driver to use for network creation (string value)
+#network_driver=nova.network.linux_net
+
+
+#
+# Options defined in nova.network.floating_ips
+#
+
+# Default pool for floating IPs (string value)
+#default_floating_pool=nova
+default_floating_pool=public
+
+# Autoassigning floating IP to VM (boolean value)
+#auto_assign_floating_ip=false
+
+# Full class name for the DNS Manager for floating IPs (string
+# value)
+#floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
+
+# Full class name for the DNS Manager for instance IPs (string
+# value)
+#instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
+
+# Full class name for the DNS Zone for instance IPs (string
+# value)
+#instance_dns_domain=
+
+
+#
+# Options defined in nova.network.ldapdns
+#
+
+# URL for LDAP server which will store DNS entries (string
+# value)
+#ldap_dns_url=ldap://ldap.example.com:389
+
+# User for LDAP DNS (string value)
+#ldap_dns_user=uid=admin,ou=people,dc=example,dc=org
+
+# Password for LDAP DNS (string value)
+#ldap_dns_password=password
+
+# Hostmaster for LDAP DNS driver Statement of Authority
+# (string value)
+#ldap_dns_soa_hostmaster=hostmaster@example.org
+
+# DNS Servers for LDAP DNS driver (multi valued)
+#ldap_dns_servers=dns.example.org
+
+# Base DN for DNS entries in LDAP (string value)
+#ldap_dns_base_dn=ou=hosts,dc=example,dc=org
+
+# Refresh interval (in seconds) for LDAP DNS driver Statement
+# of Authority (string value)
+#ldap_dns_soa_refresh=1800
+
+# Retry interval (in seconds) for LDAP DNS driver Statement of
+# Authority (string value)
+#ldap_dns_soa_retry=3600
+
+# Expiry interval (in seconds) for LDAP DNS driver Statement
+# of Authority (string value)
+#ldap_dns_soa_expiry=86400
+
+# Minimum interval (in seconds) for LDAP DNS driver Statement
+# of Authority (string value)
+#ldap_dns_soa_minimum=7200
+
+
+#
+# Options defined in nova.network.linux_net
+#
+
+# Location of flagfiles for dhcpbridge (multi valued)
+#dhcpbridge_flagfile=/etc/nova/nova.conf
+
+# Location to keep network config files (string value)
+#networks_path=$state_path/networks
+
+# Interface for public IP addresses (string value)
+#public_interface=eth0
+
+# Location of nova-dhcpbridge (string value)
+#dhcpbridge=/usr/bin/nova-dhcpbridge
+
+# Public IP of network host (string value)
+#routing_source_ip=$my_ip
+
+# Lifetime of a DHCP lease in seconds (integer value)
+#dhcp_lease_time=86400
+
+# If set, uses specific DNS server for dnsmasq. Can be
+# specified multiple times. (multi valued)
+#dns_server=
+
+# If set, uses the dns1 and dns2 from the network ref. as dns
+# servers. (boolean value)
+#use_network_dns_servers=false
+
+# A list of dmz ranges that should be accepted (list value)
+#dmz_cidr=
+
+# Traffic to this range will always be snatted to the fallback
+# ip, even if it would normally be bridged out of the node.
+# Can be specified multiple times. (multi valued)
+#force_snat_range=
+force_snat_range=0.0.0.0/0
+
+# Override the default dnsmasq settings with this file (string
+# value)
+#dnsmasq_config_file=
+
+# Driver used to create ethernet devices. (string value)
+#linuxnet_interface_driver=nova.network.linux_net.LinuxBridgeInterfaceDriver
+
+# Name of Open vSwitch bridge used with linuxnet (string
+# value)
+#linuxnet_ovs_integration_bridge=br-int
+
+# Send gratuitous ARPs for HA setup (boolean value)
+#send_arp_for_ha=false
+
+# Send this many gratuitous ARPs for HA setup (integer value)
+#send_arp_for_ha_count=3
+
+# Use single default gateway. Only first nic of vm will get
+# default gateway from dhcp server (boolean value)
+#use_single_default_gateway=false
+
+# An interface that bridges can forward to. If this is set to
+# all then all traffic will be forwarded. Can be specified
+# multiple times. (multi valued)
+#forward_bridge_interface=all
+
+# The IP address for the metadata API server (string value)
+#metadata_host=$my_ip
+metadata_host=192.168.0.10
+
+# The port for the metadata API port (integer value)
+#metadata_port=8775
+
+# Regular expression to match the iptables rule that should
+# always be on the top. (string value)
+#iptables_top_regex=
+
+# Regular expression to match the iptables rule that should
+# always be on the bottom. (string value)
+#iptables_bottom_regex=
+
+# The table that iptables to jump to when a packet is to be
+# dropped. (string value)
+#iptables_drop_action=DROP
+
+# Amount of time, in seconds, that ovs_vsctl should wait for a
+# response from the database. 0 is to wait forever. (integer
+# value)
+#ovs_vsctl_timeout=120
+
+# If passed, use fake network devices and addresses (boolean
+# value)
+#fake_network=false
+
+# Number of times to retry ebtables commands on failure.
+# (integer value)
+#ebtables_exec_attempts=3
+
+# Number of seconds to wait between ebtables retries.
+# (floating point value)
+#ebtables_retry_interval=1.0
+
+
+#
+# Options defined in nova.network.manager
+#
+
+# Bridge for simple network instances (string value)
+#flat_network_bridge=<None>
+
+# DNS server for simple network (string value)
+#flat_network_dns=8.8.4.4
+
+# Whether to attempt to inject network setup into guest
+# (boolean value)
+#flat_injected=false
+
+# FlatDhcp will bridge into this interface if set (string
+# value)
+#flat_interface=<None>
+
+# First VLAN for private networks (integer value)
+#vlan_start=100
+
+# VLANs will bridge into this interface if set (string value)
+#vlan_interface=<None>
+
+# Number of networks to support (integer value)
+#num_networks=1
+
+# Public IP for the cloudpipe VPN servers (string value)
+#vpn_ip=$my_ip
+
+# First Vpn port for private networks (integer value)
+#vpn_start=1000
+
+# Number of addresses in each private subnet (integer value)
+#network_size=256
+
+# Fixed IPv6 address block (string value)
+#fixed_range_v6=fd00::/48
+
+# Default IPv4 gateway (string value)
+#gateway=<None>
+
+# Default IPv6 gateway (string value)
+#gateway_v6=<None>
+
+# Number of addresses reserved for vpn clients (integer value)
+#cnt_vpn_clients=0
+
+# Seconds after which a deallocated IP is disassociated
+# (integer value)
+#fixed_ip_disassociate_timeout=600
+
+# Number of attempts to create unique mac address (integer
+# value)
+#create_unique_mac_address_attempts=5
+
+# If True, skip using the queue and make local calls (boolean
+# value)
+#fake_call=false
+
+# If True, unused gateway devices (VLAN and bridge) are
+# deleted in VLAN network mode with multi hosted networks
+# (boolean value)
+#teardown_unused_network_gateway=false
+
+# If True, send a dhcp release on instance termination
+# (boolean value)
+#force_dhcp_release=True
+
+# If True, when a DNS entry must be updated, it sends a fanout
+# cast to all network hosts to update their DNS entries in
+# multi host mode (boolean value)
+#update_dns_entries=false
+
+# Number of seconds to wait between runs of updates to DNS
+# entries. (integer value)
+#dns_update_periodic_interval=-1
+
+# Domain to use for building the hostnames (string value)
+#dhcp_domain=novalocal
+dhcp_domain=novalocal
+
+# Indicates underlying L3 management library (string value)
+#l3_lib=nova.network.l3.LinuxNetL3
+
+
+#
+# Options defined in nova.network.rpcapi
+#
+
+# The topic network nodes listen on (string value)
+#network_topic=network
+
+# Default value for multi_host in networks. Also, if set, some
+# rpc network calls will be sent directly to host. (boolean
+# value)
+#multi_host=false
+
+
+#
+# Options defined in nova.network.security_group.openstack_driver
+#
+
+# The full class name of the security API class (string value)
+#security_group_api=nova
+security_group_api=neutron
+
+
+#
+# Options defined in nova.objects.network
+#
+
+# DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE
+# NETWORK. If True in multi_host mode, all compute hosts share
+# the same dhcp address. The same IP address used for DHCP
+# will be added on each nova-network node which is only
+# visible to the vms on the same host. (boolean value)
+#share_dhcp_address=false
+
+# DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE
+# NETWORK. MTU setting for network interface. (integer value)
+#network_device_mtu=<None>
+
+
+#
+# Options defined in nova.objectstore.s3server
+#
+
+# Path to S3 buckets (string value)
+#buckets_path=$state_path/buckets
+
+# IP address for S3 API to listen (string value)
+#s3_listen=0.0.0.0
+
+# Port for S3 API to listen (integer value)
+#s3_listen_port=3333
+
+
+#
+# From oslo.log
+#
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING
+# level). (boolean value)
+#debug=false
+debug=false
+
+# Print more verbose output (set logging level to INFO instead of default
+# WARNING level). (boolean value)
+#verbose=false
+verbose=true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. (string value)
+# Deprecated group;name - DEFAULT;log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string which may use any
+# of the available logging.LogRecord attributes. This option is deprecated.
+# Please use logging_context_format_string and logging_default_format_string
+# instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default: %(default)s . (string
+# value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is set, logging will
+# go to stdout. (string value)
+# Deprecated group;name - DEFAULT;logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file paths. (string
+# value)
+# Deprecated group;name - DEFAULT;logdir
+#log_dir=/var/log/nova
+log_dir=/var/log/nova
+
+# Use syslog for logging. Existing syslog format is DEPRECATED during I, and
+# will change in J to honor RFC5424. (boolean value)
+#use_syslog=false
+use_syslog=False
+
+# (Optional) Enables or disables syslog rfc5424 format for logging. If enabled,
+# prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The
+# format without the APP-NAME is deprecated in I, and will be removed in J.
+# (boolean value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+# Log output to standard error. (boolean value)
+#use_stderr=False
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context. (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+
+#
+# Options defined in nova.pci.request
+#
+
+# An alias for a PCI passthrough device requirement. This
+# allows users to specify the alias in the extra_spec for a
+# flavor, without needing to repeat all the PCI property
+# requirements. For example: pci_alias = { "name":
+# "QuicAssist", "product_id": "0443", "vendor_id": "8086",
+# "device_type": "ACCEL" } defines an alias for the Intel
+# QuickAssist card. (multi valued)
+#pci_alias=
+
+
+#
+# Options defined in nova.pci.whitelist
+#
+
+# White list of PCI devices available to VMs. For example:
+# pci_passthrough_whitelist = [{"vendor_id": "8086",
+# "product_id": "0443"}] (multi valued)
+#pci_passthrough_whitelist=
+
+
+#
+# Options defined in nova.scheduler.driver
+#
+
+# The scheduler host manager class to use (string value)
+#scheduler_host_manager=nova.scheduler.host_manager.HostManager
+
+
+#
+# Options defined in nova.scheduler.filter_scheduler
+#
+
+# New instances will be scheduled on a host chosen randomly
+# from a subset of the N best hosts. This property defines the
+# subset size that a host is chosen from. A value of 1 chooses
+# the first host returned by the weighing functions. This
+# value must be at least 1. Any value less than 1 will be
+# ignored, and 1 will be used instead (integer value)
+#scheduler_host_subset_size=1
+
+
+#
+# Options defined in nova.scheduler.filters.aggregate_image_properties_isolation
+#
+
+# Force the filter to consider only keys matching the given
+# namespace. (string value)
+#aggregate_image_properties_isolation_namespace=<None>
+
+# The separator used between the namespace and keys (string
+# value)
+#aggregate_image_properties_isolation_separator=.
+
+
+#
+# Options defined in nova.scheduler.filters.core_filter
+#
+
+# Virtual CPU to physical CPU allocation ratio which affects
+# all CPU filters. This configuration specifies a global ratio
+# for CoreFilter. For AggregateCoreFilter, it will fall back
+# to this configuration value if no per-aggregate setting
+# found. (floating point value)
+#cpu_allocation_ratio=16.0
+cpu_allocation_ratio=16.0
+
+
+#
+# Options defined in nova.scheduler.filters.disk_filter
+#
+
+# Virtual disk to physical disk allocation ratio (floating
+# point value)
+#disk_allocation_ratio=1.0
+
+
+#
+# Options defined in nova.scheduler.filters.io_ops_filter
+#
+
+# Tells filters to ignore hosts that have this many or more
+# instances currently in build, resize, snapshot, migrate,
+# rescue or unshelve task states (integer value)
+#max_io_ops_per_host=8
+
+
+#
+# Options defined in nova.scheduler.filters.isolated_hosts_filter
+#
+
+# Images to run on isolated host (list value)
+#isolated_images=
+
+# Host reserved for specific images (list value)
+#isolated_hosts=
+
+# Whether to force isolated hosts to run only isolated images
+# (boolean value)
+#restrict_isolated_hosts_to_isolated_images=true
+
+
+#
+# Options defined in nova.scheduler.filters.num_instances_filter
+#
+
+# Ignore hosts that have too many instances (integer value)
+#max_instances_per_host=50
+
+
+#
+# Options defined in nova.scheduler.filters.ram_filter
+#
+
+# Virtual ram to physical ram allocation ratio which affects
+# all ram filters. This configuration specifies a global ratio
+# for RamFilter. For AggregateRamFilter, it will fall back to
+# this configuration value if no per-aggregate setting found.
+# (floating point value)
+#ram_allocation_ratio=1.5
+ram_allocation_ratio=1.5
+
+
+#
+# Options defined in nova.scheduler.host_manager
+#
+
+# Filter classes available to the scheduler which may be
+# specified more than once. An entry of
+# "nova.scheduler.filters.all_filters" maps to all filters
+# included with nova. (multi valued)
+#scheduler_available_filters=nova.scheduler.filters.all_filters
+
+# Which filter class names to use for filtering hosts when not
+# specified in the request. (list value)
+#scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
+scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,CoreFilter
+
+# Which weight class names to use for weighing hosts (list
+# value)
+#scheduler_weight_classes=nova.scheduler.weights.all_weighers
+
+# Determines if the Scheduler tracks changes to instances to
+# help with its filtering decisions. (boolean value)
+#scheduler_tracks_instance_changes=true
+
+
+#
+# Options defined in nova.scheduler.ironic_host_manager
+#
+
+# Which filter class names to use for filtering baremetal
+# hosts when not specified in the request. (list value)
+#baremetal_scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter
+
+# Flag to decide whether to use
+# baremetal_scheduler_default_filters or not. (boolean value)
+#scheduler_use_baremetal_filters=false
+
+
+#
+# Options defined in nova.scheduler.manager
+#
+
+# Default driver to use for the scheduler (string value)
+#scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
+
+# How often (in seconds) to run periodic tasks in the
+# scheduler driver of your choice. Please note this is likely
+# to interact with the value of service_down_time, but exactly
+# how they interact will depend on your choice of scheduler
+# driver. (integer value)
+#scheduler_driver_task_period=60
+
+
+#
+# Options defined in nova.scheduler.rpcapi
+#
+
+# The topic scheduler nodes listen on (string value)
+#scheduler_topic=scheduler
+
+
+#
+# Options defined in nova.scheduler.scheduler_options
+#
+
+# Absolute path to scheduler configuration JSON file. (string
+# value)
+#scheduler_json_config_location=
+
+
+#
+# Options defined in nova.scheduler.utils
+#
+
+# Maximum number of attempts to schedule an instance (integer
+# value)
+#scheduler_max_attempts=3
+
+
+#
+# Options defined in nova.scheduler.weights.io_ops
+#
+
+# Multiplier used for weighing host io ops. Negative numbers
+# mean a preference to choose light workload compute hosts.
+# (floating point value)
+#io_ops_weight_multiplier=-1.0
+
+
+#
+# Options defined in nova.scheduler.weights.ram
+#
+
+# Multiplier used for weighing ram. Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=1.0
+
+
+#
+# Options defined in nova.servicegroup.api
+#
+
+# The driver for servicegroup service (valid options are: db,
+# zk, mc) (string value)
+#servicegroup_driver=db
+
+
+#
+# Options defined in nova.virt.configdrive
+#
+
+# Config drive format. One of iso9660 (default) or vfat
+# (string value)
+#config_drive_format=iso9660
+
+# Set to "always" to force injection to take place on a config
+# drive. NOTE: The "always" will be deprecated in the Liberty
+# release cycle. (string value)
+#force_config_drive=<None>
+
+# Name and optionally path of the tool used for ISO image
+# creation (string value)
+#mkisofs_cmd=genisoimage
+
+
+#
+# Options defined in nova.virt.disk.api
+#
+
+# Name of the mkfs commands for ephemeral device. The format
+# is <os_type>=<mkfs command> (multi valued)
+#virt_mkfs=
+
+# Attempt to resize the filesystem by accessing the image over
+# a block device. This is done by the host and may not be
+# necessary if the image contains a recent version of cloud-
+# init. Possible mechanisms require the nbd driver (for qcow
+# and raw), or loop (for raw). (boolean value)
+#resize_fs_using_block_device=false
+
+
+#
+# Options defined in nova.virt.disk.mount.nbd
+#
+
+# Amount of time, in seconds, to wait for NBD device start up.
+# (integer value)
+#timeout_nbd=10
+
+
+#
+# Options defined in nova.virt.driver
+#
+
+# Driver to use for controlling virtualization. Options
+# include: libvirt.LibvirtDriver, xenapi.XenAPIDriver,
+# fake.FakeDriver, baremetal.BareMetalDriver,
+# vmwareapi.VMwareVCDriver, hyperv.HyperVDriver (string value)
+#compute_driver=libvirt.LibvirtDriver
+
+# The default format an ephemeral_volume will be formatted
+# with on creation. (string value)
+#default_ephemeral_format=<None>
+
+# VM image preallocation mode: "none" => no storage
+# provisioning is done up front, "space" => storage is fully
+# allocated at instance start (string value)
+#preallocate_images=none
+
+# Whether to use cow images (boolean value)
+#use_cow_images=true
+
+# Fail instance boot if vif plugging fails (boolean value)
+#vif_plugging_is_fatal=true
+vif_plugging_is_fatal=True
+
+# Number of seconds to wait for neutron vif plugging events to
+# arrive before continuing or failing (see
+# vif_plugging_is_fatal). If this is set to zero and
+# vif_plugging_is_fatal is False, events should not be
+# expected to arrive at all. (integer value)
+#vif_plugging_timeout=300
+vif_plugging_timeout=300
+
+
+#
+# Options defined in nova.virt.firewall
+#
+
+# Firewall driver (defaults to hypervisor specific iptables
+# driver) (string value)
+#firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+
+# Whether to allow network traffic from same network (boolean
+# value)
+#allow_same_net_traffic=true
+
+
+#
+# Options defined in nova.virt.hardware
+#
+
+# Defines which pcpus that instance vcpus can use. For
+# example, "4-12,^8,15" (string value)
+#vcpu_pin_set=<None>
+
+
+#
+# Options defined in nova.virt.imagecache
+#
+
+# Number of seconds to wait between runs of the image cache
+# manager. Set to -1 to disable. Setting this to 0 will run at
+# the default rate. (integer value)
+#image_cache_manager_interval=2400
+
+# Where cached images are stored under $instances_path. This
+# is NOT the full path - just a folder name. For per-compute-
+# host cached images, set to _base_$my_ip (string value)
+#image_cache_subdirectory_name=_base
+
+# Should unused base images be removed? (boolean value)
+#remove_unused_base_images=true
+
+# Unused unresized base images younger than this will not be
+# removed (integer value)
+#remove_unused_original_minimum_age_seconds=86400
+
+
+#
+# Options defined in nova.virt.images
+#
+
+# Force backing images to raw format (boolean value)
+#force_raw_images=true
+
+
+#
+# Options defined in nova.virt.netutils
+#
+
+# Template file for injected network (string value)
+#injected_network_template=/usr/share/nova/interfaces.template
+
+
+#
+# Options defined in nova.vnc
+#
+
+# Location of VNC console proxy, in the form
+# "http://127.0.0.1:6080/vnc_auto.html" (string value)
+#novncproxy_base_url=http://127.0.0.1:6080/vnc_auto.html
+novncproxy_base_url=http://153.92.32.134:6080/vnc_auto.html
+
+# Location of nova xvp VNC console proxy, in the form
+# "http://127.0.0.1:6081/console" (string value)
+#xvpvncproxy_base_url=http://127.0.0.1:6081/console
+
+# IP address on which instance vncservers should listen
+# (string value)
+vncserver_listen=0.0.0.0
+
+# The address to which proxy clients (like nova-xvpvncproxy)
+# should connect (string value)
+# NOTE(review): 153.92.32.13 differs from the 153.92.32.134 host used in
+# novncproxy_base_url above -- confirm this is not a truncated last octet.
+vncserver_proxyclient_address=153.92.32.13
+
+# Enable VNC related features (boolean value)
+#vnc_enabled=true
+
+# Keymap for VNC (string value)
+#vnc_keymap=en-us
+
+
+#
+# Options defined in nova.vnc.xvp_proxy
+#
+
+# Port that the XCP VNC proxy should bind to (integer value)
+#xvpvncproxy_port=6081
+
+# Address that the XCP VNC proxy should bind to (string value)
+#xvpvncproxy_host=0.0.0.0
+
+
+#
+# Options defined in nova.volume
+#
+
+# The full class name of the volume API class to use (string
+# value)
+#volume_api_class=nova.volume.cinder.API
+volume_api_class=nova.volume.cinder.API
+
+
+#
+# Options defined in nova.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in nova.openstack.common.memorycache
+#
+
+# Memcached servers or None for in process cache. (list value)
+#memcached_servers=<None>
+
+
+#
+# Options defined in nova.openstack.common.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+#
+# Options defined in nova.openstack.common.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file=policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# (string value)
+#policy_default_rule=default
+
+# Directories where policy configuration files are stored.
+# They can be relative to any directory in the search path
+# defined by the config_dir option, or absolute paths. The
+# file defined by policy_file must exist for these directories
+# to be searched. Missing or empty directories are ignored.
+# (multi valued)
+#policy_dirs=policy.d
+
+
+#
+# Options defined in nova.openstack.common.versionutils
+#
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+# Deployment-specific overrides appended below (messaging, database,
+# image service, locking); they are unrelated to the fatal_deprecations
+# option documented above.
+amqp_durable_queues=False
+rabbit_hosts=192.168.0.10:5672
+rabbit_use_ssl=False
+rabbit_userid=guest
+rabbit_ha_queues=False
+# NOTE(review): default "guest" RabbitMQ credentials are acceptable for
+# this lab setup only; change them for any non-lab deployment.
+rabbit_password=guest
+rabbit_host=192.168.0.10
+# NOTE(review): sql_connection in [DEFAULT] is deprecated; prefer the
+# "connection" option in the [database] section (see below).
+sql_connection=mysql://nova:5dc7786f29f7444a@192.168.0.10/nova
+rabbit_virtual_host=/
+image_service=nova.image.glance.GlanceImageService
+rabbit_port=5672
+lock_path=/var/lib/nova/tmp
+osapi_volume_listen=0.0.0.0
+
+
+[api_database]
+
+#
+# Options defined in nova.db.sqlalchemy.api
+#
+
+# The SQLAlchemy connection string to use to connect to the
+# Nova API database. (string value)
+#connection=mysql://nova:nova@localhost/nova
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
+
+# Timeout before idle SQL connections are reaped. (integer
+# value)
+#idle_timeout=3600
+
+# Maximum number of SQL connections to keep open in a pool.
+# (integer value)
+#max_pool_size=<None>
+
+# Maximum number of database connection retries during
+# startup. Set to -1 to specify an infinite retry count.
+# (integer value)
+#max_retries=-1
+
+# Interval between retries of opening a SQL connection.
+# (integer value)
+#retry_interval=10
+
+# If set, use this value for max_overflow with SQLAlchemy.
+# (integer value)
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information: 0=None,
+# 100=Everything. (integer value)
+#connection_debug=0
+
+# Add Python stack traces to SQL as comment strings. (boolean
+# value)
+#connection_trace=false
+
+# If set, use this value for pool_timeout with SQLAlchemy.
+# (integer value)
+#pool_timeout=<None>
+
+
+[barbican]
+
+#
+# Options defined in nova.keymgr.barbican
+#
+
+# Info to match when looking for barbican in the service
+# catalog. Format is: separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#catalog_info=key-manager:barbican:public
+
+# Override service catalog lookup with template for barbican
+# endpoint e.g. http://localhost:9311/v1/%(project_id)s
+# (string value)
+#endpoint_template=<None>
+
+# Region name of this node (string value)
+#os_region_name=<None>
+
+
+#
+# Options defined in nova.volume.cinder
+#
+
+# Region name of this node (string value)
+#os_region_name=<None>
+
+
+[cells]
+
+#
+# Options defined in nova.cells.manager
+#
+
+# Cells communication driver to use (string value)
+#driver=nova.cells.rpc_driver.CellsRPCDriver
+
+# Number of seconds after an instance was updated or deleted
+# to continue to update cells (integer value)
+#instance_updated_at_threshold=3600
+
+# Number of instances to update per periodic task run (integer
+# value)
+#instance_update_num_instances=1
+
+
+#
+# Options defined in nova.cells.messaging
+#
+
+# Maximum number of hops for cells routing. (integer value)
+#max_hop_count=10
+
+# Cells scheduler to use (string value)
+#scheduler=nova.cells.scheduler.CellsScheduler
+
+
+#
+# Options defined in nova.cells.opts
+#
+
+# Enable cell functionality (boolean value)
+#enable=false
+
+# The topic cells nodes listen on (string value)
+#topic=cells
+
+# Manager for cells (string value)
+#manager=nova.cells.manager.CellsManager
+
+# Name of this cell (string value)
+#name=nova
+
+# Key/Multi-value list with the capabilities of the cell (list
+# value)
+#capabilities=hypervisor=xenserver;kvm,os=linux;windows
+
+# Seconds to wait for response from a call to a cell. (integer
+# value)
+#call_timeout=60
+
+# Percentage of cell capacity to hold in reserve. Affects both
+# memory and disk utilization (floating point value)
+#reserve_percent=10.0
+
+# Type of cell: api or compute (string value)
+#cell_type=compute
+
+# Number of seconds after which a lack of capability and
+# capacity updates signals the child cell is to be treated as
+# a mute. (integer value)
+#mute_child_interval=300
+
+# Seconds between bandwidth updates for cells. (integer value)
+#bandwidth_update_interval=600
+
+
+#
+# Options defined in nova.cells.rpc_driver
+#
+
+# Base queue name to use when communicating between cells.
+# Various topics by message type will be appended to this.
+# (string value)
+#rpc_driver_queue_base=cells.intercell
+
+
+#
+# Options defined in nova.cells.scheduler
+#
+
+# Filter classes the cells scheduler should use. An entry of
+# "nova.cells.filters.all_filters" maps to all cells filters
+# included with nova. (list value)
+#scheduler_filter_classes=nova.cells.filters.all_filters
+
+# Weigher classes the cells scheduler should use. An entry of
+# "nova.cells.weights.all_weighers" maps to all cell weighers
+# included with nova. (list value)
+#scheduler_weight_classes=nova.cells.weights.all_weighers
+
+# How many retries when no cells are available. (integer
+# value)
+#scheduler_retries=10
+
+# How often to retry in seconds when no cells are available.
+# (integer value)
+#scheduler_retry_delay=2
+
+
+#
+# Options defined in nova.cells.state
+#
+
+# Interval, in seconds, for getting fresh cell information
+# from the database. (integer value)
+#db_check_interval=60
+
+# Configuration file from which to read cells configuration.
+# If given, overrides reading cells from the database. (string
+# value)
+#cells_config=<None>
+
+
+#
+# Options defined in nova.cells.weights.mute_child
+#
+
+# Multiplier used to weigh mute children. (The value should be
+# negative.) (floating point value)
+#mute_weight_multiplier=-10.0
+
+# Weight value assigned to mute children. (The value should be
+# positive.) (floating point value)
+#mute_weight_value=1000.0
+
+
+#
+# Options defined in nova.cells.weights.ram_by_instance_type
+#
+
+# Multiplier used for weighing ram. Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=10.0
+
+
+#
+# Options defined in nova.cells.weights.weight_offset
+#
+
+# Multiplier used to weigh offset weigher. (floating point
+# value)
+#offset_weight_multiplier=1.0
+
+
+[cinder]
+
+#
+# Options defined in nova.volume.cinder
+#
+
+# Info to match when looking for cinder in the service
+# catalog. Format is: separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#catalog_info=volumev2:cinderv2:publicURL
+
+# Override service catalog lookup with template for cinder
+# endpoint e.g. http://localhost:8776/v1/%(project_id)s
+# (string value)
+#endpoint_template=<None>
+
+# Number of cinderclient retries on failed http calls (integer
+# value)
+#http_retries=3
+
+# Allow attach between instance and volume in different
+# availability zones. (boolean value)
+#cross_az_attach=true
+
+
+[conductor]
+
+#
+# Options defined in nova.conductor.api
+#
+
+# Perform nova-conductor operations locally (boolean value)
+#use_local=false
+
+# The topic on which conductor nodes listen (string value)
+#topic=conductor
+
+# Full class name for the Manager for conductor (string value)
+#manager=nova.conductor.manager.ConductorManager
+
+# Number of workers for OpenStack Conductor service. The
+# default will be the number of CPUs available. (integer
+# value)
+#workers=<None>
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group;name - DEFAULT;sqlite_db
+#sqlite_db=oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group;name - DEFAULT;sqlite_synchronous
+#sqlite_synchronous=true
+
+# The back end to use for the database. (string value)
+# Deprecated group;name - DEFAULT;db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database. (string
+# value)
+# Deprecated group;name - DEFAULT;sql_connection
+# Deprecated group;name - [DATABASE]/sql_connection
+# Deprecated group;name - [sql]/connection
+#connection=<None>
+
+# The SQLAlchemy connection string to use to connect to the slave database.
+# (string value)
+#slave_connection=<None>
+
+# The SQL mode to be used for MySQL sessions. This option, including the
+# default, overrides any server-set SQL mode. To use whatever SQL mode is set
+# by the server configuration, set this to no value. Example: mysql_sql_mode=
+# (string value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group;name - DEFAULT;sql_idle_timeout
+# Deprecated group;name - [DATABASE]/sql_idle_timeout
+# Deprecated group;name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group;name - DEFAULT;sql_min_pool_size
+# Deprecated group;name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group;name - DEFAULT;sql_max_pool_size
+# Deprecated group;name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum number of database connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group;name - DEFAULT;sql_max_retries
+# Deprecated group;name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a SQL connection. (integer value)
+# Deprecated group;name - DEFAULT;sql_retry_interval
+# Deprecated group;name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer value)
+# Deprecated group;name - DEFAULT;sql_max_overflow
+# Deprecated group;name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
+# value)
+# Deprecated group;name - DEFAULT;sql_connection_debug
+#connection_debug=0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group;name - DEFAULT;sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
+# Deprecated group;name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+# Enable the experimental use of database reconnect on connection lost.
+# (boolean value)
+#use_db_reconnect=false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval=1
+
+# If True, increases the interval between retries of a database operation up to
+# db_max_retry_interval. (boolean value)
+#db_inc_retry_interval=true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries of a
+# database operation. (integer value)
+#db_max_retry_interval=10
+
+# Maximum retries in case of connection error or deadlock error before error is
+# raised. Set to -1 to specify an infinite retry count. (integer value)
+#db_max_retries=20
+
+
+#
+# Options defined in nova.db.sqlalchemy.api
+#
+
+# The SQLAlchemy connection string to use to connect to the
+# slave database. (string value)
+#slave_connection=<None>
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+
+[ephemeral_storage_encryption]
+
+#
+# Options defined in nova.compute.api
+#
+
+# Whether to encrypt ephemeral storage (boolean value)
+#enabled=false
+
+# The cipher and mode to be used to encrypt ephemeral storage.
+# Which ciphers are available ciphers depends on kernel
+# support. See /proc/crypto for the list of available options.
+# (string value)
+#cipher=aes-xts-plain64
+
+# The bit length of the encryption key to be used to encrypt
+# ephemeral storage (in XTS mode only half of the bits are
+# used for encryption key) (integer value)
+#key_size=512
+
+
+[glance]
+
+#
+# Options defined in nova.image.glance
+#
+
+# Default glance hostname or IP address (string value)
+#host=$my_ip
+
+# Default glance port (integer value)
+#port=9292
+
+# Default protocol to use when connecting to glance. Set to
+# https for SSL. (string value)
+#protocol=http
+
+# A list of the glance api servers available to nova. Prefix
+# with https:// for ssl-based glance api servers.
+# ([hostname|ip]:port) (list value)
+#api_servers=<None>
+api_servers=192.168.0.10:9292
+
+# Allow to perform insecure SSL (https) requests to glance
+# (boolean value)
+#api_insecure=false
+
+# Number of retries when uploading / downloading an image to /
+# from glance. (integer value)
+#num_retries=0
+
+# A list of url scheme that can be downloaded directly via the
+# direct_url. Currently supported schemes: [file]. (list
+# value)
+#allowed_direct_url_schemes=
+
+
+[guestfs]
+
+#
+# Options defined in nova.virt.disk.vfs.guestfs
+#
+
+# Enable guestfs debug (boolean value)
+#debug=false
+
+
+[hyperv]
+
+#
+# Options defined in nova.virt.hyperv.pathutils
+#
+
+# The name of a Windows share name mapped to the
+# "instances_path" dir and used by the resize feature to copy
+# files to the target host. If left blank, an administrative
+# share will be used, looking for the same "instances_path"
+# used locally (string value)
+#instances_path_share=
+
+
+#
+# Options defined in nova.virt.hyperv.utilsfactory
+#
+
+# Force V1 WMI utility classes (boolean value)
+#force_hyperv_utils_v1=false
+
+# Force V1 volume utility class (boolean value)
+#force_volumeutils_v1=false
+
+
+#
+# Options defined in nova.virt.hyperv.vif
+#
+
+# External virtual switch Name, if not provided, the first
+# external virtual switch is used (string value)
+#vswitch_name=<None>
+
+
+#
+# Options defined in nova.virt.hyperv.vmops
+#
+
+# Required for live migration among hosts with different CPU
+# features (boolean value)
+#limit_cpu_features=false
+
+# Sets the admin password in the config drive image (boolean
+# value)
+#config_drive_inject_password=false
+
+# Path of qemu-img command which is used to convert between
+# different image types (string value)
+#qemu_img_cmd=qemu-img.exe
+
+# Attaches the Config Drive image as a cdrom drive instead of
+# a disk drive (boolean value)
+#config_drive_cdrom=false
+
+# Enables metrics collections for an instance by using
+# Hyper-V's metric APIs. Collected data can by retrieved by
+# other apps and services, e.g.: Ceilometer. Requires Hyper-V
+# / Windows Server 2012 and above (boolean value)
+#enable_instance_metrics_collection=false
+
+# Enables dynamic memory allocation (ballooning) when set to a
+# value greater than 1. The value expresses the ratio between
+# the total RAM assigned to an instance and its startup RAM
+# amount. For example a ratio of 2.0 for an instance with
+# 1024MB of RAM implies 512MB of RAM allocated at startup
+# (floating point value)
+#dynamic_memory_ratio=1.0
+
+# Number of seconds to wait for instance to shut down after
+# soft reboot request is made. We fall back to hard reboot if
+# instance does not shutdown within this window. (integer
+# value)
+#wait_soft_reboot_seconds=60
+
+
+#
+# Options defined in nova.virt.hyperv.volumeops
+#
+
+# The number of times to retry to attach a volume (integer
+# value)
+#volume_attach_retry_count=10
+
+# Interval between volume attachment attempts, in seconds
+# (integer value)
+#volume_attach_retry_interval=5
+
+# The number of times to retry checking for a disk mounted via
+# iSCSI. (integer value)
+#mounted_disk_query_retry_count=10
+
+# Interval between checks for a mounted iSCSI disk, in
+# seconds. (integer value)
+#mounted_disk_query_retry_interval=5
+
+
+[image_file_url]
+
+#
+# Options defined in nova.image.download.file
+#
+
+# List of file systems that are configured in this file in the
+# image_file_url:<list entry name> sections (list value)
+#filesystems=
+
+
+[ironic]
+
+#
+# Options defined in nova.virt.ironic.driver
+#
+
+# Version of Ironic API service endpoint. (integer value)
+#api_version=1
+
+# URL for Ironic API endpoint. (string value)
+#api_endpoint=<None>
+
+# Ironic keystone admin name (string value)
+#admin_username=<None>
+
+# Ironic keystone admin password. (string value)
+#admin_password=%SERVICE_PASSWORD%
+
+# Ironic keystone auth token. (string value)
+#admin_auth_token=<None>
+
+# Keystone public API endpoint. (string value)
+#admin_url=<None>
+
+# Log level override for ironicclient. Set this in order to
+# override the global "default_log_levels", "verbose", and
+# "debug" settings. DEPRECATED: use standard logging
+# configuration. (string value)
+#client_log_level=<None>
+
+# Ironic keystone tenant name. (string value)
+#admin_tenant_name=%SERVICE_TENANT_NAME%
+
+# How many retries when a request does conflict. (integer
+# value)
+#api_max_retries=60
+
+# How often to retry in seconds when a request does conflict
+# (integer value)
+#api_retry_interval=2
+
+
+[keymgr]
+
+#
+# Options defined in nova.keymgr
+#
+
+# The full class name of the key manager API class (string
+# value)
+#api_class=nova.keymgr.conf_key_mgr.ConfKeyManager
+
+
+#
+# Options defined in nova.keymgr.conf_key_mgr
+#
+
+# Fixed key returned by key manager, specified in hex (string
+# value)
+#fixed_key=<None>
+
+
+[keystone_authtoken]
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete public Identity API endpoint. (string value)
+#auth_uri=<None>
+auth_uri=http://192.168.0.10:5000/
+
+# API version of the admin Identity API endpoint. (string value)
+#auth_version=v2.0
+
+# Do not handle authorization requests within the middleware, but delegate the
+# authorization decision to downstream WSGI components. (boolean value)
+#delay_auth_decision=false
+
+# Request timeout value for communicating with Identity API server. (integer
+# value)
+#http_connect_timeout=<None>
+
+# How many times are we trying to reconnect when communicating with Identity
+# API Server. (integer value)
+#http_request_max_retries=3
+
+# Env key for the swift cache. (string value)
+#cache=<None>
+
+# Required if identity server requires client certificate (string value)
+#certfile=<None>
+
+# Required if identity server requires client certificate (string value)
+#keyfile=<None>
+
+# A PEM encoded Certificate Authority to use when verifying HTTPs connections.
+# Defaults to system CAs. (string value)
+#cafile=<None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure=false
+
+# Directory used to cache files related to PKI tokens. (string value)
+#signing_dir=<None>
+
+# Optionally specify a list of memcached server(s) to use for caching. If left
+# undefined, tokens will instead be cached in-process. (list value)
+# Deprecated group;name - DEFAULT;memcache_servers
+#memcached_servers=<None>
+
+# In order to prevent excessive effort spent validating tokens, the middleware
+# caches previously-seen tokens for a configurable duration (in seconds). Set
+# to -1 to disable caching completely. (integer value)
+#token_cache_time=300
+
+# Determines the frequency at which the list of revoked tokens is retrieved
+# from the Identity service (in seconds). A high number of revocation events
+# combined with a low cache duration may significantly reduce performance.
+# (integer value)
+#revocation_cache_time=10
+
+# (Optional) If defined, indicate whether token data should be authenticated or
+# authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC,
+# token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data
+# is encrypted and authenticated in the cache. If the value is not one of these
+# options or empty, auth_token will raise an exception on initialization.
+# (string value)
+#memcache_security_strategy=<None>
+
+# (Optional, mandatory if memcache_security_strategy is defined) This string is
+# used for key derivation. (string value)
+#memcache_secret_key=<None>
+
+# (Optional) Number of seconds memcached server is considered dead before it is
+# tried again. (integer value)
+#memcache_pool_dead_retry=300
+
+# (Optional) Maximum total number of open connections to every memcached
+# server. (integer value)
+#memcache_pool_maxsize=10
+
+# (Optional) Socket timeout in seconds for communicating with a memcache
+# server. (integer value)
+#memcache_pool_socket_timeout=3
+
+# (Optional) Number of seconds a connection to memcached is held unused in the
+# pool before it is closed. (integer value)
+#memcache_pool_unused_timeout=60
+
+# (Optional) Number of seconds that an operation will wait to get a memcache
+# client connection from the pool. (integer value)
+#memcache_pool_conn_get_timeout=10
+
+# (Optional) Use the advanced (eventlet safe) memcache client pool. The
+# advanced pool will only work under python 2.x. (boolean value)
+#memcache_use_advanced_pool=false
+
+# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
+# middleware will not ask for service catalog on token validation and will not
+# set the X-Service-Catalog header. (boolean value)
+#include_service_catalog=true
+
+# Used to control the use and type of token binding. Can be set to: "disabled"
+# to not check token binding. "permissive" (default) to validate binding
+# information if the bind type is of a form known to the server and ignore it
+# if not. "strict" like "permissive" but if the bind type is unknown the token
+# will be rejected. "required" any form of token binding is needed to be
+# allowed. Finally the name of a binding method that must be present in tokens.
+# (string value)
+#enforce_token_bind=permissive
+
+# If true, the revocation list will be checked for cached tokens. This requires
+# that PKI tokens are configured on the identity server. (boolean value)
+#check_revocations_for_cached=false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm
+# or multiple. The algorithms are those supported by Python standard
+# hashlib.new(). The hashes will be tried in the order given, so put the
+# preferred one first for performance. The result of the first hash will be
+# stored in the cache. This will typically be set to multiple values only while
+# migrating from a less secure algorithm to a more secure one. Once all the old
+# tokens are expired this option should be set to a single value for better
+# performance. (list value)
+#hash_algorithms=md5
+
+# Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
+# (string value)
+#auth_admin_prefix =
+
+# Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
+# (string value)
+#auth_host=127.0.0.1
+auth_host=192.168.0.10
+
+# Port of the admin Identity API endpoint. Deprecated, use identity_uri.
+# (integer value)
+#auth_port=35357
+auth_port=35357
+
+# Protocol of the admin Identity API endpoint (http or https). Deprecated, use
+# identity_uri. (string value)
+#auth_protocol=http
+auth_protocol=http
+
+# Complete admin Identity API endpoint. This should specify the unversioned
+# root endpoint e.g. https://localhost:35357/ (string value)
+#identity_uri=<None>
+
+# This option is deprecated and may be removed in a future release. Single
+# shared secret with the Keystone configuration used for bootstrapping a
+# Keystone installation, or otherwise bypassing the normal authentication
+# process. This option should not be used, use `admin_user` and
+# `admin_password` instead. (string value)
+#admin_token=<None>
+
+# Service username. (string value)
+#admin_user=%SERVICE_USER%
+admin_user=nova
+
+# Service user password. (string value)
+#admin_password=<None>
+admin_password=ec0e7532149c4c1b
+
+# Service tenant name. (string value)
+#admin_tenant_name=admin
+admin_tenant_name=services
+
+
+[libvirt]
+
+#
+# Options defined in nova.virt.libvirt.driver
+#
+
+# Rescue ami image. This will not be used if an image id is
+# provided by the user. (string value)
+#rescue_image_id=<None>
+
+# Rescue aki image (string value)
+#rescue_kernel_id=<None>
+
+# Rescue ari image (string value)
+#rescue_ramdisk_id=<None>
+
+# Libvirt domain type (valid options are: kvm, lxc, qemu, uml,
+# xen and parallels) (string value)
+#virt_type=kvm
+
+# Override the default libvirt URI (which is dependent on
+# virt_type) (string value)
+#connection_uri=
+
+# Inject the admin password at boot time, without an agent.
+# (boolean value)
+#inject_password=false
+
+# Inject the ssh public key at boot time (boolean value)
+#inject_key=false
+
+# The partition to inject to : -2 => disable, -1 => inspect
+# (libguestfs only), 0 => not partitioned, >0 => partition
+# number (integer value)
+#inject_partition=-2
+
+# Sync virtual and real mouse cursors in Windows VMs (boolean
+# value)
+#use_usb_tablet=true
+
+# Migration target URI (any included "%s" is replaced with the
+# migration target hostname) (string value)
+#live_migration_uri=qemu+tcp://%s/system
+
+# Migration flags to be set for live migration (string value)
+#live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED
+
+# Migration flags to be set for block migration (string value)
+#block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, VIR_MIGRATE_NON_SHARED_INC
+
+# Maximum bandwidth to be used during migration, in Mbps
+# (integer value)
+#live_migration_bandwidth=0
+
+# Snapshot image format (valid options are : raw, qcow2, vmdk,
+# vdi). Defaults to same as source image (string value)
+#snapshot_image_format=<None>
+
+# Override the default disk prefix for the devices attached to
+# a server, which is dependent on virt_type. (valid options
+# are: sd, xvd, uvd, vd) (string value)
+#disk_prefix=<None>
+
+# Number of seconds to wait for instance to shut down after
+# soft reboot request is made. We fall back to hard reboot if
+# instance does not shutdown within this window. (integer
+# value)
+#wait_soft_reboot_seconds=120
+
+# Set to "host-model" to clone the host CPU feature flags; to
+# "host-passthrough" to use the host CPU model exactly; to
+# "custom" to use a named CPU model; to "none" to not set any
+# CPU model. If virt_type="kvm|qemu", it will default to
+# "host-model", otherwise it will default to "none" (string
+# value)
+#cpu_mode=<None>
+
+# Set to a named libvirt CPU model (see names listed in
+# /usr/share/libvirt/cpu_map.xml). Only has effect if
+# cpu_mode="custom" and virt_type="kvm|qemu" (string value)
+#cpu_model=<None>
+
+# Location where libvirt driver will store snapshots before
+# uploading them to image service (string value)
+#snapshots_directory=$instances_path/snapshots
+
+# Location where the Xen hvmloader is kept (string value)
+#xen_hvmloader_path=/usr/lib/xen/boot/hvmloader
+
+# Specific cachemodes to use for different disk types e.g:
+# file=directsync,block=none (list value)
+#disk_cachemodes=
+
+# A path to a device that will be used as source of entropy on
+# the host. Permitted options are: /dev/random or /dev/hwrng
+# (string value)
+#rng_dev_path=<None>
+
+# For qemu or KVM guests, set this option to specify a default
+# machine type per host architecture. You can find a list of
+# supported machine types in your environment by checking the
+# output of the "virsh capabilities"command. The format of the
+# value for this config option is host-arch=machine-type. For
+# example: x86_64=machinetype1,armv7l=machinetype2 (list
+# value)
+#hw_machine_type=<None>
+
+# The data source used to populate the host "serial" UUID
+# exposed to guest in the virtual BIOS. Permitted options are
+# "hardware", "os", "none" or "auto" (default). (string value)
+#sysinfo_serial=auto
+
+# The period, in seconds, for collecting memory usage statistics.
+# Zero or negative values disable memory usage statistics.
+# (integer value)
+#mem_stats_period_seconds=10
+
+# List of uid targets and ranges. Syntax is guest-uid:host-
+# uid:count. Maximum of 5 allowed. (list value)
+#uid_maps=
+
+# List of gid targets and ranges. Syntax is guest-gid:host-
+# gid:count. Maximum of 5 allowed. (list value)
+#gid_maps=
+
+
+#
+# Options defined in nova.virt.libvirt.imagebackend
+#
+
+# VM Images format. Acceptable values are: raw, qcow2, lvm,
+# rbd, default. If default is specified, then use_cow_images
+# flag is used instead of this one. (string value)
+#images_type=default
+
+# LVM Volume Group that is used for VM images, when you
+# specify images_type=lvm. (string value)
+#images_volume_group=<None>
+
+# Create sparse logical volumes (with virtualsize) if this
+# flag is set to True. (boolean value)
+#sparse_logical_volumes=false
+
+# The RADOS pool in which rbd volumes are stored (string
+# value)
+#images_rbd_pool=rbd
+
+# Path to the ceph configuration file to use (string value)
+#images_rbd_ceph_conf=
+
+# Discard option for nova managed disks (valid options are:
+# ignore, unmap). Need Libvirt(1.0.6) Qemu1.5 (raw format)
+# Qemu1.6(qcow2 format) (string value)
+#hw_disk_discard=<None>
+
+
+#
+# Options defined in nova.virt.libvirt.imagecache
+#
+
+# Allows image information files to be stored in non-standard
+# locations (string value)
+#image_info_filename_pattern=$instances_path/$image_cache_subdirectory_name/%(image)s.info
+
+# Should unused kernel images be removed? This is only safe to
+# enable if all compute nodes have been updated to support
+# this option. This will be enabled by default in future.
+# (boolean value)
+#remove_unused_kernels=false
+
+# Unused resized base images younger than this will not be
+# removed (integer value)
+#remove_unused_resized_minimum_age_seconds=3600
+
+# Write a checksum for files in _base to disk (boolean value)
+#checksum_base_images=false
+
+# How frequently to checksum base images (integer value)
+#checksum_interval_seconds=3600
+
+
+#
+# Options defined in nova.virt.libvirt.lvm
+#
+
+# Method used to wipe old volumes (valid options are: none,
+# zero, shred) (string value)
+#volume_clear=zero
+
+# Size in MiB to wipe at start of old volumes. 0 => all
+# (integer value)
+#volume_clear_size=0
+
+
+#
+# Options defined in nova.virt.libvirt.utils
+#
+
+# Compress snapshot images when possible. This currently
+# applies exclusively to qcow2 images (boolean value)
+#snapshot_compression=false
+
+
+#
+# Options defined in nova.virt.libvirt.vif
+#
+
+# Use virtio for bridge interfaces with KVM/QEMU (boolean
+# value)
+#use_virtio_for_bridges=true
+
+
+#
+# Options defined in nova.virt.libvirt.volume
+#
+
+# Number of times to rescan iSCSI target to find volume
+# (integer value)
+#num_iscsi_scan_tries=5
+
+# Number of times to rescan iSER target to find volume
+# (integer value)
+#num_iser_scan_tries=5
+
+# The RADOS client name for accessing rbd volumes (string
+# value)
+#rbd_user=<None>
+
+# The libvirt UUID of the secret for rbd_user volumes
+# (string value)
+#rbd_secret_uuid=<None>
+
+# Directory where the NFS volume is mounted on the compute
+# node (string value)
+#nfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the NFS client. See section of the
+# nfs man page for details (string value)
+#nfs_mount_options=<None>
+
+# Directory where the SMBFS shares are mounted on the compute
+# node (string value)
+#smbfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the SMBFS client. See mount.cifs man
+# page for details. Note that the libvirt-qemu uid and gid
+# must be specified. (string value)
+#smbfs_mount_options=
+
+# Number of times to rediscover AoE target to find volume
+# (integer value)
+#num_aoe_discover_tries=3
+
+# Directory where the glusterfs volume is mounted on the
+# compute node (string value)
+#glusterfs_mount_point_base=$state_path/mnt
+
+# Use multipath connection of the iSCSI volume (boolean value)
+#iscsi_use_multipath=false
+
+# Use multipath connection of the iSER volume (boolean value)
+#iser_use_multipath=false
+
+# Path or URL to Scality SOFS configuration file (string
+# value)
+#scality_sofs_config=<None>
+
+# Base dir where Scality SOFS shall be mounted (string value)
+#scality_sofs_mount_point=$state_path/scality
+
+# Protocols listed here will be accessed directly from QEMU.
+# Currently supported protocols: [gluster] (list value)
+#qemu_allowed_storage_drivers=
+
+# Directory where the Quobyte volume is mounted on the compute
+# node (string value)
+#quobyte_mount_point_base=$state_path/mnt
+
+# Path to a Quobyte Client configuration file. (string value)
+#quobyte_client_cfg=<None>
+
+# The iSCSI transport iface to use to connect to target in
+# case offload support is desired. Supported transports are
+# be2iscsi, bnx2i, cxgb3i, cxgb4i, qla4xxx and ocs. Default
+# format is transport_name.hwaddress and can be generated
+# manually or via iscsiadm -m iface (string value)
+# Deprecated group;name - DEFAULT;iscsi_transport
+#iscsi_iface=<None>
+vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
+
+
+[metrics]
+
+#
+# Options defined in nova.scheduler.weights.metrics
+#
+
+# Multiplier used for weighing metrics. (floating point value)
+#weight_multiplier=1.0
+
+# How the metrics are going to be weighed. This should be in
+# the form of "<name1>=<ratio1>, <name2>=<ratio2>, ...", where
+# <nameX> is one of the metrics to be weighed, and <ratioX> is
+# the corresponding ratio. So for "name1=1.0, name2=-1.0" The
+# final weight would be name1.value * 1.0 + name2.value *
+# -1.0. (list value)
+#weight_setting=
+
+# How to treat the unavailable metrics. When a metric is NOT
+# available for a host, if it is set to be True, it would
+# raise an exception, so it is recommended to use the
+# scheduler filter MetricFilter to filter out those hosts. If
+# it is set to be False, the unavailable metric would be
+# treated as a negative factor in weighing process, the
+# returned value would be set by the option
+# weight_of_unavailable. (boolean value)
+#required=true
+
+# The final weight value to be returned if required is set to
+# False and any one of the metrics set by weight_setting is
+# unavailable. (floating point value)
+#weight_of_unavailable=-10000.0
+
+
+[neutron]
+
+#
+# Options defined in nova.api.metadata.handler
+#
+
+# Set flag to indicate Neutron will proxy metadata requests
+# and resolve instance ids. (boolean value)
+#service_metadata_proxy=false
+service_metadata_proxy=True
+
+# Shared secret used to validate proxied Neutron metadata requests
+# (string value)
+#metadata_proxy_shared_secret=
+metadata_proxy_shared_secret=4f67c8d5ca4e41b7
+
+
+#
+# Options defined in nova.network.neutronv2.api
+#
+
+# URL for connecting to neutron (string value)
+#url=http://127.0.0.1:9696
+url=http://192.168.0.10:9696
+
+# User id for connecting to neutron in admin context.
+# DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. (string value)
+#admin_user_id=<None>
+
+# Username for connecting to neutron in admin context
+# DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. (string value)
+#admin_username=<None>
+admin_username=neutron
+
+# Password for connecting to neutron in admin context
+# DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. (string value)
+#admin_password=<None>
+admin_password=b80e24e5e8fe4939
+
+# Tenant id for connecting to neutron in admin context
+# DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. (string value)
+#admin_tenant_id=<None>
+
+# Tenant name for connecting to neutron in admin context. This
+# option will be ignored if neutron_admin_tenant_id is set.
+# Note that with Keystone V3 tenant names are only unique
+# within a domain. DEPRECATED: specify an auth_plugin and
+# appropriate credentials instead. (string value)
+#admin_tenant_name=<None>
+admin_tenant_name=services
+
+# Region name for connecting to neutron in admin context
+# (string value)
+#region_name=<None>
+region_name=RegionOne
+
+# Authorization URL for connecting to neutron in admin
+# context. DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. (string value)
+#admin_auth_url=http://localhost:5000/v2.0
+admin_auth_url=http://192.168.0.10:35357/v2.0
+
+# Authorization strategy for connecting to neutron in admin
+# context. DEPRECATED: specify an auth_plugin and appropriate
+# credentials instead. If an auth_plugin is specified strategy
+# will be ignored. (string value)
+#auth_strategy=keystone
+auth_strategy=keystone
+
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+#ovs_bridge=br-int
+ovs_bridge=br-int
+
+# Number of seconds before querying neutron for extensions
+# (integer value)
+#extension_sync_interval=600
+extension_sync_interval=600
+
+# DEPRECATED: Allow an instance to have multiple vNICs
+# attached to the same Neutron network. This option is
+# deprecated in the 2015.1 release and will be removed in the
+# 2015.2 release where the default behavior will be to always
+# allow multiple ports from the same network to be attached to
+# an instance. (boolean value)
+#allow_duplicate_networks=false
+url_timeout=30
+default_tenant_id=default
+
+
+[osapi_v3]
+
+#
+# Options defined in nova.api.openstack
+#
+
+# Whether the V3 API is enabled or not (boolean value)
+#enabled=false
+enabled=False
+
+# A list of v3 API extensions to never load. Specify the
+# extension aliases here. (list value)
+#extensions_blacklist=
+
+# If the list is not empty then a v3 API extension will only
+# be loaded if it exists in this list. Specify the extension
+# aliases here. (list value)
+#extensions_whitelist=
+
+
+[rdp]
+
+#
+# Options defined in nova.rdp
+#
+
+# Location of RDP html5 console proxy, in the form
+# "http://127.0.0.1:6083/" (string value)
+#html5_proxy_base_url=http://127.0.0.1:6083/
+
+# Enable RDP related features (boolean value)
+#enabled=false
+
+
+[serial_console]
+
+#
+# Options defined in nova.cmd.serialproxy
+#
+
+# Host on which to listen for incoming requests (string value)
+#serialproxy_host=0.0.0.0
+
+# Port on which to listen for incoming requests (integer
+# value)
+#serialproxy_port=6083
+
+
+#
+# Options defined in nova.console.serial
+#
+
+# Enable serial console related features (boolean value)
+#enabled=false
+
+# Range of TCP ports to use for serial ports on compute hosts
+# (string value)
+#port_range=10000:20000
+
+# Location of serial console proxy. (string value)
+#base_url=ws://127.0.0.1:6083/
+
+# IP address on which instance serial console should listen
+# (string value)
+#listen=127.0.0.1
+
+# The address to which proxy clients (like nova-serialproxy)
+# should connect (string value)
+#proxyclient_address=127.0.0.1
+
+
+[spice]
+
+#
+# Options defined in nova.cmd.spicehtml5proxy
+#
+
+# Host on which to listen for incoming requests (string value)
+#html5proxy_host=0.0.0.0
+
+# Port on which to listen for incoming requests (integer
+# value)
+#html5proxy_port=6082
+
+
+#
+# Options defined in nova.spice
+#
+
+# Location of spice HTML5 console proxy, in the form
+# "http://127.0.0.1:6082/spice_auto.html" (string value)
+#html5proxy_base_url=http://127.0.0.1:6082/spice_auto.html
+
+# IP address on which instance spice server should listen
+# (string value)
+#server_listen=127.0.0.1
+
+# The address to which proxy clients (like nova-
+# spicehtml5proxy) should connect (string value)
+#server_proxyclient_address=127.0.0.1
+
+# Enable spice related features (boolean value)
+#enabled=false
+
+# Enable spice guest agent support (boolean value)
+#agent_enabled=true
+
+# Keymap for spice (string value)
+#keymap=en-us
+
+
+[ssl]
+
+#
+# Options defined in nova.openstack.common.sslutils
+#
+
+# CA certificate file to use to verify connecting clients.
+# (string value)
+#ca_file=<None>
+
+# Certificate file to use when starting the server securely.
+# (string value)
+#cert_file=<None>
+
+# Private key file to use when starting the server securely.
+# (string value)
+#key_file=<None>
+
+
+[trusted_computing]
+
+#
+# Options defined in nova.scheduler.filters.trusted_filter
+#
+
+# Attestation server HTTP (string value)
+#attestation_server=<None>
+
+# Attestation server Cert file for Identity verification
+# (string value)
+#attestation_server_ca_file=<None>
+
+# Attestation server port (string value)
+#attestation_port=8443
+
+# Attestation web API URL (string value)
+#attestation_api_url=/OpenAttestationWebServices/V1.0
+
+# Attestation authorization blob - must change (string value)
+#attestation_auth_blob=<None>
+
+# Attestation status cache valid period length (integer value)
+#attestation_auth_timeout=60
+
+# Disable SSL cert verification for Attestation service
+# (boolean value)
+#attestation_insecure_ssl=false
+
+
+[upgrade_levels]
+
+#
+# Options defined in nova.baserpc
+#
+
+# Set a version cap for messages sent to the base api in any
+# service (string value)
+#baseapi=<None>
+
+
+#
+# Options defined in nova.cells.rpc_driver
+#
+
+# Set a version cap for messages sent between cells services
+# (string value)
+#intercell=<None>
+
+
+#
+# Options defined in nova.cells.rpcapi
+#
+
+# Set a version cap for messages sent to local cells services
+# (string value)
+#cells=<None>
+
+
+#
+# Options defined in nova.cert.rpcapi
+#
+
+# Set a version cap for messages sent to cert services (string
+# value)
+#cert=<None>
+
+
+#
+# Options defined in nova.compute.rpcapi
+#
+
+# Set a version cap for messages sent to compute services. If
+# you plan to do a live upgrade from havana to icehouse, you
+# should set this option to "icehouse-compat" before beginning
+# the live upgrade procedure. (string value)
+#compute=<None>
+
+
+#
+# Options defined in nova.conductor.rpcapi
+#
+
+# Set a version cap for messages sent to conductor services
+# (string value)
+#conductor=<None>
+
+
+#
+# Options defined in nova.console.rpcapi
+#
+
+# Set a version cap for messages sent to console services
+# (string value)
+#console=<None>
+
+
+#
+# Options defined in nova.consoleauth.rpcapi
+#
+
+# Set a version cap for messages sent to consoleauth services
+# (string value)
+#consoleauth=<None>
+
+
+#
+# Options defined in nova.network.rpcapi
+#
+
+# Set a version cap for messages sent to network services
+# (string value)
+#network=<None>
+
+
+#
+# Options defined in nova.scheduler.rpcapi
+#
+
+# Set a version cap for messages sent to scheduler services
+# (string value)
+#scheduler=<None>
+
+
+[vmware]
+
+#
+# Options defined in nova.virt.vmwareapi.driver
+#
+
+# The PBM status. (boolean value)
+#pbm_enabled=false
+
+# PBM service WSDL file location URL. e.g.
+# file:///opt/SDK/spbm/wsdl/pbmService.wsdl Not setting this
+# will disable storage policy based placement of instances.
+# (string value)
+#pbm_wsdl_location=<None>
+
+# The PBM default policy. If pbm_wsdl_location is set and
+# there is no defined storage policy for the specific request
+# then this policy will be used. (string value)
+#pbm_default_policy=<None>
+
+# Hostname or IP address for connection to VMware VC host.
+# (string value)
+#host_ip=<None>
+
+# Port for connection to VMware VC host. (integer value)
+#host_port=443
+
+# Username for connection to VMware VC host. (string value)
+#host_username=<None>
+
+# Password for connection to VMware VC host. (string value)
+#host_password=<None>
+
+# Name of a VMware Cluster ComputeResource. (multi valued)
+#cluster_name=<None>
+
+# Regex to match the name of a datastore. (string value)
+#datastore_regex=<None>
+
+# The interval used for polling of remote tasks. (floating
+# point value)
+#task_poll_interval=0.5
+
+# The number of times we retry on failures, e.g., socket
+# error, etc. (integer value)
+#api_retry_count=10
+
+# VNC starting port (integer value)
+#vnc_port=5900
+
+# Total number of VNC ports (integer value)
+#vnc_port_total=10000
+
+# Whether to use linked clone (boolean value)
+#use_linked_clone=true
+
+# Optional VIM Service WSDL Location e.g
+# http://<server>/vimService.wsdl. Optional over-ride to
+# default location for bug work-arounds (string value)
+#wsdl_location=<None>
+
+
+#
+# Options defined in nova.virt.vmwareapi.vif
+#
+
+# Physical ethernet adapter name for vlan networking (string
+# value)
+#vlan_interface=vmnic0
+
+# Name of Integration Bridge (string value)
+#integration_bridge=br-int
+
+
+#
+# Options defined in nova.virt.vmwareapi.vim_util
+#
+
+# The maximum number of ObjectContent data objects that should
+# be returned in a single result. A positive value will cause
+# the operation to suspend the retrieval when the count of
+# objects reaches the specified maximum. The server may still
+# limit the count to something less than the configured value.
+# Any remaining objects may be retrieved with additional
+# requests. (integer value)
+#maximum_objects=100
+
+
+#
+# Options defined in nova.virt.vmwareapi.vmops
+#
+
+# The prefix where cached images are stored. This is NOT
+# the full path - just a folder prefix. This should only be
+# used when a datastore cache should be shared between compute
+# nodes. Note: this should only be used when the compute nodes
+# have a shared file system. (string value)
+#cache_prefix=<None>
+
+
+[workarounds]
+
+#
+# Options defined in nova.utils
+#
+
+# This option allows a fallback to sudo for performance
+# reasons. For example see
+# https://bugs.launchpad.net/nova/+bug/1415106 (boolean value)
+#disable_rootwrap=false
+
+# When using libvirt 1.2.2, live snapshots fail intermittently
+# under load. This config option provides a mechanism to
+# disable live snapshots while this is resolved. See
+# https://bugs.launchpad.net/nova/+bug/1334398 (boolean value)
+#disable_libvirt_livesnapshot=true
+
+# Whether to destroy instances on startup when we suspect they
+# have previously been evacuated. This can result in data loss
+# if undesired. See https://launchpad.net/bugs/1419785
+# (boolean value)
+#destroy_after_evacuate=true
+
+
+[xenserver]
+
+#
+# Options defined in nova.virt.xenapi.agent
+#
+
+# Number of seconds to wait for agent reply (integer value)
+#agent_timeout=30
+
+# Number of seconds to wait for agent to be fully operational
+# (integer value)
+#agent_version_timeout=300
+
+# Number of seconds to wait for agent reply to resetnetwork
+# request (integer value)
+#agent_resetnetwork_timeout=60
+
+# Specifies the path in which the XenAPI guest agent should be
+# located. If the agent is present, network configuration is
+# not injected into the image. Used if
+# compute_driver=xenapi.XenAPIDriver and flat_injected=True
+# (string value)
+#agent_path=usr/sbin/xe-update-networking
+
+# Disables the use of the XenAPI agent in any image regardless
+# of what image properties are present. (boolean value)
+#disable_agent=false
+
+# Determines if the XenAPI agent should be used when the image
+# used does not contain a hint to declare if the agent is
+# present or not. The hint is a glance property
+# "xenapi_use_agent" that has the value "True" or "False".
+# Note that waiting for the agent when it is not present will
+# significantly increase server boot times. (boolean value)
+#use_agent_default=false
+
+
+#
+# Options defined in nova.virt.xenapi.client.session
+#
+
+# Timeout in seconds for XenAPI login. (integer value)
+#login_timeout=10
+
+# Maximum number of concurrent XenAPI connections. Used only
+# if compute_driver=xenapi.XenAPIDriver (integer value)
+#connection_concurrent=5
+
+
+#
+# Options defined in nova.virt.xenapi.driver
+#
+
+# URL for connection to XenServer/Xen Cloud Platform. A
+# special value of unix://local can be used to connect to the
+# local unix socket. Required if
+# compute_driver=xenapi.XenAPIDriver (string value)
+#connection_url=<None>
+
+# Username for connection to XenServer/Xen Cloud Platform.
+# Used only if compute_driver=xenapi.XenAPIDriver (string
+# value)
+#connection_username=root
+
+# Password for connection to XenServer/Xen Cloud Platform.
+# Used only if compute_driver=xenapi.XenAPIDriver (string
+# value)
+#connection_password=<None>
+
+# The interval used for polling of coalescing vhds. Used only
+# if compute_driver=xenapi.XenAPIDriver (floating point value)
+#vhd_coalesce_poll_interval=5.0
+
+# Ensure compute service is running on host XenAPI connects
+# to. (boolean value)
+#check_host=true
+
+# Max number of times to poll for VHD to coalesce. Used only
+# if compute_driver=xenapi.XenAPIDriver (integer value)
+#vhd_coalesce_max_attempts=20
+
+# Base path to the storage repository (string value)
+#sr_base_path=/var/run/sr-mount
+
+# The iSCSI Target Host (string value)
+#target_host=<None>
+
+# The iSCSI Target Port, default is port 3260 (string value)
+#target_port=3260
+
+# IQN Prefix (string value)
+#iqn_prefix=iqn.2010-10.org.openstack
+
+# Used to enable the remapping of VBD dev (Works around an
+# issue in Ubuntu Maverick) (boolean value)
+#remap_vbd_dev=false
+
+# Specify prefix to remap VBD dev to (ex. /dev/xvdb ->
+# /dev/sdb) (string value)
+#remap_vbd_dev_prefix=sd
+
+
+#
+# Options defined in nova.virt.xenapi.image.bittorrent
+#
+
+# Base URL for torrent files. (string value)
+#torrent_base_url=<None>
+
+# Probability that peer will become a seeder. (1.0 = 100%)
+# (floating point value)
+#torrent_seed_chance=1.0
+
+# Number of seconds after downloading an image via BitTorrent
+# that it should be seeded for other peers. (integer value)
+#torrent_seed_duration=3600
+
+# Cached torrent files not accessed within this number of
+# seconds can be reaped (integer value)
+#torrent_max_last_accessed=86400
+
+# Beginning of port range to listen on (integer value)
+#torrent_listen_port_start=6881
+
+# End of port range to listen on (integer value)
+#torrent_listen_port_end=6891
+
+# Number of seconds a download can remain at the same progress
+# percentage w/o being considered a stall (integer value)
+#torrent_download_stall_cutoff=600
+
+# Maximum number of seeder processes to run concurrently
+# within a given dom0. (-1 = no limit) (integer value)
+#torrent_max_seeder_processes_per_host=1
+
+
+#
+# Options defined in nova.virt.xenapi.pool
+#
+
+# To use for hosts with different CPUs (boolean value)
+#use_join_force=true
+
+
+#
+# Options defined in nova.virt.xenapi.vif
+#
+
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+#ovs_integration_bridge=xapi1
+
+
+#
+# Options defined in nova.virt.xenapi.vm_utils
+#
+
+# Cache glance images locally. `all` will cache all images,
+# `some` will only cache images that have the image_property
+# `cache_in_nova=True`, and `none` turns off caching entirely
+# (string value)
+#cache_images=all
+
+# Compression level for images, e.g., 9 for gzip -9. Range is
+# 1-9, 9 being most compressed but most CPU intensive on dom0.
+# (integer value)
+#image_compression_level=<None>
+
+# Default OS type (string value)
+#default_os_type=linux
+
+# Time to wait for a block device to be created (integer
+# value)
+#block_device_creation_timeout=10
+
+# Maximum size in bytes of kernel or ramdisk images (integer
+# value)
+#max_kernel_ramdisk_size=16777216
+
+# Filter for finding the SR to be used to install guest
+# instances on. To use the Local Storage in default
+# XenServer/XCP installations set this flag to other-config
+# :i18n-key=local-storage. To select an SR with a different
+# matching criteria, you could set it to other-
+# config:my_favorite_sr=true. On the other hand, to fall back
+# on the Default SR, as displayed by XenCenter, set this flag
+# to: default-sr:true (string value)
+#sr_matching_filter=default-sr:true
+
+# Whether to use sparse_copy for copying data on a resize down
+# (False will use standard dd). This speeds up resizes down
+# considerably since large runs of zeros won't have to be
+# rsynced (boolean value)
+#sparse_copy=true
+
+# Maximum number of retries to unplug VBD. if <=0, should try
+# once and no retry (integer value)
+#num_vbd_unplug_retries=10
+
+# Whether or not to download images via Bit Torrent
+# (all|some|none). (string value)
+#torrent_images=none
+
+# Name of network to use for booting iPXE ISOs (string value)
+#ipxe_network_name=<None>
+
+# URL to the iPXE boot menu (string value)
+#ipxe_boot_menu_url=<None>
+
+# Name and optionally path of the tool used for ISO image
+# creation (string value)
+#ipxe_mkisofs_cmd=mkisofs
+
+
+#
+# Options defined in nova.virt.xenapi.vmops
+#
+
+# Number of seconds to wait for instance to go to running
+# state (integer value)
+#running_timeout=60
+
+# The XenAPI VIF driver using XenServer Network APIs. (string
+# value)
+#vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver
+
+# Dom0 plugin driver used to handle image uploads. (string
+# value)
+#image_upload_handler=nova.virt.xenapi.image.glance.GlanceStore
+
+
+#
+# Options defined in nova.virt.xenapi.volume_utils
+#
+
+# Number of seconds to wait for an SR to settle if the VDI
+# does not exist when first introduced (integer value)
+#introduce_vdi_retry_wait=20
+
+
+[zookeeper]
+
+#
+# Options defined in nova.servicegroup.drivers.zk
+#
+
+# The ZooKeeper addresses for servicegroup service in the
+# format of host1:port,host2:port,host3:port (string value)
+#address=<None>
+
+# The recv_timeout parameter for the zk session (integer
+# value)
+#recv_timeout=4000
+
+# The prefix used in ZooKeeper to store ephemeral nodes
+# (string value)
+#sg_prefix=/servicegroups
+
+# Number of seconds to wait until retrying to join the session
+# (integer value)
+#sg_retry_interval=5
+
+
+[matchmaker_redis]
+
+#
+# From oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+
+[matchmaker_ring]
+
+#
+# From oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group;name - DEFAULT;matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[oslo_concurrency]
+
+#
+# From oslo.concurrency
+#
+
+# Enables or disables inter-process locks. (boolean value)
+# Deprecated group;name - DEFAULT;disable_process_locking
+#disable_process_locking=false
+
+# Directory to use for lock files. For security, the specified directory
+# should only be writable by the user running the processes that need locking.
+# Defaults to environment variable OSLO_LOCK_PATH. If external locks are used,
+# a lock path must be set. (string value)
+# Deprecated group;name - DEFAULT;lock_path
+#lock_path=/var/lib/nova/tmp
+
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# address prefix used when sending to a specific server (string value)
+# Deprecated group;name - [amqp1]/server_request_prefix
+#server_request_prefix=exclusive
+
+# address prefix used when broadcasting to all servers (string value)
+# Deprecated group;name - [amqp1]/broadcast_prefix
+#broadcast_prefix=broadcast
+
+# address prefix when sending to any server in group (string value)
+# Deprecated group;name - [amqp1]/group_request_prefix
+#group_request_prefix=unicast
+
+# Name for the AMQP container (string value)
+# Deprecated group;name - [amqp1]/container_name
+#container_name=<None>
+
+# Timeout for inactive connections (in seconds) (integer value)
+# Deprecated group;name - [amqp1]/idle_timeout
+#idle_timeout=0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+# Deprecated group;name - [amqp1]/trace
+#trace=false
+
+# CA certificate PEM file for verifying server certificate (string value)
+# Deprecated group;name - [amqp1]/ssl_ca_file
+#ssl_ca_file =
+
+# Identifying certificate PEM file to present to clients (string value)
+# Deprecated group;name - [amqp1]/ssl_cert_file
+#ssl_cert_file =
+
+# Private key PEM file used to sign cert_file certificate (string value)
+# Deprecated group;name - [amqp1]/ssl_key_file
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+# Deprecated group;name - [amqp1]/ssl_key_password
+#ssl_key_password=<None>
+
+# Accept clients using either SSL or plain TCP (boolean value)
+# Deprecated group;name - [amqp1]/allow_insecure_clients
+#allow_insecure_clients=false
+
+
+[oslo_messaging_qpid]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group;name - DEFAULT;rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group;name - DEFAULT;amqp_auto_delete
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group;name - DEFAULT;rpc_conn_pool_size
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+# Deprecated group;name - DEFAULT;qpid_hostname
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+# Deprecated group;name - DEFAULT;qpid_port
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+# Deprecated group;name - DEFAULT;qpid_hosts
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+# Deprecated group;name - DEFAULT;qpid_username
+#qpid_username =
+
+# Password for Qpid connection. (string value)
+# Deprecated group;name - DEFAULT;qpid_password
+#qpid_password =
+
+# Space separated list of SASL mechanisms to use for auth. (string value)
+# Deprecated group;name - DEFAULT;qpid_sasl_mechanisms
+#qpid_sasl_mechanisms =
+
+# Seconds between connection keepalive heartbeats. (integer value)
+# Deprecated group;name - DEFAULT;qpid_heartbeat
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+# Deprecated group;name - DEFAULT;qpid_protocol
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+# Deprecated group;name - DEFAULT;qpid_tcp_nodelay
+#qpid_tcp_nodelay=true
+
+# The number of prefetched messages held by receiver. (integer value)
+# Deprecated group;name - DEFAULT;qpid_receiver_capacity
+#qpid_receiver_capacity=1
+
+# The qpid topology version to use. Version 1 is what was originally used by
+# impl_qpid. Version 2 includes some backwards-incompatible changes that allow
+# broker federation to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break. (integer value)
+# Deprecated group;name - DEFAULT;qpid_topology_version
+#qpid_topology_version=1
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group;name - DEFAULT;rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group;name - DEFAULT;amqp_auto_delete
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group;name - DEFAULT;rpc_conn_pool_size
+#rpc_conn_pool_size=30
+
+# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
+# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
+# distributions. (string value)
+# Deprecated group;name - DEFAULT;kombu_ssl_version
+#kombu_ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group;name - DEFAULT;kombu_ssl_keyfile
+#kombu_ssl_keyfile =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group;name - DEFAULT;kombu_ssl_certfile
+#kombu_ssl_certfile =
+
+# SSL certification authority file (valid only if SSL enabled). (string value)
+# Deprecated group;name - DEFAULT;kombu_ssl_ca_certs
+#kombu_ssl_ca_certs =
+
+# How long to wait before reconnecting in response to an AMQP consumer cancel
+# notification. (floating point value)
+# Deprecated group;name - DEFAULT;kombu_reconnect_delay
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used. (string value)
+# Deprecated group;name - DEFAULT;rabbit_host
+#rabbit_host=localhost
+
+# The RabbitMQ broker port where a single node is used. (integer value)
+# Deprecated group;name - DEFAULT;rabbit_port
+#rabbit_port=5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+# Deprecated group;name - DEFAULT;rabbit_hosts
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+# Deprecated group;name - DEFAULT;rabbit_use_ssl
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+# Deprecated group;name - DEFAULT;rabbit_userid
+#rabbit_userid=guest
+
+# The RabbitMQ password. (string value)
+# Deprecated group;name - DEFAULT;rabbit_password
+#rabbit_password=guest
+
+# The RabbitMQ login method. (string value)
+# Deprecated group;name - DEFAULT;rabbit_login_method
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+# Deprecated group;name - DEFAULT;rabbit_virtual_host
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to RabbitMQ. (integer
+# value)
+# Deprecated group;name - DEFAULT;rabbit_retry_backoff
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
+# count). (integer value)
+# Deprecated group;name - DEFAULT;rabbit_max_retries
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you
+# must wipe the RabbitMQ database. (boolean value)
+# Deprecated group;name - DEFAULT;rabbit_ha_queues
+#rabbit_ha_queues=false
+
+# Number of seconds after which the Rabbit broker is considered down if
+# heartbeat's keep-alive fails (0 disable the heartbeat). (integer value)
+#heartbeat_timeout_threshold=60
+
+# How often times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate=2
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
+# Deprecated group;name - DEFAULT;fake_rabbit
+#fake_rabbit=false
openstack/ansible/files/nova-controller.conf.j2
@@ -0,0 +1,113 @@
+[DEFAULT]
+notification_driver =
+notification_topics=notifications
+rpc_backend=rabbit
+notify_api_faults=False
+state_path=/var/lib/nova
+report_interval=10
+enabled_apis=ec2,osapi_compute,metadata
+ec2_listen=0.0.0.0
+ec2_workers=4
+osapi_compute_listen=0.0.0.0
+osapi_compute_workers=4
+metadata_listen=0.0.0.0
+metadata_workers=4
+service_down_time=60
+rootwrap_config=/etc/nova/rootwrap.conf
+auth_strategy=keystone
+use_forwarded_for=False
+ssl_only=True
+cert=/etc/nova/nova.crt
+key=/etc/nova/nova.key
+novncproxy_host=0.0.0.0
+novncproxy_port=6080
+network_api_class=nova.network.neutronv2.api.API
+default_floating_pool=public
+force_snat_range=0.0.0.0/0
+metadata_host=192.168.0.10
+dhcp_domain=novalocal
+security_group_api=neutron
+debug=False
+verbose=True
+log_dir=/var/log/nova
+use_syslog=False
+cpu_allocation_ratio=16.0
+ram_allocation_ratio=1.5
+scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,CoreFilter
+vif_plugging_is_fatal=True
+vif_plugging_timeout=300
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+novncproxy_base_url=https://{{ public_ip.stdout }}:6080/vnc_auto.html
+vncserver_listen=0.0.0.0
+vncserver_proxyclient_address={{ public_ip.stdout }}
+volume_api_class=nova.volume.cinder.API
+amqp_durable_queues=False
+rabbit_hosts=192.168.0.10:5672
+rabbit_use_ssl=False
+rabbit_userid=guest
+rabbit_ha_queues=False
+rabbit_password=guest
+rabbit_host=192.168.0.10
+sql_connection=mysql://nova:5dc7786f29f7444a@192.168.0.10/nova
+rabbit_virtual_host=/
+image_service=nova.image.glance.GlanceImageService
+rabbit_port=5672
+lock_path=/var/lib/nova/tmp
+osapi_volume_listen=0.0.0.0
+[api_database]
+[barbican]
+[cells]
+[cinder]
+[conductor]
+[database]
+[ephemeral_storage_encryption]
+[glance]
+api_servers=192.168.0.10:9292
+[guestfs]
+[hyperv]
+[image_file_url]
+[ironic]
+[keymgr]
+[keystone_authtoken]
+auth_uri=http://192.168.0.10:5000/
+auth_host=192.168.0.10
+auth_port=35357
+auth_protocol=http
+admin_user=nova
+admin_password=ec0e7532149c4c1b
+admin_tenant_name=services
+[libvirt]
+vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
+[metrics]
+[neutron]
+service_metadata_proxy=True
+metadata_proxy_shared_secret=4f67c8d5ca4e41b7
+url=http://192.168.0.10:9696
+admin_username=neutron
+admin_password=b80e24e5e8fe4939
+admin_tenant_name=services
+region_name=RegionOne
+admin_auth_url=http://192.168.0.10:35357/v2.0
+auth_strategy=keystone
+ovs_bridge=br-int
+extension_sync_interval=600
+url_timeout=30
+default_tenant_id=default
+[osapi_v3]
+enabled=False
+[rdp]
+[serial_console]
+[spice]
+[ssl]
+[trusted_computing]
+[upgrade_levels]
+[vmware]
+[workarounds]
+[xenserver]
+[zookeeper]
+[matchmaker_redis]
+[matchmaker_ring]
+[oslo_concurrency]
+[oslo_messaging_amqp]
+[oslo_messaging_qpid]
+[oslo_messaging_rabbit]
openstack/ansible/files/ravelo_test.pub
@@ -0,0 +1,1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCv3sAeFXkQg8YpeDqDywDrxWwd23roS7k53r5i/+FW0FER+BnVtF1Ouj42rdx/D6SqbJhrPeiG6PJCwh+OE3XPXS9sjAbCwo5JufSgYLOkBK25SAkLgSfhrYMnoVtu83/PjlVfePY10IoyJVNYhPB8ERpmDBL2posdt5xCtjSzfA+jQnHujbulAzjsY+NHCL6cL24j2GlMlyTztrdq7UGAHqm9Nlw3tpei1k81Q5WpC7Bvl6hzfrzbRORdC+zOUcEwvtqhboTdiG4Q8kYjBRsibFOhg1NGRs9pLujoM0Sr842WtMV9qXlS7du13ZiFJyTZmkuLApYm5fqXvE1UwL4r
openstack/ansible/files/ssh_config
@@ -0,0 +1,21 @@
+Host neutron
+ Hostname neutron
+ User centos
+Host controller
+ Hostname controller
+ User centos
+Host compute1
+ Hostname compute1
+ User centos
+Host compute2
+ Hostname compute2
+ User centos
+Host storage1
+ Hostname storage1
+ User ceph
+Host storage2
+ Hostname storage2
+ User ceph
+Host storage3
+ Hostname storage3
+ User ceph
openstack/ansible/files/ssl_vhost.conf
@@ -0,0 +1,45 @@
+# ************************************
+# Vhost template in module puppetlabs-apache
+# Managed by Puppet
+# ************************************
+
+<VirtualHost *:443>
+ ServerName controller.localdomain
+
+ ## Vhost docroot
+ DocumentRoot "/var/www/"
+ ## Alias declarations for resources outside the DocumentRoot
+ Alias /dashboard/static "/usr/share/openstack-dashboard/static"
+
+ ## Directories, there should at least be a declaration for /var/www/
+
+ <Directory "/var/www/">
+ Options Indexes FollowSymLinks MultiViews
+ AllowOverride None
+ Require all granted
+ </Directory>
+
+ ## Logging
+ ErrorLog "/var/log/httpd/horizon_ssl_error.log"
+ ServerSignature Off
+ CustomLog "/var/log/httpd/horizon_ssl_access.log" combined
+
+ ## RedirectMatch rules
+ RedirectMatch permanent ^/$ /dashboard
+
+ ## Server aliases X
+ ServerAlias *
+ ServerAlias 192.168.0.10
+ ServerAlias controller.localdomain
+ ServerAlias localhost
+
+ ## SSL directives
+ SSLEngine on
+ SSLCertificateFile "/etc/pki/tls/certs/ssl_ps_server.crt"
+ SSLCertificateKeyFile "/etc/pki/tls/private/ssl_ps_server.key"
+ SSLCACertificatePath "/etc/pki/tls/certs"
+ SSLCACertificateFile "/etc/pki/tls/certs/ssl_ps_chain.crt"
+ WSGIDaemonProcess horizon-ssl group=apache processes=3 threads=10 user=apache
+ WSGIProcessGroup horizon-ssl
+ WSGIScriptAlias /dashboard "/usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi"
+</VirtualHost>
openstack/ansible/files/vhost.conf
@@ -0,0 +1,38 @@
+# ************************************
+# Vhost template in module puppetlabs-apache
+# Managed by Puppet
+# ************************************
+
+<VirtualHost *:80>
+ ServerName controller.localdomain
+
+ ## Vhost docroot
+ DocumentRoot "/var/www/"
+ ## Alias declarations for resources outside the DocumentRoot
+ Alias /dashboard/static "/usr/share/openstack-dashboard/static"
+
+ ## Directories, there should at least be a declaration for /var/www/
+
+ <Directory "/var/www/">
+ Options Indexes FollowSymLinks MultiViews
+ AllowOverride None
+ Require all granted
+ </Directory>
+
+ ## Logging
+ ErrorLog "/var/log/httpd/horizon_error.log"
+ ServerSignature Off
+ CustomLog "/var/log/httpd/horizon_access.log" combined
+
+ ## RedirectMatch rules
+ RedirectMatch permanent ^/$ /dashboard
+
+ ## Server aliases X
+ ServerAlias *
+ ServerAlias 192.168.0.10
+ ServerAlias controller.localdomain
+ ServerAlias localhost
+ WSGIDaemonProcess dashboard group=apache processes=3 threads=10 user=apache
+ WSGIProcessGroup dashboard
+ WSGIScriptAlias /dashboard "/usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi"
+</VirtualHost>
openstack/ansible/basic.yml
@@ -0,0 +1,99 @@
+---
+- hosts:
+ - controller
+ remote_user: centos
+ tasks:
+ - name: Generate SSH key (centos on controller)
+ user: name=centos generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa
+
+ - name: Collect SSH public key (centos on controller)
+ fetch: src=/home/centos/.ssh/id_rsa.pub dest=/tmp/controller_id_rsa.pub flat=yes
+
+ - name: Collect SSH private key (centos on controller)
+ fetch: src=/home/centos/.ssh/id_rsa dest=/tmp/controller_id_rsa flat=yes
+
+ - name: Record public IP
+ shell: curl http://ipecho.net/plain;
+ register: public_ip
+
+ - name: Deploy Horizon Vhost
+ copy: src=files/vhost.conf
+ dest=/etc/httpd/conf.d/15-horizon_vhost.conf
+
+ - name: Deploy Horizon Vhost
+ copy: src=files/ssl_vhost.conf
+ dest=/etc/httpd/conf.d/15-horizon_ssl_vhost.conf
+
+- hosts: openstack-lab
+ remote_user: root
+ tasks:
+ - name: Setup /etc/hosts
+ copy: src=files/hosts dest=/etc/hosts
+
+ - name: Place ssh config file
+ copy: src=files/ssh_config
+ dest={{ item.home }}/.ssh/config
+ owner={{ item.user }}
+ group={{ item.user }}
+ with_items:
+ - { home: '/root', user: 'root' }
+ - { home: '/home/centos', user: 'centos' }
+ - { home: '/home/ceph', user: 'ceph' }
+ ignore_errors: yes
+
+ - name: Place ssh key
+ copy: src=/tmp/controller_id_rsa
+ dest={{ item.home }}/.ssh/id_rsa
+ owner={{ item.user }}
+ group={{ item.user }}
+ mode=0400
+ with_items:
+ - { home: '/root', user: 'root' }
+ - { home: '/home/centos', user: 'centos' }
+ - { home: '/home/ceph', user: 'ceph' }
+ ignore_errors: yes
+
+ - name: Add authorized key
+ authorized_key:
+ user: "{{ item }}"
+ key: "{{ lookup('file', '/tmp/controller_id_rsa.pub') }}"
+ with_items:
+ - ceph
+ - centos
+ - root
+ ignore_errors: yes
+
+ - name: Add bashrc
+ template: src=files/bashrc.j2
+ dest={{ item.home }}/.bashrc
+ owner={{ item.user }}
+ group={{ item.user }}
+ with_items:
+ - { home: '/root', user: 'root' }
+ - { home: '/home/centos', user: 'centos' }
+ - { home: '/home/ceph', user: 'ceph' }
+ ignore_errors: yes
+
+ - name: Place known_hosts
+ copy: src=files/known_hosts
+ dest={{ item.home }}/.ssh/known_hosts
+ owner={{ item.user }}
+ group={{ item.user }}
+ with_items:
+ - { home: '/root', user: 'root' }
+ - { home: '/home/centos', user: 'centos' }
+ - { home: '/home/ceph', user: 'ceph' }
+ ignore_errors: yes
+
+ - name: tools
+ yum: name={{ item }} state=latest
+ update_cache=yes
+ with_items:
+ - vim
+ - nano
+ - htop
+ - screen
+ - git
+ - ansible
+ - yum-plugin-priorities
+ - bash-completion
openstack/ansible/ceph.yml
@@ -0,0 +1,39 @@
+---
+# Based on http://docs.ceph.com/docs/master/start/quick-start-preflight/
+
+- hosts:
+ - storage1
+ - storage2
+ - storage3
+ remote_user: root
+ vars:
+ ceph_release: hammer
+ distro: el7
+ tasks:
+
+ - name: Ceph Dependencies and ceph-deploy
+ yum: name={{ item }} state=latest
+ update_cache=yes
+ with_items:
+ - ntp
+ - ntpdate
+ - ntp-doc
+
+ - name: Make wheel group passwordless sudoers
+ lineinfile: "dest=/etc/sudoers state=present regexp='^%wheel' line='%wheel ALL=(ALL) NOPASSWD: ALL' validate='visudo -cf %s'"
+ - name: Fix requiretty issue in sudoers
+ lineinfile: "dest=/etc/sudoers state=present regexp='^Defaults.*requiretty' line='Defaults:ceph !requiretty' validate='visudo -cf %s'"
+
+ - name: Add ceph user
+ user: name=ceph groups=wheel append=yes
+
+ - name: Add authorized key
+ authorized_key:
+ user=ceph
+ key="{{ lookup('file', '/tmp/controller_id_rsa.pub') }}"
+
+ - name: Disable SELinux
+ selinux: state=disabled
+
+
+
openstack/ansible/novnc.yml
@@ -0,0 +1,40 @@
+---
+- hosts: controller
+ remote_user: root
+ vars:
+ tasks:
+ - name: Record public IP
+ shell: curl http://ipecho.net/plain;
+ register: public_ip
+
+ - name: Deploy Nova Compute Template
+ template: src=files/nova-controller.conf.j2 dest=/etc/nova/nova.conf
+
+ - name: Restart httpd
+ service: name=httpd state=restarted
+
+- hosts:
+ - compute1
+ - compute2
+ remote_user: root
+ vars:
+ controller_public_ip: "{{ hostvars['controller'].public_ip.stdout }}"
+ compute_ip: "{{ ansible_default_ipv4.address }}"
+ tasks:
+
+
+ - name: Deploy Nova Compute Template
+ template: src=files/nova-compute.conf.j2 dest=/etc/nova/nova.conf
+
+ - name: Restart Nova Compute Service
+ service: name={{ item }} state=restarted
+ with_items:
+ - openstack-nova-compute
+
+- hosts: controller
+ remote_user: root
+ tasks:
+ - name: Restart Nova Compute and VNC Proxy Service
+ service: name={{ item }} state=restarted
+ with_items:
+ - openstack-nova-novncproxy
openstack/devstack/lab-01/ansible/local.conf.j2
@@ -0,0 +1,35 @@
+[[local|localrc]]
+
+HOST_IP={{ host_ip }}
+
+SERVICE_TOKEN=DKS3MQMX72MSLQP231N
+ADMIN_PASSWORD=supersecret
+MYSQL_PASSWORD=radicallyrelational
+RABBIT_PASSWORD=rasciallyrabbit
+SERVICE_PASSWORD=supersecret
+
+FLAT_INTERFACE=eth0
+
+# private IPs
+FIXED_RANGE=10.0.0.0/16
+FIXED_NETWORK_SIZE=65534
+# public IPs
+FLOATING_RANGE=192.168.0.128/25
+
+NOVA_VNC_ENABLED=True
+NOVNCPROXY_URL="http://{{ public_ip.stdout }}:6080/vnc_auto.html"
+VNCSERVER_LISTEN={{ ansible_default_ipv4.address }}
+VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN
+
+MULTI_HOST=True
+Q_PLUGIN=ml2
+ENABLE_TENANT_TUNNELS=True
+
+LOGFILE=$DEST/logs/stack.sh.log
+LOGDAYS=2
+
+SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
+SWIFT_REPLICAS=1
+SWIFT_DATA_DIR=$DEST/data
+
+enable_service tempest
openstack/devstack/lab-01/ansible/main.yml
@@ -0,0 +1,38 @@
+---
+- hosts: dev.controller
+ vars:
+ host_ip: "{{ ansible_default_ipv4.address }}"
+ tasks:
+ - name: Record public IP
+ shell: wget -qO- http://ipecho.net/plain ; echo
+ register: public_ip
+
+ - name: Install tools
+ apt: name={{item}} state=latest
+ sudo: yes
+ with_items:
+ - git
+ - htop
+ - vim
+
+ - name: Checkout DevStack
+ git:
+ repo: https://git.openstack.org/openstack-dev/devstack
+ dest: /home/ubuntu/devstack
+
+ - name: Setup local.conf
+ template:
+ src: local.conf.j2
+ dest: /home/ubuntu/devstack/local.conf
+
+ - name: UnStack (idempotency)
+ shell: ./unstack.sh chdir=/home/ubuntu/devstack
+ async: 120
+ poll: 5
+ ignore_errors: yes
+
+ - name: Install DevStack (async)
+ shell: ./stack.sh chdir=/home/ubuntu/devstack
+ async: 900
+ poll: 5
+
openstack/devstack/lab-01/img/horizon-dashboard.png
Binary file
openstack/devstack/lab-01/img/horizon-hypervisors.png
Binary file
openstack/devstack/lab-01/img/horizon-hypervisors2.png
Binary file
openstack/devstack/lab-01/img/horizon-login.png
Binary file
openstack/devstack/lab-01/.DS_Store
Binary file
openstack/devstack/lab-01/common-errors.md
@@ -0,0 +1,8 @@
+# Common Errors:
+
+
+### Install DevStack, `stack.sh` fails "Keystone did not start!"
+
+ This error usually happens after stack.sh has been running for a while. The reason keystone can't start up is likely because the HOST_IP parameter in local.conf is not correct; double-check the address and make sure it matches the eth0 interface (`ip addr show dev eth0`)
+
+ **Solution:** fix the config typo and run `./unstack.sh` and re-run `./stack.sh`
openstack/devstack/lab-01/example-local.conf
@@ -0,0 +1,63 @@
+[[local|localrc]]
+
+# Passwords
+# ----------------
+
+SERVICE_TOKEN=DKS3MQMX72MSLQP231N
+ADMIN_PASSWORD=supersecret
+MYSQL_PASSWORD=radicallyrelational
+RABBIT_PASSWORD=rasciallyrabbit
+SERVICE_PASSWORD=supersecret
+
+# Customized Networking Configurations
+# ------------------------------------
+FLAT_INTERFACE=eth0
+
+# private IPs
+FIXED_RANGE=10.0.0.0/16
+FIXED_NETWORK_SIZE=65534
+
+# public IPs
+FLOATING_RANGE=192.168.0.0/24
+
+
+MULTI_HOST=True
+
+# Logging
+# -------
+
+# By default ``stack.sh`` output only goes to the terminal where it runs. It can
+# be configured to additionally log to a file by setting ``LOGFILE`` to the full
+# path of the destination log file. A timestamp will be appended to the given name.
+
+# $DEST is the install location (default /opt/stack)
+LOGFILE=$DEST/logs/stack.sh.log
+
+# Old log files are automatically removed after 7 days to keep things neat. Change
+# the number of days by setting ``LOGDAYS``.
+LOGDAYS=2
+
+# Swift
+# -----
+
+# Swift is now used as the back-end for the S3-like object store. If Nova's
+# objectstore (``n-obj`` in ``ENABLED_SERVICES``) is enabled, it will NOT
+# run if Swift is enabled. Setting the hash value is required and you will
+# be prompted for it if Swift is enabled so just set it to something already:
+SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
+
+# For development purposes the default of 3 replicas is usually not required.
+# Set this to 1 to save some resources:
+SWIFT_REPLICAS=1
+
+# The data for Swift is stored by default in (``$DEST/data/swift``),
+# or (``$DATA_DIR/swift``) if ``DATA_DIR`` has been set, and can be
+# moved by setting ``SWIFT_DATA_DIR``. The directory will be created
+# if it does not exist.
+SWIFT_DATA_DIR=$DEST/data
+
+# Tempest
+# -------
+
+# Install the tempest test suite
+enable_service tempest
openstack/devstack/lab-01/README.md
@@ -0,0 +1,92 @@
+# Lab 1 - DevStack Controller
+
+ Lab Objectives:
+
+ 0. Install a DevStack controller
+
+## Connect to your _controller_ instance:
+
+ 0. `chmod 400 student.pem`
+ 0. `ssh ubuntu@<CONTROLLER IP> -i student.pem`
+ 0. `sudo whoami`
+
+## Install DevStack:
+
+:red_circle: TODO: brief paragraph about packstack (i.e. that it is puppet based)
+
+ 0. `sudo apt-get install git`
+ 0. `git clone https://git.openstack.org/openstack-dev/devstack`
+ 0. `cd devstack`
+ 0. `cp samples/local.conf local.conf`
+ 0. Edit the sample `local.conf` file to match the complete config provided below; additional configurations not provided in the sample are called out at the end. Read the comments provided in the sample config file. Be sure to replace the **<CONTROLLER PUBLIC IP>** with the appropriate IP address.
+
+ `nano local.conf` or `vim local.conf`
+
+ ``` shell
+[[local|localrc]]
+
+# tokens and passwords
+SERVICE_TOKEN=DKS3MQMX72MSLQP231N # an alphanumeric token
+ADMIN_PASSWORD=supersecret
+MYSQL_PASSWORD=radicallyrelational
+RABBIT_PASSWORD=rascallyrabbit
+SERVICE_PASSWORD=supersecret
+
+# $DEST is the install location (default /opt/stack)
+LOGFILE=$DEST/logs/stack.sh.log
+LOGDAYS=2
+
+SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
+SWIFT_REPLICAS=1
+SWIFT_DATA_DIR=$DEST/data
+enable_service tempest
+
+## -- Configs not provided in sample -- ##
+FLAT_INTERFACE=eth0
+FIXED_RANGE=10.0.0.0/16 # private IP range
+FIXED_NETWORK_SIZE=65534
+FLOATING_RANGE=192.168.0.0/24 # public IP range
+
+# Please replace x.x.x.x with the controller public IP
+NOVNCPROXY_URL="http://x.x.x.x:6080/vnc_auto.html"
+NOVA_VNC_ENABLED=True
+
+# Please replace x.x.x.x with the controller internal IP
+VNCSERVER_LISTEN=x.x.x.x
+VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN
+
+MULTI_HOST=True
+## ----------------------------------- ##
+ ```
+
+ 0. Run stack.sh; this will take a while. See [common errors](common-errors.md) if anything fails.
+
+ `./stack.sh 2>&1 | tee stack.log`
+
+ > :white_check_mark: **Additional Info**:
+ >
+ > * `stack.sh` is a very long, but well documented script. Check it out [here](http://docs.openstack.org/developer/devstack/stack.sh.html).
+ >
+ > * In order to capture the printed results of `stack.sh` we pipe it to a file
+ > * `2>&1` pipes stderr to stdout (allowing us to capture both)
+ > * `|` is a pipe, it sends values to the next command
+ > * `tee` splits a pipe (like a "T"); it sends printed results to a file `stack.log` in addition to printing to the screen
+ > * Logs are also available for `tail -f`-ing at:
+ > * `/opt/stack/logs/stack.sh.log`
+ > * `/opt/stack/logs/stack.sh.log.summary`
+
+ 0. Login to the OpenStack Horizon Web Interface by navigating your browser to the public IP address of your instance, explore the accessible pages and fill in the table of information
+
+ 
+
+ 
+
+ :red_circle: TODO items from interface
+
+ | Info to find | Value |
+ | -------------| ----- |
+ | Item 1 | |
+ | Item 2 | |
+
+
+#### [Next Lab](../lab-02)
openstack/devstack/lab-02/img/.DS_Store
Binary file
openstack/devstack/lab-02/img/horizon-active.png
Binary file
openstack/devstack/lab-02/img/horizon-console.png
Binary file
openstack/devstack/lab-02/img/horizon-console2.png
Binary file
openstack/devstack/lab-02/img/horizon-details.png
Binary file
openstack/devstack/lab-02/img/horizon-launch.png
Binary file
openstack/devstack/lab-02/img/horizon-spawn.png
Binary file
openstack/devstack/lab-02/.DS_Store
Binary file
openstack/devstack/lab-02/README.md
@@ -0,0 +1,41 @@
+# Lab 2 - DevStack Horizon Interface
+
+ Lab Objectives:
+
+ 0. Become familiar with the OpenStack Horizon Interface
+ 0. Launch an instance from the Horizon Interface
+
+## Add a User:
+
+ :red_circle: TODO, currently broken in DevStack, launching instance should change to be user logged in
+
+## Add a Project:
+
+ :red_circle: TODO, currently broken in DevStack
+
+## Launch an Instance:
+
+ 0. Open the Horizon interface (navigate your browser to the public IP address of the controller)
+ 0. Project > Compute > Instances > Launch Instance
+
+ 
+
+ 0. Set details and Launch
+
+ 
+ 
+
+ 0. Open console, login and interact with the newly launched instance
+
+ 
+ 
+ 
+
+
+ 0. Use `ssh` to access and interact with the newly launched instance
+
+ * Ensure you are currently ssh'ed into the controller instance
+ * If not follow steps from previous lab
+ * `ssh cirros@10.0.0.2` (password `cubswin:)`)
+
+#### [Next Lab](../lab-03)
openstack/devstack/lab-03/ansible/local.conf.j2
@@ -0,0 +1,45 @@
+[[local|localrc]]
+
+# new in compute node
+#--------------------
+HOST_IP={{ host_ip }}
+FIRST_HOST={{ first_ip }}
+FIRST_HOST_PUBLIC={{ first_public_ip }}
+
+SERVICE_HOST=$FIRST_HOST
+MYSQL_HOST=$FIRST_HOST
+RABBIT_HOST=$FIRST_HOST
+GLANCE_HOSTPORT=$FIRST_HOST:9292
+
+ENABLED_SERVICES=n-cpu,n-net,n-api,c-vol
+
+NOVA_VNC_ENABLED=True
+NOVNCPROXY_URL="http://$FIRST_HOST_PUBLIC:6080/vnc_auto.html"
+VNCSERVER_LISTEN=$HOST_IP
+VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN
+
+DATABASE_TYPE=mysql
+
+# -------------------
+
+SERVICE_TOKEN=DKS3MQMX72MSLQP231N
+ADMIN_PASSWORD=supersecret
+MYSQL_PASSWORD=radicallyrelational
+RABBIT_PASSWORD=rasciallyrabbit
+SERVICE_PASSWORD=supersecret
+
+FLAT_INTERFACE=eth0
+
+# private IPs
+FIXED_RANGE=10.0.0.0/16
+FIXED_NETWORK_SIZE=65534
+# public IPs
+FLOATING_RANGE=192.168.0.128/25
+
+MULTI_HOST=True
+Q_PLUGIN=ml2
+ENABLE_TENANT_TUNNELS=True
+TENANT_TUNNEL_RANGE=50:100
+
+LOGFILE=$DEST/logs/stack.sh.log
+LOGDAYS=2
openstack/devstack/lab-03/ansible/main.yml
@@ -0,0 +1,44 @@
+---
+- hosts: dev.controller
+ tasks:
+ - name: Record public IP
+ shell: wget -qO- http://ipecho.net/plain ; echo
+ register: public_ip
+
+- hosts: dev.compute
+ vars:
+ host_ip: "{{ ansible_default_ipv4.address }}"
+ first_ip: "{{ hostvars['dev.controller'].ansible_default_ipv4.address }}"
+ first_public_ip: "{{ hostvars['dev.controller'].public_ip.stdout }}"
+ tasks:
+ - name: Install tools
+ apt: name={{item}} state=latest
+ sudo: yes
+ with_items:
+ - git
+ - htop
+ - vim
+
+ - name: Checkout DevStack
+ git:
+ repo: https://git.openstack.org/openstack-dev/devstack
+ dest: /home/ubuntu/devstack
+
+ - name: Setup local.conf
+ template:
+ src: local.conf.j2
+ dest: /home/ubuntu/devstack/local.conf
+
+ - name: UnStack (idempotency)
+ shell: ./unstack.sh chdir=/home/ubuntu/devstack
+ async: 120
+ poll: 5
+ ignore_errors: yes
+
+ - name: Install DevStack compute node (async)
+ shell: ./stack.sh chdir=/home/ubuntu/devstack
+ async: 900
+ poll: 5
+
+
+
openstack/devstack/lab-03/img/horizon-instances.png
Binary file
openstack/devstack/lab-03/img/horizon-ping.png
Binary file
openstack/devstack/lab-03/img/horizon-pong.png
Binary file
openstack/devstack/lab-03/img/horizon-twohyper.png
Binary file
openstack/devstack/lab-03/.DS_Store
Binary file
openstack/devstack/lab-03/example-local.conf
@@ -0,0 +1,38 @@
+[[local|localrc]]
+
+HOST_IP=W.X.Y.Z # The compute node's internal IP address
+FIRST_HOST=W.X.Y.Z # The controller's internal IP address
+FIRST_HOST_PUBLIC=A.B.C.D # The controller's public IP address
+
+SERVICE_HOST=$FIRST_HOST
+MYSQL_HOST=$FIRST_HOST
+RABBIT_HOST=$FIRST_HOST
+GLANCE_HOSTPORT=$FIRST_HOST:9292
+
+ENABLED_SERVICES=n-cpu,n-net,n-api,c-vol
+
+NOVA_VNC_ENABLED=True
+NOVNCPROXY_URL="http://$FIRST_HOST_PUBLIC:6080/vnc_auto.html"
+VNCSERVER_LISTEN=$HOST_IP
+VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN
+
+DATABASE_TYPE=mysql
+
+SERVICE_TOKEN=DKS3MQMX72MSLQP231N
+ADMIN_PASSWORD=supersecret
+MYSQL_PASSWORD=radicallyrelational
+RABBIT_PASSWORD=rasciallyrabbit
+SERVICE_PASSWORD=supersecret
+
+FLAT_INTERFACE=eth0
+
+# private IPs
+FIXED_RANGE=10.0.0.0/16
+FIXED_NETWORK_SIZE=65534
+# public IPs
+FLOATING_RANGE=192.168.0.0/24
+
+MULTI_HOST=True
+
+LOGFILE=$DEST/logs/stack.sh.log
+LOGDAYS=2
openstack/devstack/lab-03/README.md
@@ -0,0 +1,90 @@
+# Lab 3 - DevStack Compute
+
+ Lab Objectives:
+
+ 0. Install and link a DevStack Compute node
+
+## Connect to your _compute_ instance:
+ 0. `ssh ubuntu@<COMPUTE IP> -i student.pem`
+ 0. `sudo whoami`
+
+## Install DevStack:
+ 0. `sudo apt-get install git`
+ 0. `git clone https://git.openstack.org/openstack-dev/devstack`
+ 0. `cd devstack`
+ 0. `cp samples/local.conf local.conf`
+ 0. Edit the `local.conf` file. The sample config file will have useful comments which are not replicated below.
+
+
+ `nano local.conf` or `vim local.conf`
+ ``` shell
+[[local|localrc]]
+
+# Please replace x.x.x.x with the compute node's internal IP address
+HOST_IP=x.x.x.x
+# Please replace x.x.x.x with the controller's internal IP address
+FIRST_HOST=x.x.x.x
+# Please replace x.x.x.x with the controller's public IP address
+FIRST_HOST_PUBLIC=x.x.x.x
+
+SERVICE_HOST=$FIRST_HOST
+MYSQL_HOST=$FIRST_HOST
+RABBIT_HOST=$FIRST_HOST
+GLANCE_HOSTPORT=$FIRST_HOST:9292
+
+ENABLED_SERVICES=n-cpu,n-net,n-api,c-vol
+
+NOVA_VNC_ENABLED=True
+NOVNCPROXY_URL="http://$FIRST_HOST_PUBLIC:6080/vnc_auto.html"
+VNCSERVER_LISTEN=$HOST_IP
+VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN
+
+DATABASE_TYPE=mysql
+
+# tokens and passwords
+SERVICE_TOKEN=DKS3MQMX72MSLQP231N # an alphanumeric token
+ADMIN_PASSWORD=supersecret
+MYSQL_PASSWORD=radicallyrelational
+RABBIT_PASSWORD=rasciallyrabbit
+SERVICE_PASSWORD=supersecret
+
+FLAT_INTERFACE=eth0
+
+# private IPs
+FIXED_RANGE=10.0.0.0/16
+FIXED_NETWORK_SIZE=65534
+# public IPs
+FLOATING_RANGE=192.168.0.0/24
+
+MULTI_HOST=True
+
+LOGFILE=$DEST/logs/stack.sh.log
+LOGDAYS=2
+```
+
+ * Be sure to remove config lines from the sample config about Swift and Tempest
+
+ 0. Run `ip addr show`
+ 0. Run stack.sh, this will take a little bit less time than Lab 1
+
+ `./stack.sh 2>&1 | tee stack.log`
+
+ > :white_check_mark: **Additional Info**:
+ >
+ > * `stack.sh`-ing a compute node should take much less time than our previous full devstack install
+
+ 0. Run `ip addr show` again and notice the added interfaces and bridges
+ 0. Login to the OpenStack Horizon Web Interface by navigating your browser to the public IP address of the controller
+ 0. Admin > System > Hypervisors and marvel at the number of Hypervisors
+
+ 
+
+ 0. Launch another Instance and ping between the two currently running instances
+
+ Project > Compute > Instances > Launch Instance
+
+ 
+ 
+ 
+
+#### [Next Lab](../lab-04)
openstack/devstack/lab-04/img/floating-add.png
Binary file
openstack/devstack/lab-04/img/floating-add2.png
Binary file
openstack/devstack/lab-04/img/security-add.png
Binary file
openstack/devstack/lab-04/img/security-associate.png
Binary file
openstack/devstack/lab-04/img/security-associate2.png
Binary file
openstack/devstack/lab-04/img/security-create.png
Binary file
openstack/devstack/lab-04/img/security-create2.png
Binary file
openstack/devstack/lab-04/img/security-manage-rule.png
Binary file
openstack/devstack/lab-04/img/security-rules.png
Binary file
openstack/devstack/lab-04/img/security-ssh.png
Binary file
openstack/devstack/lab-04/.DS_Store
Binary file
openstack/devstack/lab-04/README.md
@@ -0,0 +1,56 @@
+# Lab 4 - DevStack Security Groups & Floating IP
+
+ Lab Objectives:
+
+ 0. Become familiar with adding a Floating IP Address
+ 0. Become familiar with adding Security Groups
+
+## Create and apply a Floating IP address
+
+ 0. Navigate to: Project > Compute > Instances
+ 0. Add a floating IP address to a running instance
+
+ 
+ 
+
+ 0. From the hypervisor that the instance is running inside ssh into the node using the Floating IP address
+
+ :red_circle: TODO: get a screen shot of this
+ 
+
+## Create and apply a Security Group
+
+ 0. Login to the OpenStack Horizon Web Interface by navigating your browser to the public IP address of the controller
+ 0. :red_circle: TODO: login as user?
+ 0. Navigate to: Project > Compute > Access > Create Security Group
+
+ 
+ 
+
+ 0. Manage the new group and add an SSH rule
+
+ 
+ 
+ 
+
+ 0. Create another rule for HTTPS, the resulting Security Group should look like this:
+
+ 
+
+ 0. Add the security group to a running instance
+
+
+ 
+ 
+
+ Click the `+` on the Basic line and then Save
+
+## Rinse && Repeat
+
+ 0. Associate a Floating IP and Security Group with your other instance
+ 0. Log in to both and test connectivity between both Floating IP addresses for your other instance
+
+ :red_circle: TODO: get a screenshot of this
+ 
+
+#### [Next Lab](../lab-05)
openstack/devstack/lab-05/img/nova-boot.png
Binary file
openstack/devstack/lab-05/img/nova-float.png
Binary file
openstack/devstack/lab-05/img/nova-list.png
Binary file
openstack/devstack/lab-05/img/nova-list2.png
Binary file
openstack/devstack/lab-05/img/os-catalog.png
Binary file
openstack/devstack/lab-05/img/os-flavor.png
Binary file
openstack/devstack/lab-05/img/os-host.png
Binary file
openstack/devstack/lab-05/img/os-hyperv.png
Binary file
openstack/devstack/lab-05/img/os-image.png
Binary file
openstack/devstack/lab-05/img/os-project.png
Binary file
openstack/devstack/lab-05/img/os-projects.png
Binary file
openstack/devstack/lab-05/img/os-user.png
Binary file
openstack/devstack/lab-05/img/os-user2.png
Binary file
openstack/devstack/lab-05/.DS_Store
Binary file
openstack/devstack/lab-05/README.md
@@ -0,0 +1,83 @@
+# Lab 5 - DevStack CLI
+
+ Lab Objectives:
+
+ 0. The objective of this lab is to demonstrate that any mouseclicks made in the OpenStack GUI may also be issued from the OpenStack CLI. We will accomplish this goal by performing the same tasks we just performed in the OpenStack GUI, but instead from the OpenStack CLI. After this lab, you should feel familiar with the OpenStack command line utilities.
+
+## Connect to controller CLI:
+
+ 0. Connect (ssh) into the controller
+ 0. `cd ~/devstack`
+ 0. `source openrc admin`
+ 0. `openstack` (starts the openstack prompt)
+
+## List all the things!
+
+ 0. `endpoint list` - service endpoints and their ID's
+
+ :red_circle: TODO: DevStack Broken? get screen shot
+ 
+
+ 0. `hypervisor list` - compute hypervisors
+
+ 
+
+ 0. `host list` - openstack services and the host they are running on
+
+ 
+
+ 0. `image list` - glance OS images (from basic stack.sh install)
+
+ 
+
+ 0. `flavor list` - instance flavors (resources)
+
+ 
+
+ 0. `user list` - user IDs
+
+ 
+
+ 0. `project list` - projects
+
+ 
+
+ 0. `catalog list` - API endpoints
+
+ 
+
+ 0. `help` - all commands
+
+## Create a Project and User:
+
+ 0. `project create Alta3Project`
+
+ 
+
+ 0. `user create --email ubuntu@localhost --project Alta3Project --password supersecret student1`
+
+ 
+
+ 0. `exit`
+
+## Boot an Instance and assign a Floating IP:
+
+ 0. `nova boot --flavor m1.tiny --image cirros-0.3.4-x86_64-uec cliboot`
+
+ 
+
+ 0. `nova list`
+ :red_circle: TODO: this image should be updated to reflect the 3 instances that would now be running, not 2
+
+ 
+
+ 0. `nova floating-ip-create`
+ 0. `nova floating-ip-associate cliboot x.x.x.x cliboot` (replace x.x.x.x with the floating IP created in the above step)
+
+ 
+ 
+
+ 0. Now log back into the DevStack GUI (accessed by navigating to the IP address of your controller in your web browser) and make sure that the new instance you created at the OpenStack CLI (cliboot) is displayed, along with its newly associated floating IP address.
+
+#### [Next Lab](../lab-06)
+
openstack/devstack/lab-06/README.md
@@ -0,0 +1,10 @@
+# Lab 6 - Decommissioning and Removing Compute Resources
+
+ Lab Objectives:
+
+ 0. Become familiar with disabling a compute node
+ 0. Become familiar with removing a compute node
+
+
+ * :red_circle: TODO evaluate if this is appropriate for the level of lab complexity
+ * :red_circle: TODO Write this
openstack/devstack/.DS_Store
Binary file
openstack/devstack/instructor-README.md
@@ -0,0 +1,8 @@
+# DevStack Lab Implementation Notes
+
+Notes for implementation:
+* aws vm's need to in a security group that allows a crap-ton of ports, opening all of them for now
+* devstack install fails on t2.micro ubuntu. :( t2.medium worked
+* devstack install takes about 15 minutes, plan appropriately
+* Anytime ./stack.sh fails we should document the error and solution in [common errors](common-errors.md), most of my time spent on this lab was mis-configuring local.conf and seeing what errors show up
+* VPC == 172.30.1.0/24, needs changed in local.conf's if not correct
openstack/devstack/lab-hosts
@@ -0,0 +1,3 @@
+[devstack]
+dev.controller ansible_ssh_host=52.20.62.42 ansible_ssh_private_key_file=~/openstack-labs/student.pem ansible_ssh_user=ubuntu
+dev.compute ansible_ssh_host=52.0.202.229 ansible_ssh_private_key_file=~/openstack-labs/student.pem ansible_ssh_user=ubuntu
openstack/devstack/lab-prep.yml
@@ -0,0 +1,6 @@
+---
+- hosts: devstack
+ tasks:
+ - name: Update & Upgrade
+ apt: update_cache=yes upgrade=yes
+ sudo: yes
openstack/devstack/README.md
@@ -0,0 +1,37 @@
+# DevStack Lab
+
+:red_circle: TODO A well thought-out overview paragraph about what this lab is about should go here.
+
+Lab Objectives:
+
+ 0. Install a DevStack controller
+ 0. Become familiar with the OpenStack Horizon Interface
+ 0. Launch an instance from the Horizon Interface
+ 0. Install a DevStack compute node
+ 0. Become familiar with adding a Floating IP Address
+ 0. Become familiar with adding Security Groups
+ 0. Become familiar with the openstack command line utilities
+
+## Lab 0 - Document Lab IP addresses
+
+Your instructor will provide two public IP addresses for this lab.
+
+ | **Controller** | **Compute** |
+-------- | ------------- | ----------- |
+**Public** | W.X.Y.Z | W.X.Y.Z |
+**Internal** | A.B.C.D | A.B.C.D |
+
+0. Edit this README.md file and add the ip addresses to your forked repository.
+ * You can make these changes in the github webpage or from your checkedout version.
+ * Make sure your changes are committed and pushed to github.com and then refresh the page.
+
+0. Record your instance's internal IP address in the table
+
+ * `ssh ubuntu@<CONTROLLER IP> -i student.pem`
+ * `ip addr show dev eth0`
+
+0. Set the hostname on each to help with command line differentiation
+
+ * `sudo hostname <controller or compute>`
+ * `bash` to show the result
+
openstack/lab-00/img/dstat.png
Binary file
openstack/lab-00/img/github-create.png
Binary file
openstack/lab-00/img/github-fork1.png
Binary file
openstack/lab-00/img/github-fork2.png
Binary file
openstack/lab-00/img/github-fork3.png
Binary file
openstack/lab-00/img/github-star.png
Binary file
openstack/lab-00/.DS_Store
Binary file
openstack/lab-00/instructor-README.md
@@ -0,0 +1,32 @@
+# Instructor Setup
+
+ Each lab will have an instructor-README.md like this one.
+ This is where specific infrastructure requirements and pre-lab setup will be explained.
+ (i.e. what type of hosts and security groups need launched).
+
+## EC2
+
+ All of the labs in this repository rely on a shared private key that is committed to this repository.
+ This is INSECURE and is done intentionally for the convenience of a lab environment.
+ It is definitely worth mentioning to students that they should never do this in 'the real world'.
+ The below steps explain the process of updating the private key for each new course offering.
+
+ 0. Create a shared ssh private key
+ 0. :red_circle: TODO, document this
+
+## Ansible
+
+ As an instructor it is super useful to have ansible installed and ready to use.
+ Every lab should contain a /ansible directory with a playbook that automates the student portion of the lab.
+ This is super useful if you need to get the student's environment into a known state.
+ A great example would be a student who joins late and needs previous labs completed in order to continue with the class.
+ Ansible is used to automate and test lab environments. Think of it as a robo-student. You will need to install and configure a few files in order for ansible to be usable within the lab environment.
+
+ 0. [Install Ansible](http://docs.ansible.com/ansible/intro_installation.html) on your system
+
+## Connectivity Testing
+
+ Each lab will have a `lab-hosts` file. Once the public IP addresses are set in this file you can test connectivity:
+
+ `ansible -i lab-hosts all -m ping -vvvv`
+
openstack/lab-00/README.md
@@ -0,0 +1,321 @@
+# Lab 0 - Lab Environment Setup
+
+ Lab Objectives:
+
+ 0. Setup your own git fork of these labs
+ 0. Document Public IP and EC2 Internal IP addresses of lab instances
+ 0. Checkout (clone) this repository into your Jumper instance
+
+
+## Setup Github Lab Repository
+
+ 0. In a new tab, [Create a Github account](https://github.com/join) or [Login](https://github.com/login)
+
+ 
+
+ 0. Return to this tab, refresh, you are now logged in.
+
+ 0. Star this lab's repository
+
+ If you aren't already here it is located at https://github.com/alta3/openstack-labs
+
+ 
+
+ > :white_check_mark: **Additional Info**:
+ >
+ > Starring is like a bookmark on github.com, you can view and search your starred repositories at [github.com/stars](https://github.com/stars)
+
+ 0. Fork the lab repository into your account
+
+ 
+
+ 
+
+ 
+
+ From this point on you can edit and commit changes to your own copy
+ of the lab repository. You can do this from right inside of github
+ (click edit on any file)
+
+## Lab environment
+
+ This lab will utilize three hosts. Below is a brief description of each host and
+ what it will be used for
+
+ **Jumper**:
+
+ A jumper is a simple linux machine you will use as a launching point into the lab environment.
+ We will ssh into this host in order to connect to all other instances in the lab environments. This machine will retain
+ a copy of student.pem, which serves as an authentication key, for all of the machines you will admin.
+
+ :red_circle: TODO include student.pem in jumper
+
+ **Controller**:
+
+ :red_circle: TODO explain Controller
+
+ **Compute**:
+
+ :red_circle: TODO explain Compute
+
+## Connect to Lab Instances
+
+
+ **Lab Instance Details**
+
+| Attribute | Jumper | Controller | Compute |
+| ------------------ | -------- | ---------- | --------- |
+| Size | t2.micro | t2.medium | t2.medium |
+| OS image | CentOS 7 | CentOS 7 | CentOS 7 |
+| Public IP address | x.x.x.x | x.x.x.x | x.x.x.x |
+| Private IP address | x.x.x.x | x.x.x.x | x.x.x.x |
+
+ 0. Your instructor will provide you three public IP addresses.
+ Edit this README.md file and add the public IP addresses for the appropriate hosts.
+
+ Alternatively (and perhaps additionally), also record all of this information on a piece of paper.
+
+ :red_circle: TODO Pictures of editing a github file
+
+ 0. Record your instance's internal IP address in the table
+
+ * `ssh centos@<Public IP Address of JUMPER> -i student.pem`
+
+ If using PuTTy, login as 'centos' and use the student.ppk keyfile (takes place of a password)
+
+ * `ip addr show dev eth0`
+
+ Record the displayed IPv4 address, not the IPv6 address. When you record this address, be sure to note that it is jumper so you don't get confused. In the following example, your IPv4 address would occupy the location of x.x.x.x
+
+ ```
+ $ ip addr show dev eth0
+
+ 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 9001 qdisc pfifo_fast state UP qlen 1000
+ link/ether 0a:6f:52:8c:f0:ef brd ff:ff:ff:ff:ff:ff
+ inet x.x.x.x/24 brd 192.168.0.255 scope global dynamic eth0
+ valid_lft 2829sec preferred_lft 2829sec
+ inet6 fe80::86f:52ff:fe8c:f0ef/64 scope link
+ valid_lft forever preferred_lft forever
+ ```
+
+ 0. Set the hostname on 'jumper' to help with command line differentiation
+
+ * `sudo yum install -y vim nano`
+
+ These hosts are super bare-bones. This command installs nano, a common (user friendly) CLI text editor. You can use vi or vim, however, all lab instructions will be given assuming the use of nano.
+
+ * `sudo hostname jumper`
+
+ This command sets the name of the 'jumper' machine to 'jumper', for the current session. Please use 'jumper' and not an inventive naming scheme. This helps make the troubleshooting process manageable.
+
+ * `sudo nano /etc/hosts`
+
+ Add a single line to the bottom of /etc/hosts where x.x.x.x is the internal eth0 IPv4 address of your JUMPER machine (you just recorded this). This will ensure that the machine can resolve its own local hostname to an IP address.
+
+ `x.x.x.x jumper`
+
+ After you have added this file, save and exit. If this is your first time working with nano, press (CTRL + o), press ENTER, then press (CTRL + x)
+
+ * `sudo nano /etc/hostname`
+
+ This command edits the hostname file for the local machine. By editing this file, we are ensuring that the machine is still named 'jumper' even after a reboot. Delete anything in this file (usually just one line). The only content of the file should be the single, lowercase, name of the instance as follows:
+
+ `jumper`
+
+ After you have added this file, save and exit. If this is your first time working with nano, press (CTRL + o), press ENTER, then press (CTRL + x)
+
+ * `exit`
+
+ The terminal session will exit. After it does, log right back in...
+
+ * `ssh centos@<Public IP Address of JUMPER> -i student.pem`
+
+ Of course, you might be using PuTTy to perform the above command. Regardless of how you start an SSH session with jumper, bash should look like this now: `[centos@jumper ~]`
+
+ 0. Log into controller and learn the internal IP address
+
+ From jumper, issue the following command to log into the controller. If prompted, respond with 'yes' to import the new key.
+
+ * `ssh centos@<Public IP Address of CONTROLLER> -i student.pem`
+
+ * `ip addr show dev eth0`
+
+ Record the displayed IPv4 address, not the IPv6 address. When you record this address, be sure to note that it is CONTROLLER so you don't get confused.
+
+ * `sudo yum install -y vim nano`
+
+ * `sudo hostname controller`
+
+ This command sets the name of the 'controller' to 'controller', for the current session. Please use 'controller' and not an inventive naming scheme. This helps make the troubleshooting process manageable.
+
+ * `sudo nano /etc/hosts`
+
+ Add a single line to the bottom of /etc/hosts where x.x.x.x is the internal eth0 IPv4 address of your CONTROLLER machine (you just recorded this). This will ensure that the machine can resolve its own local hostname to an IP address.
+
+ `x.x.x.x controller`
+
+ After you have added this file, save and exit. If this is your first time working with nano, press (CTRL + o), press ENTER, then press (CTRL + x)
+
+ * `sudo nano /etc/hostname`
+
+ This command edits the hostname file for the local machine. By editing this file, we are ensuring that the machine is still named 'controller' even after a reboot. Delete anything in this file (usually just one line). The only content of the file should be the single, lowercase, name of the instance as follows:
+
+ `controller`
+
+ After you have added this file, save and exit. If this is your first time working with nano, press (CTRL + o), press ENTER, then press (CTRL + x)
+
+ 0. Prevent hostname updates on reboot & add controller internal IP to jumper
+
+ CentOS has the ability to detect when it is running in a cloud (such as AWS), it checks for a new hostname everytime it is launched. To ensure a predictable hostname, we'll disable this feature.
+
+ * `sudo nano /etc/cloud/cloud.cfg`
+
+ Comment out these two entries under `cloud_init_modules`:
+
+ ```
+ - set_hostname
+ - update_hostname
+ ```
+
+ When you are done editing /etc/cloud/cloud.cfg those entries should look like this:
+
+ ```
+ # - set_hostname
+ # - update_hostname
+ ```
+
+ If this is your first time working with nano, press (CTRL + o), press ENTER, then press (CTRL + x)
+
+ * `exit`
+
+ You should now be back at your jumper machine (bash should look like this now: `[centos@jumper~]`
+
+ We want to add the INTERNAL IP address of the CONTROLLER to the /etc/hosts in JUMPER. Therefore, once again, issue the following command:
+
+ * `sudo nano /etc/hosts`
+
+ Add a single line to the bottom of /etc/hosts where x.x.x.x is the internal eth0 IPv4 address of your CONTROLLER machine. This will ensure that the JUMPER machine can resolve controller to the local IP address of the CONTROLLER.
+
+ `x.x.x.x controller`
+
+ After you have added this file, save and exit. If this is your first time working with nano, press (CTRL + o), press ENTER, then press (CTRL + x)
+
+ 0. Log into compute and learn the internal IP address
+
+ From the jumper box, issue the following command to log into compute. If prompted, respond with 'yes' to import the new key.
+
+ * `ssh centos@<Public IP Address of COMPUTE> -i student.pem`
+
+ * `ip addr show dev eth0`
+
+ Record the displayed IPv4 address, not the IPv6 address. When you record this address, be sure to note that it is COMPUTE so you don't get confused.
+
+ * `sudo yum install -y vim nano`
+
+ * `sudo hostname compute`
+
+ This command sets the name of the 'compute' to 'compute', for the current session. Please use 'compute' and not an inventive naming scheme. This helps make the troubleshooting process manageable.
+
+ * `sudo nano /etc/hosts`
+
+ Add a single line to the bottom of /etc/hosts where x.x.x.x is the internal eth0 IPv4 address of your COMPUTE machine (you just recorded this). This will ensure that the machine can resolve its own local hostname to an IP address.
+
+ `x.x.x.x compute`
+
+ After you have added this file, save and exit. If this is your first time working with nano, press (CTRL + o), press ENTER, then press (CTRL + x)
+
+ * `sudo nano /etc/hostname`
+
+ This command edits the hostname file for the local machine. By editing this file, we are ensuring that the machine is still named 'compute' even after a reboot. Delete anything in this file (usually just one line). The only content of the file should be the single, lowercase, name of the instance as follows:
+
+ `compute`
+
+ After you have added this file, save and exit. If this is your first time working with nano, press (CTRL + o), press ENTER, then press (CTRL + x)
+
+ 0. Prevent hostname updates on reboot & add compute internal IP to jumper
+
+ CentOS has the ability to detect when it is running in a cloud (such as AWS), it checks for a new hostname everytime it is launched. To ensure a predictable hostname, we'll disable this feature.
+
+ * `sudo nano /etc/cloud/cloud.cfg`
+
+ Comment out these two entries under `cloud_init_modules`:
+
+ ```
+ - set_hostname
+ - update_hostname
+ ```
+
+ When you are done editing /etc/cloud/cloud.cfg those entries should look like this:
+
+ ```
+ # - set_hostname
+ # - update_hostname
+ ```
+
+ If this is your first time working with nano, press (CTRL + o), press ENTER, then press (CTRL + x)
+
+ * `exit`
+
+ You should now be back at your jumper machine (bash should look like this now: `[centos@jumper~]`
+
+ We want to add the INTERNAL IP address of the COMPUTE to the /etc/hosts in JUMPER. Therefore, once again, issue the following command:
+
+ * `sudo nano /etc/hosts`
+
+ Add a single line to the bottom of /etc/hosts where x.x.x.x is the internal eth0 IPv4 address of your COMPUTE machine. This will ensure that the JUMPER machine can resolve compute to the local IP address of the COMPUTE.
+
+ `x.x.x.x compute`
+
+ After you have added this file, save and exit. If this is your first time working with nano, press (CTRL + o), press ENTER, then press (CTRL + x)
+
+ 0. Prevent hostname updates on reboot
+
+ CentOS has the ability to detect when it is running in a cloud (such as AWS), it checks for a new hostname everytime it is launched. To ensure a predictable hostname, we'll disable this feature.
+
+ * `sudo nano /etc/cloud/cloud.cfg`
+
+ Comment out these two entries under `cloud_init_modules`:
+
+ ```
+ - set_hostname
+ - update_hostname
+ ```
+
+ When you are done editing /etc/cloud/cloud.cfg those entries should look like this:
+
+ ```
+ # - set_hostname
+ # - update_hostname
+ ```
+
+ If this is your first time working with nano, press (CTRL + o), press ENTER, then press (CTRL + x)
+
+ * `sudo reboot`
+
+ It may take around 180 seconds for the machine to fully powercycle.
+
+## Log back into jumper, test your work, install git, and clone your forked repository
+
+ 0. Log back into jumper. Of course you could use PuTTy, or if working from the CLI, issue:
+ * `ssh centos@<Jumper Public IP> -i student.pem`
+
+ 0. Test that the following commands resolve and receive ping responses. If they do not, now is the time to let the instructor know. FYI, you'll need to press (CTRL + C) in order to stop pinging.
+ * `ping jumper`
+ * `ping controller`
+ * `ping compute`
+
+ 0. Install git to the linux CLI. This is a standard utility you will NEED to understand if you wish to work with OpenStack, as git is the service that distributes the OpenStack project. In order to force git comprehension, we will also use git to distribute our lab manual.
+
+ * `sudo yum install -y git`
+
+ 0. Now clone the Alta3 Research OpenStack lab manual
+
+ * `git clone https://github.com/<Your username>/openstack-labs`
+
+ 0. :red_circle: TODO fill this out with instructions and screen shots
+ * make a change to a file
+ * commit the change
+ * push the change into github so it can be viewed on the webpage
+
+
+#### [Continue to the next lab](../lab-01)
openstack/lab-01/ansible/controller.pub
@@ -0,0 +1,1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCx4JiLHQzwbwqfbV3H3UxQVWjZb9uBsbjLSX1f3PktE+niFkfg2WF/3+OmFUgzesMfKNz/x+klIiuEZU2XGWCtJpRhie2jne2YgDIPRws6yQOwkIafS6W0wgELcaLjM28MwmyR2bbA19hvUrkjevmmAya10726K3FoDFox23m4nwmbnfu2FfPK5aNudTc61sSqFIIe/JBDZXdDjGQhMjGbkf9OS3lOtucAZDaq6Rjtmv6fSis+nbh59+dGg8KauMXiww8ZH9JiP3uJ84MwEFh7BPYDc4PBTnvb8X2l2XwqW6c3mEK6MIDfx7y1XdXFj9Etv2tDpMq32hQpHx52C/KD centos@ip-192-168-0-229
openstack/lab-01/ansible/main.yml
@@ -0,0 +1,58 @@
+---
+- hosts: dev.controller5
+ vars:
+ host_ip: "{{ ansible_default_ipv4.address }}"
+ user_priv_key: "/home/centos/.ssh/id_rsa"
+ user_pub_key: "/home/centos/.ssh/id_rsa.pub"
+ tasks:
+ - name: Record public IP
+ shell: wget -qO- http://ipecho.net/plain ; echo
+ register: public_ip
+
+ - name: Install tools
+ yum: name={{item.name}} state={{item.state}} update_cache=yes
+ sudo: yes
+ with_items:
+ - {name: git, state: latest}
+ - {name: vim, state: latest}
+ - {name: nano, state: latest}
+ - {name: screen, state: latest}
+ - {name: "https://rdoproject.org/repos/rdo-release.rpm", state: present}
+ - {name: openstack-packstack, state: latest}
+
+ - name: Allow localhost root ssh connections (RDO requirement)
+ file: src=sshd_config dest=/etc/ssh/sshd_config owner=root
+ sudo: yes
+
+ - name: Restart SSH service
+ service: name=sshd state=restarted
+ sudo: yes
+
+ - name: ssh authorized key mgmt (1/4)
+ file: path={{item}} state=absent
+ with_items:
+ - "{{ user_priv_key }}"
+ - "{{ user_pub_key }}"
+
+ - name: ssh authorized key mgmt (2/4)
+ command: ssh-keygen -b 2048 -t rsa -f {{ user_priv_key }} -q -N ""
+
+ - name: ssh authorized key mgmt (3/4)
+ fetch: src={{ user_pub_key }} dest=controller.pub flat=yes
+
+ - name: ssh authorized key mgmt (4/4)
+ authorized_key:
+ user: root
+ key: "{{ lookup('file', 'controller.pub') }}"
+ path: '/root/.ssh/authorized_keys'
+ sudo: yes
+
+ - name: PackStack
+ command: packstack \
+ --allinone \
+ --install-hosts={{ host_ip }} \
+ --keystone-admin-passwd=supersecret \
+ --provision-demo=n
+ async: 1500
+ poll: 5
+ tags: this
openstack/lab-01/ansible/nova.conf
@@ -0,0 +1,166 @@
+[DEFAULT]
+notification_driver=ceilometer.compute.nova_notifier
+notification_driver=nova.openstack.common.notifier.rpc_notifier
+notification_driver =
+notification_topics=notifications
+
+rpc_backend=rabbit
+internal_service_availability_zone=internal
+default_availability_zone=nova
+notify_api_faults=False
+state_path=/var/lib/nova
+report_interval=10
+
+enabled_apis=ec2,osapi_compute,metadata
+ec2_listen=0.0.0.0
+ec2_workers=8
+osapi_compute_listen=0.0.0.0
+osapi_compute_workers=8
+metadata_listen=0.0.0.0
+metadata_workers=8
+
+compute_manager=nova.compute.manager.ComputeManager
+service_down_time=60
+rootwrap_config=/etc/nova/rootwrap.conf
+auth_strategy=keystone
+use_forwarded_for=False
+
+novncproxy_host=0.0.0.0
+novncproxy_port=6080
+
+heal_instance_info_cache_interval=60
+reserved_host_memory_mb=512
+network_api_class=nova.network.neutronv2.api.API
+default_floating_pool=public
+force_snat_range=0.0.0.0/0
+metadata_host=192.168.0.243
+dhcp_domain=novalocal
+security_group_api=neutron
+debug=False
+verbose=True
+log_dir=/var/log/nova
+use_syslog=False
+cpu_allocation_ratio=16.0
+ram_allocation_ratio=1.5
+scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,CoreFilter
+scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
+compute_driver=libvirt.LibvirtDriver
+vif_plugging_is_fatal=True
+vif_plugging_timeout=300
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+force_raw_images=True
+
+novncproxy_base_url=http://51.21.204.128:6080/vnc_auto.html
+vncserver_listen=0.0.0.0
+vncserver_proxyclient_address=192.168.0.243
+vnc_enabled=True
+vnc_keymap=en-us
+
+volume_api_class=nova.volume.cinder.API
+amqp_durable_queues=False
+sql_connection=mysql://nova:7677ab576f4549db@192.168.0.243/nova
+image_service=nova.image.glance.GlanceImageService
+lock_path=/var/lib/nova/tmp
+osapi_volume_listen=0.0.0.0
+[api_database]
+
+[barbican]
+
+[cells]
+
+[cinder]
+
+[conductor]
+
+[database]
+
+[ephemeral_storage_encryption]
+
+[glance]
+api_servers=192.168.0.243:9292
+
+[guestfs]
+
+[hyperv]
+
+[image_file_url]
+
+[ironic]
+
+[keymgr]
+
+[keystone_authtoken]
+auth_uri=http://192.168.0.243:5000/v2.0
+identity_uri=http://192.168.0.243:35357
+admin_user=nova
+admin_password=supersecret
+admin_tenant_name=services
+
+[libvirt]
+virt_type=qemu
+inject_password=False
+inject_key=False
+inject_partition=-1
+live_migration_uri=qemu+tcp://nova@%s/system
+cpu_mode=none
+vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
+
+[metrics]
+
+[neutron]
+service_metadata_proxy=True
+metadata_proxy_shared_secret=d23b7e1dcbe645a6
+url=http://192.168.0.243:9696
+admin_username=neutron
+admin_password=ea620e596f184d7e
+admin_tenant_name=services
+region_name=RegionOne
+admin_auth_url=http://192.168.0.243:5000/v2.0
+auth_strategy=keystone
+ovs_bridge=br-int
+extension_sync_interval=600
+url_timeout=30
+default_tenant_id=default
+
+[osapi_v3]
+enabled=False
+
+[rdp]
+
+[serial_console]
+
+[spice]
+
+[ssl]
+
+[trusted_computing]
+
+[upgrade_levels]
+
+[vmware]
+
+[workarounds]
+
+[xenserver]
+
+[zookeeper]
+
+[matchmaker_redis]
+
+[matchmaker_ring]
+
+[oslo_concurrency]
+
+[oslo_messaging_amqp]
+
+[oslo_messaging_qpid]
+
+[oslo_messaging_rabbit]
+rabbit_host=192.168.0.243
+rabbit_port=5672
+rabbit_hosts=192.168.0.243:5672
+rabbit_use_ssl=False
+rabbit_userid=guest
+rabbit_password=guest
+rabbit_virtual_host=/
+rabbit_ha_queues=False
openstack/lab-01/ansible/packstack-answers.txt
@@ -0,0 +1,1166 @@
+[general]
+
+# Path to a public key to install on servers. If a usable key has not
+# been installed on the remote servers, the user is prompted for a
+# password and this key is installed so the password will not be
+# required again.
+CONFIG_SSH_KEY=/home/centos/.ssh/id_rsa.pub
+
+# Default password to be used everywhere (overridden by passwords set
+# for individual services or users).
+CONFIG_DEFAULT_PASSWORD=
+
+# Specify 'y' to install MariaDB. ['y', 'n']
+CONFIG_MARIADB_INSTALL=y
+
+# Specify 'y' to install OpenStack Image Service (glance). ['y', 'n']
+CONFIG_GLANCE_INSTALL=y
+
+# Specify 'y' to install OpenStack Block Storage (cinder). ['y', 'n']
+CONFIG_CINDER_INSTALL=y
+
+# Specify 'y' to install OpenStack Shared File System (manila). ['y',
+# 'n']
+CONFIG_MANILA_INSTALL=n
+
+# Specify 'y' to install OpenStack Compute (nova). ['y', 'n']
+CONFIG_NOVA_INSTALL=y
+
+# Specify 'y' to install OpenStack Networking (neutron); otherwise,
+# Compute Networking (nova) will be used. ['y', 'n']
+CONFIG_NEUTRON_INSTALL=y
+
+# Specify 'y' to install OpenStack Dashboard (horizon). ['y', 'n']
+CONFIG_HORIZON_INSTALL=y
+
+# Specify 'y' to install OpenStack Object Storage (swift). ['y', 'n']
+CONFIG_SWIFT_INSTALL=y
+
+# Specify 'y' to install OpenStack Metering (ceilometer). ['y', 'n']
+CONFIG_CEILOMETER_INSTALL=y
+
+# Specify 'y' to install OpenStack Orchestration (heat). ['y', 'n']
+CONFIG_HEAT_INSTALL=n
+
+# Specify 'y' to install OpenStack Data Processing (sahara). ['y',
+# 'n']
+CONFIG_SAHARA_INSTALL=n
+
+# Specify 'y' to install OpenStack Database (trove) ['y', 'n']
+CONFIG_TROVE_INSTALL=n
+
+# Specify 'y' to install OpenStack Bare Metal Provisioning (ironic).
+# ['y', 'n']
+CONFIG_IRONIC_INSTALL=n
+
+# Specify 'y' to install the OpenStack Client packages (command-line
+# tools). An admin "rc" file will also be installed. ['y', 'n']
+CONFIG_CLIENT_INSTALL=y
+
+# Comma-separated list of NTP servers. Leave plain if Packstack
+# should not install ntpd on instances.
+CONFIG_NTP_SERVERS=
+
+# Specify 'y' to install Nagios to monitor OpenStack hosts. Nagios
+# provides additional tools for monitoring the OpenStack environment.
+# ['y', 'n']
+CONFIG_NAGIOS_INSTALL=y
+
+# Comma-separated list of servers to be excluded from the
+# installation. This is helpful if you are running Packstack a second
+# time with the same answer file and do not want Packstack to
+# overwrite these server's configurations. Leave empty if you do not
+# need to exclude any servers.
+EXCLUDE_SERVERS=
+
+# Specify 'y' if you want to run OpenStack services in debug mode;
+# otherwise, specify 'n'. ['y', 'n']
+CONFIG_DEBUG_MODE=n
+
+# IP address of the server on which to install OpenStack services
+# specific to the controller role (for example, API servers or
+# dashboard).
+CONFIG_CONTROLLER_HOST=52.20.3.64
+
+# List of IP addresses of the servers on which to install the Compute
+# service.
+CONFIG_COMPUTE_HOSTS=172.31.51.93
+
+# List of IP addresses of the server on which to install the network
+# service such as Compute networking (nova network) or OpenStack
+# Networking (neutron).
+CONFIG_NETWORK_HOSTS=172.31.51.93
+
+# Specify 'y' if you want to use VMware vCenter as hypervisor and
+# storage; otherwise, specify 'n'. ['y', 'n']
+CONFIG_VMWARE_BACKEND=n
+
+# Specify 'y' if you want to use unsupported parameters. This should
+# be used only if you know what you are doing. Issues caused by using
+# unsupported options will not be fixed before the next major release.
+# ['y', 'n']
+CONFIG_UNSUPPORTED=n
+
+# Specify 'y' if you want to use subnet addresses (in CIDR format)
+# instead of interface names in following options:
+# CONFIG_NOVA_COMPUTE_PRIVIF, CONFIG_NOVA_NETWORK_PRIVIF,
+# CONFIG_NOVA_NETWORK_PUBIF, CONFIG_NEUTRON_OVS_BRIDGE_IFACES,
+# CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS, CONFIG_NEUTRON_OVS_TUNNEL_IF.
+# This is useful for cases when interface names are not same on all
+# installation hosts.
+CONFIG_USE_SUBNETS=n
+
+# IP address of the VMware vCenter server.
+CONFIG_VCENTER_HOST=
+
+# User name for VMware vCenter server authentication.
+CONFIG_VCENTER_USER=
+
+# Password for VMware vCenter server authentication.
+CONFIG_VCENTER_PASSWORD=
+
+# Name of the VMware vCenter cluster.
+CONFIG_VCENTER_CLUSTER_NAME=
+
+# (Unsupported!) IP address of the server on which to install
+# OpenStack services specific to storage servers such as Image or
+# Block Storage services.
+CONFIG_STORAGE_HOST=172.31.51.93
+
+# (Unsupported!) IP address of the server on which to install
+# OpenStack services specific to OpenStack Data Processing (sahara).
+CONFIG_SAHARA_HOST=172.31.51.93
+
+# Specify 'y' to enable the EPEL repository (Extra Packages for
+# Enterprise Linux). ['y', 'n']
+CONFIG_USE_EPEL=n
+
+# Comma-separated list of URLs for any additional yum repositories,
+# to use for installation.
+CONFIG_REPO=
+
+# Specify 'y' to enable the RDO testing repository. ['y', 'n']
+CONFIG_ENABLE_RDO_TESTING=n
+
+# To subscribe each server with Red Hat Subscription Manager, include
+# this with CONFIG_RH_PW.
+CONFIG_RH_USER=
+
+# To subscribe each server to receive updates from a Satellite
+# server, provide the URL of the Satellite server. You must also
+# provide a user name (CONFIG_SATELLITE_USERNAME) and password
+# (CONFIG_SATELLITE_PASSWORD) or an access key (CONFIG_SATELLITE_AKEY)
+# for authentication.
+CONFIG_SATELLITE_URL=
+
+# To subscribe each server with Red Hat Subscription Manager, include
+# this with CONFIG_RH_USER.
+CONFIG_RH_PW=
+
+# Specify 'y' to enable RHEL optional repositories. ['y', 'n']
+CONFIG_RH_OPTIONAL=y
+
+# HTTP proxy to use with Red Hat Subscription Manager.
+CONFIG_RH_PROXY=
+
+# Port to use for Red Hat Subscription Manager's HTTP proxy.
+CONFIG_RH_PROXY_PORT=
+
+# User name to use for Red Hat Subscription Manager's HTTP proxy.
+CONFIG_RH_PROXY_USER=
+
+# Password to use for Red Hat Subscription Manager's HTTP proxy.
+CONFIG_RH_PROXY_PW=
+
+# User name to authenticate with the RHN Satellite server; if you
+# intend to use an access key for Satellite authentication, leave this
+# blank.
+CONFIG_SATELLITE_USER=
+
+# Password to authenticate with the RHN Satellite server; if you
+# intend to use an access key for Satellite authentication, leave this
+# blank.
+CONFIG_SATELLITE_PW=
+
+# Access key for the Satellite server; if you intend to use a user
+# name and password for Satellite authentication, leave this blank.
+CONFIG_SATELLITE_AKEY=
+
+# Certificate path or URL of the certificate authority to verify that
+# the connection with the Satellite server is secure. If you are not
+# using Satellite in your deployment, leave this blank.
+CONFIG_SATELLITE_CACERT=
+
+# Profile name that should be used as an identifier for the system in
+# RHN Satellite (if required).
+CONFIG_SATELLITE_PROFILE=
+
+# Comma-separated list of flags passed to the rhnreg_ks command.
+# Valid flags are: novirtinfo, norhnsd, nopackages ['novirtinfo',
+# 'norhnsd', 'nopackages']
+CONFIG_SATELLITE_FLAGS=
+
+# HTTP proxy to use when connecting to the RHN Satellite server (if
+# required).
+CONFIG_SATELLITE_PROXY=
+
+# User name to authenticate with the Satellite-server HTTP proxy.
+CONFIG_SATELLITE_PROXY_USER=
+
+# User password to authenticate with the Satellite-server HTTP proxy.
+CONFIG_SATELLITE_PROXY_PW=
+
+# Specify filepath for CA cert file. If CONFIG_SSL_CACERT_SELFSIGN is
+# set to 'n' it has to be preexisting file.
+CONFIG_SSL_CACERT_FILE=/etc/pki/tls/certs/selfcert.crt
+
+# Specify filepath for CA cert key file. If
+# CONFIG_SSL_CACERT_SELFSIGN is set to 'n' it has to be preexisting
+# file.
+CONFIG_SSL_CACERT_KEY_FILE=/etc/pki/tls/private/selfkey.key
+
+# Enter the path to use to store generated SSL certificates in.
+CONFIG_SSL_CERT_DIR=~/packstackca/
+
+# Specify 'y' if you want Packstack to pregenerate the CA
+# Certificate.
+CONFIG_SSL_CACERT_SELFSIGN=y
+
+# Enter the selfsigned CAcert subject country.
+CONFIG_SELFSIGN_CACERT_SUBJECT_C=--
+
+# Enter the selfsigned CAcert subject state.
+CONFIG_SELFSIGN_CACERT_SUBJECT_ST=State
+
+# Enter the selfsigned CAcert subject location.
+CONFIG_SELFSIGN_CACERT_SUBJECT_L=City
+
+# Enter the selfsigned CAcert subject organization.
+CONFIG_SELFSIGN_CACERT_SUBJECT_O=openstack
+
+# Enter the selfsigned CAcert subject organizational unit.
+CONFIG_SELFSIGN_CACERT_SUBJECT_OU=packstack
+
+# Enter the selfsigned CAcert subject common name.
+CONFIG_SELFSIGN_CACERT_SUBJECT_CN=controller
+
+CONFIG_SELFSIGN_CACERT_SUBJECT_MAIL=admin@controller
+
+# Service to be used as the AMQP broker. Allowed values are: qpid,
+# rabbitmq ['qpid', 'rabbitmq']
+CONFIG_AMQP_BACKEND=rabbitmq
+
+# IP address of the server on which to install the AMQP service.
+CONFIG_AMQP_HOST=172.31.51.93
+
+# Specify 'y' to enable SSL for the AMQP service. ['y', 'n']
+CONFIG_AMQP_ENABLE_SSL=n
+
+# Specify 'y' to enable authentication for the AMQP service. ['y',
+# 'n']
+CONFIG_AMQP_ENABLE_AUTH=n
+
+# Password for the NSS certificate database of the AMQP service.
+CONFIG_AMQP_NSS_CERTDB_PW=PW_PLACEHOLDER
+
+# User for AMQP authentication.
+CONFIG_AMQP_AUTH_USER=amqp_user
+
+# Password for AMQP authentication.
+CONFIG_AMQP_AUTH_PASSWORD=PW_PLACEHOLDER
+
+# IP address of the server on which to install MariaDB. If a MariaDB
+# installation was not specified in CONFIG_MARIADB_INSTALL, specify
+# the IP address of an existing database server (a MariaDB cluster can
+# also be specified).
+CONFIG_MARIADB_HOST=172.31.51.93
+
+# User name for the MariaDB administrative user.
+CONFIG_MARIADB_USER=root
+
+# Password for the MariaDB administrative user.
+CONFIG_MARIADB_PW=9cc16509728d4714
+
+# Password to use for the Identity service (keystone) to access the
+# database.
+CONFIG_KEYSTONE_DB_PW=841cb5816a1b4a7b
+
+# Default region name to use when creating tenants in the Identity
+# service.
+CONFIG_KEYSTONE_REGION=RegionOne
+
+# Token to use for the Identity service API.
+CONFIG_KEYSTONE_ADMIN_TOKEN=42beae94da804c51a4f26d6fc5199929
+
+# Email address for the Identity service 'admin' user. Defaults to
+# 'root@localhost'.
+CONFIG_KEYSTONE_ADMIN_EMAIL=root@localhost
+
+# User name for the Identity service 'admin' user. Defaults to
+# 'admin'.
+CONFIG_KEYSTONE_ADMIN_USERNAME=admin
+
+# Password to use for the Identity service 'admin' user.
+CONFIG_KEYSTONE_ADMIN_PW=supersecret
+
+# Password to use for the Identity service 'demo' user.
+CONFIG_KEYSTONE_DEMO_PW=16f9b6c595df4629
+
+# Identity service API version string. ['v2.0', 'v3']
+CONFIG_KEYSTONE_API_VERSION=v2.0
+
+# Identity service token format (UUID or PKI). The recommended format
+# for new deployments is UUID. ['UUID', 'PKI']
+CONFIG_KEYSTONE_TOKEN_FORMAT=UUID
+
+# Name of service to use to run the Identity service (keystone or
+# httpd). ['keystone', 'httpd']
+CONFIG_KEYSTONE_SERVICE_NAME=keystone
+
+# Type of Identity service backend (sql or ldap). ['sql', 'ldap']
+CONFIG_KEYSTONE_IDENTITY_BACKEND=sql
+
+# URL for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_URL=ldap://172.31.51.93
+
+# User DN for the Identity service LDAP backend. Used to bind to the
+# LDAP server if the LDAP server does not allow anonymous
+# authentication.
+CONFIG_KEYSTONE_LDAP_USER_DN=
+
+# User DN password for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_PASSWORD=
+
+# Base suffix for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_SUFFIX=
+
+# Query scope for the Identity service LDAP backend. Use 'one' for
+# onelevel/singleLevel or 'sub' for subtree/wholeSubtree ('base' is
+# not actually used by the Identity service and is therefore
+# deprecated). ['base', 'one', 'sub']
+CONFIG_KEYSTONE_LDAP_QUERY_SCOPE=one
+
+# Query page size for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_PAGE_SIZE=-1
+
+# User subtree for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_SUBTREE=
+
+# User query filter for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_FILTER=
+
+# User object class for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_OBJECTCLASS=
+
+# User ID attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_ID_ATTRIBUTE=
+
+# User name attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_NAME_ATTRIBUTE=
+
+# User email address attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_MAIL_ATTRIBUTE=
+
+# User-enabled attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_ENABLED_ATTRIBUTE=
+
+# Bit mask integer applied to user-enabled attribute for the Identity
+# service LDAP backend. Indicate the bit that the enabled value is
+# stored in if the LDAP server represents "enabled" as a bit on an
+# integer rather than a boolean. A value of "0" indicates the mask is
+# not used (default). If this is not set to "0", the typical value is
+# "2", typically used when
+# "CONFIG_KEYSTONE_LDAP_USER_ENABLED_ATTRIBUTE = userAccountControl".
+CONFIG_KEYSTONE_LDAP_USER_ENABLED_MASK=-1
+
+# Value of enabled attribute which indicates user is enabled for the
+# Identity service LDAP backend. This should match an appropriate
+# integer value if the LDAP server uses non-boolean (bitmask) values
+# to indicate whether a user is enabled or disabled. If this is not
+# set as 'y', the typical value is "512". This is typically used when
+# "CONFIG_KEYSTONE_LDAP_USER_ENABLED_ATTRIBUTE = userAccountControl".
+CONFIG_KEYSTONE_LDAP_USER_ENABLED_DEFAULT=TRUE
+
+# Specify 'y' if users are disabled (not enabled) in the Identity
+# service LDAP backend (inverts boolean-enabled values). Some LDAP
+# servers use a boolean lock attribute where "y" means an account is
+# disabled. Setting this to 'y' allows these lock attributes to be
+# used. This setting will have no effect if
+# "CONFIG_KEYSTONE_LDAP_USER_ENABLED_MASK" is in use. ['n', 'y']
+CONFIG_KEYSTONE_LDAP_USER_ENABLED_INVERT=n
+
+# Comma-separated list of attributes stripped from LDAP user entry
+# upon update.
+CONFIG_KEYSTONE_LDAP_USER_ATTRIBUTE_IGNORE=
+
+# Identity service LDAP attribute mapped to default_project_id for
+# users.
+CONFIG_KEYSTONE_LDAP_USER_DEFAULT_PROJECT_ID_ATTRIBUTE=
+
+# Specify 'y' if you want to be able to create Identity service users
+# through the Identity service interface; specify 'n' if you will
+# create directly in the LDAP backend. ['n', 'y']
+CONFIG_KEYSTONE_LDAP_USER_ALLOW_CREATE=n
+
+# Specify 'y' if you want to be able to update Identity service users
+# through the Identity service interface; specify 'n' if you will
+# update directly in the LDAP backend. ['n', 'y']
+CONFIG_KEYSTONE_LDAP_USER_ALLOW_UPDATE=n
+
+# Specify 'y' if you want to be able to delete Identity service users
+# through the Identity service interface; specify 'n' if you will
+# delete directly in the LDAP backend. ['n', 'y']
+CONFIG_KEYSTONE_LDAP_USER_ALLOW_DELETE=n
+
+# Identity service LDAP attribute mapped to password.
+CONFIG_KEYSTONE_LDAP_USER_PASS_ATTRIBUTE=
+
+# DN of the group entry to hold enabled LDAP users when using enabled
+# emulation.
+CONFIG_KEYSTONE_LDAP_USER_ENABLED_EMULATION_DN=
+
+# List of additional LDAP attributes for mapping additional attribute
+# mappings for users. The attribute-mapping format is
+# <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the
+# LDAP entry and user_attr is the Identity API attribute.
+CONFIG_KEYSTONE_LDAP_USER_ADDITIONAL_ATTRIBUTE_MAPPING=
+
+# Group subtree for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_GROUP_SUBTREE=
+
+# Group query filter for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_GROUP_FILTER=
+
+# Group object class for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_GROUP_OBJECTCLASS=
+
+# Group ID attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_GROUP_ID_ATTRIBUTE=
+
+# Group name attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_GROUP_NAME_ATTRIBUTE=
+
+# Group member attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_GROUP_MEMBER_ATTRIBUTE=
+
+# Group description attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_GROUP_DESC_ATTRIBUTE=
+
+# Comma-separated list of attributes stripped from LDAP group entry
+# upon update.
+CONFIG_KEYSTONE_LDAP_GROUP_ATTRIBUTE_IGNORE=
+
+# Specify 'y' if you want to be able to create Identity service
+# groups through the Identity service interface; specify 'n' if you
+# will create directly in the LDAP backend. ['n', 'y']
+CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_CREATE=n
+
+# Specify 'y' if you want to be able to update Identity service
+# groups through the Identity service interface; specify 'n' if you
+# will update directly in the LDAP backend. ['n', 'y']
+CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_UPDATE=n
+
+# Specify 'y' if you want to be able to delete Identity service
+# groups through the Identity service interface; specify 'n' if you
+# will delete directly in the LDAP backend. ['n', 'y']
+CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_DELETE=n
+
+# List of additional LDAP attributes used for mapping additional
+# attribute mappings for groups. The attribute-mapping format is
+# <ldap_attr>:<group_attr>, where ldap_attr is the attribute in the
+# LDAP entry and group_attr is the Identity API attribute.
+CONFIG_KEYSTONE_LDAP_GROUP_ADDITIONAL_ATTRIBUTE_MAPPING=
+
+# Specify 'y' if the Identity service LDAP backend should use TLS.
+# ['n', 'y']
+CONFIG_KEYSTONE_LDAP_USE_TLS=n
+
+# CA certificate directory for Identity service LDAP backend (if TLS
+# is used).
+CONFIG_KEYSTONE_LDAP_TLS_CACERTDIR=
+
+# CA certificate file for Identity service LDAP backend (if TLS is
+# used).
+CONFIG_KEYSTONE_LDAP_TLS_CACERTFILE=
+
+# Certificate-checking strictness level for Identity service LDAP
+# backend; valid options are: never, allow, demand. ['never', 'allow',
+# 'demand']
+CONFIG_KEYSTONE_LDAP_TLS_REQ_CERT=demand
+
+# Password to use for the Image service (glance) to access the
+# database.
+CONFIG_GLANCE_DB_PW=2fac68f862254f62
+
+# Password to use for the Image service to authenticate with the
+# Identity service.
+CONFIG_GLANCE_KS_PW=b421ff11e8c74173
+
+# Storage backend for the Image service (controls how the Image
+# service stores disk images). Valid options are: file or swift
+# (Object Storage). The Object Storage service must be enabled to use
+# it as a working backend; otherwise, Packstack falls back to 'file'.
+# ['file', 'swift']
+CONFIG_GLANCE_BACKEND=file
+
+# Password to use for the Block Storage service (cinder) to access
+# the database.
+CONFIG_CINDER_DB_PW=d70794d119e7411f
+
+# Password to use for the Block Storage service to authenticate with
+# the Identity service.
+CONFIG_CINDER_KS_PW=bf5f82c89cea40a0
+
+# Storage backend to use for the Block Storage service; valid options
+# are: lvm, gluster, nfs, vmdk, netapp. ['lvm', 'gluster', 'nfs',
+# 'vmdk', 'netapp']
+CONFIG_CINDER_BACKEND=lvm
+
+# Specify 'y' to create the Block Storage volumes group. That is,
+# Packstack creates a raw disk image in /var/lib/cinder, and mounts it
+# using a loopback device. This should only be used for testing on a
+# proof-of-concept installation of the Block Storage service (a file-
+# backed volume group is not suitable for production usage). ['y',
+# 'n']
+CONFIG_CINDER_VOLUMES_CREATE=y
+
+# Size of Block Storage volumes group. Actual volume size will be
+# extended with 3% more space for VG metadata. Remember that the size
+# of the volume group will restrict the amount of disk space that you
+# can expose to Compute instances, and that the specified amount must
+# be available on the device used for /var/lib/cinder.
+CONFIG_CINDER_VOLUMES_SIZE=20G
+
+# A single or comma-separated list of Red Hat Storage (gluster)
+# volume shares to mount. Example: 'ip-address:/vol-name', 'domain
+# :/vol-name'
+CONFIG_CINDER_GLUSTER_MOUNTS=
+
+# A single or comma-separated list of NFS exports to mount. Example:
+# 'ip-address:/export-name'
+CONFIG_CINDER_NFS_MOUNTS=
+
+# Administrative user account name used to access the NetApp storage
+# system or proxy server.
+CONFIG_CINDER_NETAPP_LOGIN=
+
+# Password for the NetApp administrative user account specified in
+# the CONFIG_CINDER_NETAPP_LOGIN parameter.
+CONFIG_CINDER_NETAPP_PASSWORD=
+
+# Hostname (or IP address) for the NetApp storage system or proxy
+# server.
+CONFIG_CINDER_NETAPP_HOSTNAME=
+
+# The TCP port to use for communication with the storage system or
+# proxy. If not specified, Data ONTAP drivers will use 80 for HTTP and
+# 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS.
+# Defaults to 80.
+CONFIG_CINDER_NETAPP_SERVER_PORT=80
+
+# Storage family type used on the NetApp storage system; valid
+# options are ontap_7mode for using Data ONTAP operating in 7-Mode,
+# ontap_cluster for using clustered Data ONTAP, or E-Series for NetApp
+# E-Series. Defaults to ontap_cluster. ['ontap_7mode',
+# 'ontap_cluster', 'eseries']
+CONFIG_CINDER_NETAPP_STORAGE_FAMILY=ontap_cluster
+
+# The transport protocol used when communicating with the NetApp
+# storage system or proxy server. Valid values are http or https.
+# Defaults to 'http'. ['http', 'https']
+CONFIG_CINDER_NETAPP_TRANSPORT_TYPE=http
+
+# Storage protocol to be used on the data path with the NetApp
+# storage system; valid options are iscsi, fc, nfs. Defaults to nfs.
+# ['iscsi', 'fc', 'nfs']
+CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL=nfs
+
+# Quantity to be multiplied by the requested volume size to ensure
+# enough space is available on the virtual storage server (Vserver) to
+# fulfill the volume creation request. Defaults to 1.0.
+CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER=1.0
+
+# Time period (in minutes) that is allowed to elapse after the image
+# is last accessed, before it is deleted from the NFS image cache.
+# When a cache-cleaning cycle begins, images in the cache that have
+# not been accessed in the last M minutes, where M is the value of
+# this parameter, are deleted from the cache to create free space on
+# the NFS share. Defaults to 720.
+CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES=720
+
+# If the percentage of available space for an NFS share has dropped
+# below the value specified by this parameter, the NFS image cache is
+# cleaned. Defaults to 20.
+CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START=20
+
+# When the percentage of available space on an NFS share has reached
+# the percentage specified by this parameter, the driver stops
+# clearing files from the NFS image cache that have not been accessed
+# in the last M minutes, where M is the value of the
+# CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES parameter. Defaults to 60.
+CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP=60
+
+# Single or comma-separated list of NetApp NFS shares for Block
+# Storage to use. Format: ip-address:/export-name. Defaults to ''.
+CONFIG_CINDER_NETAPP_NFS_SHARES=
+
+# File with the list of available NFS shares. Defaults to
+# '/etc/cinder/shares.conf'.
+CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG=/etc/cinder/shares.conf
+
+# This parameter is only utilized when the storage protocol is
+# configured to use iSCSI or FC. This parameter is used to restrict
+# provisioning to the specified controller volumes. Specify the value
+# of this parameter to be a comma separated list of NetApp controller
+# volume names to be used for provisioning. Defaults to ''.
+CONFIG_CINDER_NETAPP_VOLUME_LIST=
+
+# The vFiler unit on which provisioning of block storage volumes will
+# be done. This parameter is only used by the driver when connecting
+# to an instance with a storage family of Data ONTAP operating in
+# 7-Mode Only use this parameter when utilizing the MultiStore feature
+# on the NetApp storage system. Defaults to ''.
+CONFIG_CINDER_NETAPP_VFILER=
+
+# The name of the config.conf stanza for a Data ONTAP (7-mode) HA
+# partner. This option is only used by the driver when connecting to
+# an instance with a storage family of Data ONTAP operating in 7-Mode,
+# and it is required if the storage protocol selected is FC. Defaults
+# to ''.
+CONFIG_CINDER_NETAPP_PARTNER_BACKEND_NAME=
+
+# This option specifies the virtual storage server (Vserver) name on
+# the storage cluster on which provisioning of block storage volumes
+# should occur. Defaults to ''.
+CONFIG_CINDER_NETAPP_VSERVER=
+
+# Restricts provisioning to the specified controllers. Value must be
+# a comma-separated list of controller hostnames or IP addresses to be
+# used for provisioning. This option is only utilized when the storage
+# family is configured to use E-Series. Defaults to ''.
+CONFIG_CINDER_NETAPP_CONTROLLER_IPS=
+
+# Password for the NetApp E-Series storage array. Defaults to ''.
+CONFIG_CINDER_NETAPP_SA_PASSWORD=
+
+# This option is used to define how the controllers in the E-Series
+# storage array will work with the particular operating system on the
+# hosts that are connected to it. Defaults to 'linux_dm_mp'
+CONFIG_CINDER_NETAPP_ESERIES_HOST_TYPE=linux_dm_mp
+
+# Path to the NetApp E-Series proxy application on a proxy server.
+# The value is combined with the value of the
+# CONFIG_CINDER_NETAPP_TRANSPORT_TYPE, CONFIG_CINDER_NETAPP_HOSTNAME,
+# and CONFIG_CINDER_NETAPP_HOSTNAME options to create the URL used by
+# the driver to connect to the proxy application. Defaults to
+# '/devmgr/v2'.
+CONFIG_CINDER_NETAPP_WEBSERVICE_PATH=/devmgr/v2
+
+# Restricts provisioning to the specified storage pools. Only dynamic
+# disk pools are currently supported. The value must be a comma-
+# separated list of disk pool names to be used for provisioning.
+# Defaults to ''.
+CONFIG_CINDER_NETAPP_STORAGE_POOLS=
+
+# Password to use for the OpenStack File Share service (manila) to
+# access the database.
+CONFIG_MANILA_DB_PW=PW_PLACEHOLDER
+
+# Password to use for the OpenStack File Share service (manila) to
+# authenticate with the Identity service.
+CONFIG_MANILA_KS_PW=PW_PLACEHOLDER
+
+# Backend for the OpenStack File Share service (manila); valid
+# options are: generic or netapp. ['generic', 'netapp']
+CONFIG_MANILA_BACKEND=generic
+
+# Denotes whether the driver should handle the responsibility of
+# managing share servers. This must be set to false if the driver is
+# to operate without managing share servers. Defaults to 'false'
+# ['true', 'false']
+CONFIG_MANILA_NETAPP_DRV_HANDLES_SHARE_SERVERS=false
+
+# The transport protocol used when communicating with the storage
+# system or proxy server. Valid values are 'http' and 'https'.
+# Defaults to 'https'. ['https', 'http']
+CONFIG_MANILA_NETAPP_TRANSPORT_TYPE=https
+
+# Administrative user account name used to access the NetApp storage
+# system. Defaults to ''.
+CONFIG_MANILA_NETAPP_LOGIN=admin
+
+# Password for the NetApp administrative user account specified in
+# the CONFIG_MANILA_NETAPP_LOGIN parameter. Defaults to ''.
+CONFIG_MANILA_NETAPP_PASSWORD=
+
+# Hostname (or IP address) for the NetApp storage system or proxy
+# server. Defaults to ''.
+CONFIG_MANILA_NETAPP_SERVER_HOSTNAME=
+
+# The storage family type used on the storage system; valid values
+# are ontap_cluster for clustered Data ONTAP. Defaults to
+# 'ontap_cluster'. ['ontap_cluster']
+CONFIG_MANILA_NETAPP_STORAGE_FAMILY=ontap_cluster
+
+# The TCP port to use for communication with the storage system or
+# proxy server. If not specified, Data ONTAP drivers will use 80 for
+# HTTP and 443 for HTTPS. Defaults to '443'.
+CONFIG_MANILA_NETAPP_SERVER_PORT=443
+
+# Pattern for searching available aggregates for NetApp provisioning.
+# Defaults to '(.*)'.
+CONFIG_MANILA_NETAPP_AGGREGATE_NAME_SEARCH_PATTERN=(.*)
+
+# Name of aggregate on which to create the NetApp root volume. This
+# option only applies when the option
+# CONFIG_MANILA_NETAPP_DRV_HANDLES_SHARE_SERVERS is set to True.
+CONFIG_MANILA_NETAPP_ROOT_VOLUME_AGGREGATE=
+
+# NetApp root volume name. Defaults to 'root'.
+CONFIG_MANILA_NETAPP_ROOT_VOLUME_NAME=root
+
+# This option specifies the storage virtual machine (previously
+# called a Vserver) name on the storage cluster on which provisioning
+# of shared file systems should occur. This option only applies when
+# the option driver_handles_share_servers is set to False. Defaults to
+# ''.
+CONFIG_MANILA_NETAPP_VSERVER=
+
+# Denotes whether the driver should handle the responsibility of
+# managing share servers. This must be set to false if the driver is
+# to operate without managing share servers. Defaults to 'true'.
+# ['true', 'false']
+CONFIG_MANILA_GENERIC_DRV_HANDLES_SHARE_SERVERS=true
+
+# Volume name template for Manila service. Defaults to 'manila-
+# share-%s'.
+CONFIG_MANILA_GENERIC_VOLUME_NAME_TEMPLATE=manila-share-%s
+
+# Share mount path for Manila service. Defaults to '/shares'.
+CONFIG_MANILA_GENERIC_SHARE_MOUNT_PATH=/shares
+
+# Location of disk image for Manila service instance. Defaults to ''.
+CONFIG_MANILA_SERVICE_IMAGE_LOCATION=https://www.dropbox.com/s/vi5oeh10q1qkckh/ubuntu_1204_nfs_cifs.qcow2
+
+# User in Manila service instance.
+CONFIG_MANILA_SERVICE_INSTANCE_USER=ubuntu
+
+# Password to service instance user.
+CONFIG_MANILA_SERVICE_INSTANCE_PASSWORD=ubuntu
+
+# Type of networking that the backend will use. A more detailed
+# description of each option is available in the Manila docs. Defaults
+# to 'neutron'. ['neutron', 'nova-network', 'standalone']
+CONFIG_MANILA_NETWORK_TYPE=neutron
+
+# Gateway IPv4 address that should be used. Required. Defaults to ''.
+CONFIG_MANILA_NETWORK_STANDALONE_GATEWAY=
+
+# Network mask that will be used. Can be either decimal like '24' or
+# binary like '255.255.255.0'. Required. Defaults to ''.
+CONFIG_MANILA_NETWORK_STANDALONE_NETMASK=
+
+# Set it if network has segmentation (VLAN, VXLAN, etc). It will be
+# assigned to share-network and share drivers will be able to use this
+# for network interfaces within provisioned share servers. Optional.
+# Example: 1001. Defaults to ''.
+CONFIG_MANILA_NETWORK_STANDALONE_SEG_ID=
+
+# Can be IP address, range of IP addresses or list of addresses or
+# ranges. Contains addresses from IP network that are allowed to be
+# used. If empty, then will be assumed that all host addresses from
+# network can be used. Optional. Examples: 10.0.0.10 or
+# 10.0.0.10-10.0.0.20 or
+# 10.0.0.10-10.0.0.20,10.0.0.30-10.0.0.40,10.0.0.50. Defaults to ''.
+CONFIG_MANILA_NETWORK_STANDALONE_IP_RANGE=
+
+# IP version of network. Optional. Defaults to '4'. ['4', '6']
+CONFIG_MANILA_NETWORK_STANDALONE_IP_VERSION=4
+
+# Password to use for OpenStack Bare Metal Provisioning (ironic) to
+# access the database.
+CONFIG_IRONIC_DB_PW=PW_PLACEHOLDER
+
+# Password to use for OpenStack Bare Metal Provisioning to
+# authenticate with the Identity service.
+CONFIG_IRONIC_KS_PW=PW_PLACEHOLDER
+
+# Password to use for the Compute service (nova) to access the
+# database.
+CONFIG_NOVA_DB_PW=fc7c306ac4604b46
+
+# Password to use for the Compute service to authenticate with the
+# Identity service.
+CONFIG_NOVA_KS_PW=ee6b96f94ff2426f
+
+# Overcommitment ratio for virtual to physical CPUs. Specify 1.0 to
+# disable CPU overcommitment.
+CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO=16.0
+
+# Overcommitment ratio for virtual to physical RAM. Specify 1.0 to
+# disable RAM overcommitment.
+CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO=1.5
+
+# Protocol used for instance migration. Valid options are: tcp and
+# ssh. Note that by default, the Compute user is created with the
+# /sbin/nologin shell so that the SSH protocol will not work. To make
+# the SSH protocol work, you must configure the Compute user on
+# compute hosts manually. ['tcp', 'ssh']
+CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL=tcp
+
+# Manager that runs the Compute service.
+CONFIG_NOVA_COMPUTE_MANAGER=nova.compute.manager.ComputeManager
+
+# PEM encoded certificate to be used for ssl on the https server,
+# leave blank if one should be generated, this certificate should not
+# require a passphrase. If CONFIG_HORIZON_SSL is set to 'n' this
+# parameter is ignored.
+CONFIG_VNC_SSL_CERT=
+
+# SSL keyfile corresponding to the certificate if one was entered. If
+# CONFIG_HORIZON_SSL is set to 'n' this parameter is ignored.
+CONFIG_VNC_SSL_KEY=
+
+# Private interface for flat DHCP on the Compute servers.
+CONFIG_NOVA_COMPUTE_PRIVIF=eth1
+
+# Compute Network Manager. ['^nova\.network\.manager\.\w+Manager$']
+CONFIG_NOVA_NETWORK_MANAGER=nova.network.manager.FlatDHCPManager
+
+# Public interface on the Compute network server.
+CONFIG_NOVA_NETWORK_PUBIF=eth0
+
+# Private interface for flat DHCP on the Compute network server.
+CONFIG_NOVA_NETWORK_PRIVIF=eth1
+
+# IP Range for flat DHCP. ['^[\:\.\da-fA-f]+(\/\d+){0,1}$']
+CONFIG_NOVA_NETWORK_FIXEDRANGE=192.168.32.0/22
+
+# IP Range for floating IP addresses. ['^[\:\.\da-
+# fA-f]+(\/\d+){0,1}$']
+CONFIG_NOVA_NETWORK_FLOATRANGE=10.3.4.0/22
+
+# Specify 'y' to automatically assign a floating IP to new instances.
+# ['y', 'n']
+CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP=n
+
+# First VLAN for private networks (Compute networking).
+CONFIG_NOVA_NETWORK_VLAN_START=100
+
+# Number of networks to support (Compute networking).
+CONFIG_NOVA_NETWORK_NUMBER=1
+
+# Number of addresses in each private subnet (Compute networking).
+CONFIG_NOVA_NETWORK_SIZE=255
+
+# Password to use for OpenStack Networking (neutron) to authenticate
+# with the Identity service.
+CONFIG_NEUTRON_KS_PW=8c3f85e9f3cd424b
+
+# The password to use for OpenStack Networking to access the
+# database.
+CONFIG_NEUTRON_DB_PW=8343c41200a04477
+
+# The name of the Open vSwitch bridge (or empty for linuxbridge) for
+# the OpenStack Networking L3 agent to use for external traffic.
+# Specify 'provider' if you intend to use a provider network to handle
+# external traffic.
+CONFIG_NEUTRON_L3_EXT_BRIDGE=br-ex
+
+# Password for the OpenStack Networking metadata agent.
+CONFIG_NEUTRON_METADATA_PW=fc1fca9bb48c4cb4
+
+# Specify 'y' to install OpenStack Networking's Load-Balancing-
+# as-a-Service (LBaaS). ['y', 'n']
+CONFIG_LBAAS_INSTALL=n
+
+# Specify 'y' to install OpenStack Networking's L3 Metering agent
+# ['y', 'n']
+CONFIG_NEUTRON_METERING_AGENT_INSTALL=n
+
+# Specify 'y' to configure OpenStack Networking's Firewall-
+# as-a-Service (FWaaS). ['y', 'n']
+CONFIG_NEUTRON_FWAAS=n
+
+# Comma-separated list of network-type driver entry points to be
+# loaded from the neutron.ml2.type_drivers namespace. ['local',
+# 'flat', 'vlan', 'gre', 'vxlan']
+CONFIG_NEUTRON_ML2_TYPE_DRIVERS=vxlan
+
+# Comma-separated, ordered list of network types to allocate as
+# tenant networks. The 'local' value is only useful for single-box
+# testing and provides no connectivity between hosts. ['local',
+# 'vlan', 'gre', 'vxlan']
+CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES=vxlan
+
+# Comma-separated ordered list of networking mechanism driver entry
+# points to be loaded from the neutron.ml2.mechanism_drivers
+# namespace. ['logger', 'test', 'linuxbridge', 'openvswitch',
+# 'hyperv', 'ncs', 'arista', 'cisco_nexus', 'mlnx', 'l2population']
+CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS=openvswitch
+
+# Comma-separated list of physical_network names with which flat
+# networks can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+CONFIG_NEUTRON_ML2_FLAT_NETWORKS=*
+
+# Comma-separated list of <physical_network>:<vlan_min>:<vlan_max> or
+# <physical_network> specifying physical_network names usable for VLAN
+# provider and tenant networks, as well as ranges of VLAN tags on each
+# available for allocation to tenant networks.
+CONFIG_NEUTRON_ML2_VLAN_RANGES=
+
+# Comma-separated list of <tun_min>:<tun_max> tuples enumerating
+# ranges of GRE tunnel IDs that are available for tenant-network
+# allocation. A tuple must be an array with tun_max +1 - tun_min >
+# 1000000.
+CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES=
+
+# Comma-separated list of addresses for VXLAN multicast group. If
+# left empty, disables VXLAN from sending allocate broadcast traffic
+# (disables multicast VXLAN mode). Should be a Multicast IP (v4 or v6)
+# address.
+CONFIG_NEUTRON_ML2_VXLAN_GROUP=
+
+# Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network
+# allocation. Minimum value is 0 and maximum value is 16777215.
+CONFIG_NEUTRON_ML2_VNI_RANGES=10:100
+
+# Name of the L2 agent to be used with OpenStack Networking.
+# ['linuxbridge', 'openvswitch']
+CONFIG_NEUTRON_L2_AGENT=openvswitch
+
+# Comma-separated list of interface mappings for the OpenStack
+# Networking linuxbridge plugin. Each tuple in the list must be in the
+# format <physical_network>:<net_interface>. Example:
+# physnet1:eth1,physnet2:eth2,physnet3:eth3.
+CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS=
+
+# Comma-separated list of bridge mappings for the OpenStack
+# Networking Open vSwitch plugin. Each tuple in the list must be in
+# the format <physical_network>:<ovs_bridge>. Example: physnet1:br-
+# eth1,physnet2:br-eth2,physnet3:br-eth3
+CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS=
+
+# Comma-separated list of colon-separated Open vSwitch
+# <bridge>:<interface> pairs. The interface will be added to the
+# associated bridge. If you desire the bridge to be persistent a value
+# must be added to this directive, also
+# CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS must be set in order to create
+# the proper port. This can be achieved from the command line by
+# issuing the following command: packstack --allinone --os-neutron-
+# ovs-bridge-mappings=ext-net:br-ex --os-neutron-ovs-bridge-interfaces
+# =br-ex:eth0
+CONFIG_NEUTRON_OVS_BRIDGE_IFACES=
+
+# Interface for the Open vSwitch tunnel. Packstack overrides the IP
+# address used for tunnels on this hypervisor to the IP found on the
+# specified interface (for example, eth1).
+CONFIG_NEUTRON_OVS_TUNNEL_IF=
+
+# VXLAN UDP port.
+CONFIG_NEUTRON_OVS_VXLAN_UDP_PORT=4789
+
+# Specify 'y' to set up Horizon communication over https. ['y', 'n']
+CONFIG_HORIZON_SSL=n
+
+# Secret key to use for Horizon Secret Encryption Key.
+CONFIG_HORIZON_SECRET_KEY=bdda0f58004a4c79bdf8061f9d3d9d60
+
+# PEM-encoded certificate to be used for SSL connections on the https
+# server (the certificate should not require a passphrase). To
+# generate a certificate, leave blank.
+CONFIG_HORIZON_SSL_CERT=
+
+# SSL keyfile corresponding to the certificate if one was specified.
+CONFIG_HORIZON_SSL_KEY=
+
+CONFIG_HORIZON_SSL_CACERT=
+
+# Password to use for the Object Storage service to authenticate with
+# the Identity service.
+CONFIG_SWIFT_KS_PW=1beea9a7e2194dea
+
+# Comma-separated list of devices to use as storage device for Object
+# Storage. Each entry must take the format /path/to/dev (for example,
+# specifying /dev/vdb installs /dev/vdb as the Object Storage storage
+# device; Packstack does not create the filesystem, you must do this
+# first). If left empty, Packstack creates a loopback device for test
+# setup.
+CONFIG_SWIFT_STORAGES=
+
+# Number of Object Storage storage zones; this number MUST be no
+# larger than the number of configured storage devices.
+CONFIG_SWIFT_STORAGE_ZONES=1
+
+# Number of Object Storage storage replicas; this number MUST be no
+# larger than the number of configured storage zones.
+CONFIG_SWIFT_STORAGE_REPLICAS=1
+
+# File system type for storage nodes. ['xfs', 'ext4']
+CONFIG_SWIFT_STORAGE_FSTYPE=ext4
+
+# Custom seed number to use for swift_hash_path_suffix in
+# /etc/swift/swift.conf. If you do not provide a value, a seed number
+# is automatically generated.
+CONFIG_SWIFT_HASH=b3890d3fc8b24b3a
+
+# Size of the Object Storage loopback file storage device.
+CONFIG_SWIFT_STORAGE_SIZE=2G
+
+# Password used by Orchestration service user to authenticate against
+# the database.
+CONFIG_HEAT_DB_PW=PW_PLACEHOLDER
+
+# Encryption key to use for authentication in the Orchestration
+# database (16, 24, or 32 chars).
+CONFIG_HEAT_AUTH_ENC_KEY=15405bbafcb94737
+
+# Password to use for the Orchestration service to authenticate with
+# the Identity service.
+CONFIG_HEAT_KS_PW=PW_PLACEHOLDER
+
+# Specify 'y' to install the Orchestration CloudWatch API. ['y', 'n']
+CONFIG_HEAT_CLOUDWATCH_INSTALL=n
+
+# Specify 'y' to install the Orchestration CloudFormation API. ['y',
+# 'n']
+CONFIG_HEAT_CFN_INSTALL=n
+
+# Name of the Identity domain for Orchestration.
+CONFIG_HEAT_DOMAIN=heat
+
+# Name of the Identity domain administrative user for Orchestration.
+CONFIG_HEAT_DOMAIN_ADMIN=heat_admin
+
+# Password for the Identity domain administrative user for
+# Orchestration.
+CONFIG_HEAT_DOMAIN_PASSWORD=PW_PLACEHOLDER
+
+# Specify 'y' to provision for demo usage and testing. ['y', 'n']
+CONFIG_PROVISION_DEMO=y
+
+# Specify 'y' to configure the OpenStack Integration Test Suite
+# (tempest) for testing. The test suite requires OpenStack Networking
+# to be installed. ['y', 'n']
+CONFIG_PROVISION_TEMPEST=n
+
+# CIDR network address for the floating IP subnet.
+CONFIG_PROVISION_DEMO_FLOATRANGE=172.24.4.224/28
+
+# The name to be assigned to the demo image in Glance (default
+# "cirros").
+CONFIG_PROVISION_IMAGE_NAME=cirros
+
+# A URL or local file location for an image to download and provision
+# in Glance (defaults to a URL for a recent "cirros" image).
+CONFIG_PROVISION_IMAGE_URL=http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
+
+# Format for the demo image (default "qcow2").
+CONFIG_PROVISION_IMAGE_FORMAT=qcow2
+
+# User to use when connecting to instances booted from the demo
+# image.
+CONFIG_PROVISION_IMAGE_SSH_USER=cirros
+
+# Name of the Integration Test Suite provisioning user. If you do not
+# provide a user name, Tempest is configured in a standalone mode.
+CONFIG_PROVISION_TEMPEST_USER=
+
+# Password to use for the Integration Test Suite provisioning user.
+CONFIG_PROVISION_TEMPEST_USER_PW=PW_PLACEHOLDER
+
+# CIDR network address for the floating IP subnet.
+CONFIG_PROVISION_TEMPEST_FLOATRANGE=172.24.4.224/28
+
+# URI of the Integration Test Suite git repository.
+CONFIG_PROVISION_TEMPEST_REPO_URI=https://github.com/openstack/tempest.git
+
+# Revision (branch) of the Integration Test Suite git repository.
+CONFIG_PROVISION_TEMPEST_REPO_REVISION=master
+
+# Specify 'y' to configure the Open vSwitch external bridge for an
+# all-in-one deployment (the L3 external bridge acts as the gateway
+# for virtual machines). ['y', 'n']
+CONFIG_PROVISION_ALL_IN_ONE_OVS_BRIDGE=n
+
+# Secret key for signing Telemetry service (ceilometer) messages.
+CONFIG_CEILOMETER_SECRET=09667c76079c4ecf
+
+# Password to use for Telemetry to authenticate with the Identity
+# service.
+CONFIG_CEILOMETER_KS_PW=1049876e6d034c15
+
+# Backend driver for Telemetry's group membership coordination.
+# ['redis', 'none']
+CONFIG_CEILOMETER_COORDINATION_BACKEND=redis
+
+# IP address of the server on which to install MongoDB.
+CONFIG_MONGODB_HOST=172.31.51.93
+
+# IP address of the server on which to install the Redis master
+# server.
+CONFIG_REDIS_MASTER_HOST=172.31.51.93
+
+# Port on which the Redis server(s) listens.
+CONFIG_REDIS_PORT=6379
+
+# Specify 'y' to have Redis try to use HA. ['y', 'n']
+CONFIG_REDIS_HA=n
+
+# Hosts on which to install Redis slaves.
+CONFIG_REDIS_SLAVE_HOSTS=
+
+# Hosts on which to install Redis sentinel servers.
+CONFIG_REDIS_SENTINEL_HOSTS=
+
+# Host to configure as the Redis coordination sentinel.
+CONFIG_REDIS_SENTINEL_CONTACT_HOST=
+
+# Port on which Redis sentinel servers listen.
+CONFIG_REDIS_SENTINEL_PORT=26379
+
+# Quorum value for Redis sentinel servers.
+CONFIG_REDIS_SENTINEL_QUORUM=2
+
+# Name of the master server watched by the Redis sentinel. ['[a-z]+']
+CONFIG_REDIS_MASTER_NAME=mymaster
+
+# Password to use for OpenStack Data Processing (sahara) to access
+# the database.
+CONFIG_SAHARA_DB_PW=PW_PLACEHOLDER
+
+# Password to use for OpenStack Data Processing to authenticate with
+# the Identity service.
+CONFIG_SAHARA_KS_PW=PW_PLACEHOLDER
+
+# Password to use for OpenStack Database-as-a-Service (trove) to
+# access the database.
+CONFIG_TROVE_DB_PW=PW_PLACEHOLDER
+
+# Password to use for OpenStack Database-as-a-Service to authenticate
+# with the Identity service.
+CONFIG_TROVE_KS_PW=PW_PLACEHOLDER
+
+# User name to use when OpenStack Database-as-a-Service connects to
+# the Compute service.
+CONFIG_TROVE_NOVA_USER=trove
+
+# Tenant to use when OpenStack Database-as-a-Service connects to the
+# Compute service.
+CONFIG_TROVE_NOVA_TENANT=services
+
+# Password to use when OpenStack Database-as-a-Service connects to
+# the Compute service.
+CONFIG_TROVE_NOVA_PW=PW_PLACEHOLDER
+
+# Password of the nagiosadmin user on the Nagios server.
+CONFIG_NAGIOS_PW=ae710cf3d0e54cbc
+
+# Please replace x.x.x.x below with the controller public IP
+NOVNCPROXY_URL="http://52.20.3.64:6080/vnc_auto.html"
+NOVA_VNC_ENABLED=True
+
+# Please replace x.x.x.x below with the controller internal IP
+VNCSERVER_LISTEN=172.31.51.93
+VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN
openstack/lab-01/ansible/sshd_config
@@ -0,0 +1,157 @@
+# $OpenBSD: sshd_config,v 1.90 2013/05/16 04:09:14 dtucker Exp $
+
+# This is the sshd server system-wide configuration file. See
+# sshd_config(5) for more information.
+
+# This sshd was compiled with PATH=/usr/local/bin:/usr/bin
+
+# The strategy used for options in the default sshd_config shipped with
+# OpenSSH is to specify options with their default value where
+# possible, but leave them commented. Uncommented options override the
+# default value.
+
+# If you want to change the port on a SELinux system, you have to tell
+# SELinux about this change.
+# semanage port -a -t ssh_port_t -p tcp #PORTNUMBER
+#
+#Port 22
+#AddressFamily any
+#ListenAddress 0.0.0.0
+#ListenAddress ::
+
+# The default requires explicit activation of protocol 1
+#Protocol 2
+
+# HostKey for protocol version 1
+#HostKey /etc/ssh/ssh_host_key
+# HostKeys for protocol version 2
+HostKey /etc/ssh/ssh_host_rsa_key
+#HostKey /etc/ssh/ssh_host_dsa_key
+HostKey /etc/ssh/ssh_host_ecdsa_key
+
+# Lifetime and size of ephemeral version 1 server key
+#KeyRegenerationInterval 1h
+#ServerKeyBits 1024
+
+# Ciphers and keying
+#RekeyLimit default none
+
+# Logging
+# obsoletes QuietMode and FascistLogging
+#SyslogFacility AUTH
+SyslogFacility AUTHPRIV
+#LogLevel INFO
+
+# Authentication:
+
+#LoginGraceTime 2m
+#PermitRootLogin yes
+#StrictModes yes
+#MaxAuthTries 6
+#MaxSessions 10
+
+#RSAAuthentication yes
+#PubkeyAuthentication yes
+
+# The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2
+# but this is overridden so installations will only check .ssh/authorized_keys
+AuthorizedKeysFile .ssh/authorized_keys
+
+#AuthorizedPrincipalsFile none
+
+#AuthorizedKeysCommand none
+#AuthorizedKeysCommandUser nobody
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+#RhostsRSAAuthentication no
+# similar for protocol version 2
+#HostbasedAuthentication no
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# RhostsRSAAuthentication and HostbasedAuthentication
+#IgnoreUserKnownHosts no
+# Don't read the user's ~/.rhosts and ~/.shosts files
+#IgnoreRhosts yes
+
+# To disable tunneled clear text passwords, change to no here!
+#PasswordAuthentication yes
+#PermitEmptyPasswords no
+PasswordAuthentication no
+
+# Change to no to disable s/key passwords
+#ChallengeResponseAuthentication yes
+ChallengeResponseAuthentication no
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+#KerberosGetAFSToken no
+#KerberosUseKuserok yes
+
+# GSSAPI options
+#GSSAPIAuthentication no
+GSSAPIAuthentication yes
+#GSSAPICleanupCredentials yes
+GSSAPICleanupCredentials yes
+#GSSAPIStrictAcceptorCheck yes
+#GSSAPIKeyExchange no
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+# WARNING: 'UsePAM no' is not supported in Red Hat Enterprise Linux and may cause several
+# problems.
+#UsePAM no
+UsePAM yes
+
+#AllowAgentForwarding yes
+#AllowTcpForwarding yes
+#GatewayPorts no
+#X11Forwarding no
+X11Forwarding yes
+#X11DisplayOffset 10
+#X11UseLocalhost yes
+#PrintMotd yes
+#PrintLastLog yes
+#TCPKeepAlive yes
+#UseLogin no
+UsePrivilegeSeparation sandbox # Default for new installations.
+#PermitUserEnvironment no
+#Compression delayed
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#ShowPatchLevel no
+#UseDNS yes
+#PidFile /var/run/sshd.pid
+#MaxStartups 10:30:100
+#PermitTunnel no
+#ChrootDirectory none
+#VersionAddendum none
+
+# no default banner path
+#Banner none
+
+# Accept locale-related environment variables
+AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
+AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
+AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
+AcceptEnv XMODIFIERS
+
+# override default of no subsystems
+Subsystem sftp /usr/libexec/openssh/sftp-server
+
+# Uncomment this if you want to use .local domain
+#Host *.local
+# CheckHostIP no
+
+# Example of overriding settings on a per-user basis
+#Match User anoncvs
+# X11Forwarding no
+# AllowTcpForwarding no
+# ForceCommand cvs server
\ No newline at end of file
openstack/lab-01/README.md
@@ -0,0 +1,138 @@
+# Lab 1 - Controller Install
+
+ Lab Objectives:
+
+ 0. Install an OpenStack controller (with RDO & PackStack)
+
+## Connect to your controller instance:
+
+ 0. `chmod 400 student.pem`
+ 0. `ssh centos@<Controller IP> -i student.pem`
+
+## Install PackStack on controller:
+
+ :red_circle: TODO: brief paragraph about packstack (i.e. that it is puppet based)
+
+ 0. Install PackStack dependencies
+
+ NOTE: bash should look like this now: `[centos@controller ~]`
+
+ * `sudo yum update -y`
+ * `sudo yum install -y https://rdoproject.org/repos/rdo-release.rpm`
+ * `sudo yum install -y openstack-packstack vim nano screen`
+
+ 0. Enable root ssh access
+
+ `packstack` requires the ability to ssh as root into the target machine
+ (the machine to which OpenStack services are being installed,
+ in our case the controller instance). The following config changes enable
+ key-based root ssh login to our controller and set up authorized keys to allow
+ the centos user to ssh in as root using their private key.
+
+ 0. Alter SSH Daemon config to permit root login and restart it to take effect
+
+ Edit the sshd_config file by removing the comment on PermitRootLogin and set it to yes.
+
+ * `sudo nano /etc/ssh/sshd_config`
+
+ ```
+ # --- snip --- #
+ # Authentication:
+
+ #LoginGraceTime 2m
+ PermitRootLogin yes
+ #StrictModes yes
+ #MaxAuthTries 6
+ #MaxSessions 10
+ # --- end-snip --- #
+ ```
+
+ * `sudo systemctl restart sshd.service`
+
+ 0. Setup root private key login and appropriate keys
+
+ * `ssh root@localhost` - expected failure
+ * `sudo cat /root/.ssh/authorized_keys` - the keys currently allowed
+ * `ssh-keygen` - accept defaults
+ * `cat /home/centos/.ssh/id_rsa.pub` - the user's public key
+ * `cat /home/centos/.ssh/id_rsa.pub | sudo tee -a /root/.ssh/authorized_keys` - append the controller's key
+ * `sudo cat /root/.ssh/authorized_keys` - one more key should be there
+ * `ssh root@localhost` - expected success
+ * `exit` - exit successful ssh session
+
+ 0. Run packstack (newlines and `\`'s are for clarity, not required)
+
+ Replace x.x.x.x below with the Controller Internal IP address
+
+ * `packstack --install-hosts=x.x.x.x --keystone-admin-passwd=supersecret --provision-demo=n`
+
+ Expected Result:
+
+ ```
+ Welcome to the Packstack setup utility
+
+ The installation log file is available at: /var/tmp/packstack/20150824-185121-1WICWt/openstack-setup.log
+
+ Installing:
+ Clean Up [ DONE ]
+ Discovering ip protocol version [ DONE ]
+ Setting up ssh keys [ DONE ]
+ Preparing servers [ DONE ]
+ Pre installing Puppet and discovering hosts' details [ DONE ]
+ Adding pre install manifest entries [ DONE ]
+ Setting up CACERT [ DONE ]
+
+ ----- snip, many hundreds of seconds pass -----
+
+ **** Installation completed successfully ******
+
+ Additional information:
+ * Time synchronization installation was skipped. Please note that unsynchronized time on server instances might be problem for some OpenStack components.
+ * File /root/keystonerc_admin has been created on OpenStack client host 52.2.224.157. To use the command line tools you need to source the file.
+ * To access the OpenStack Dashboard browse to http://x.x.x.x/dashboard .
+ Please, find your login credentials stored in the keystonerc_admin in your home directory.
+ * To use Nagios, browse to http://x.x.x.x/nagios username: nagiosadmin, password: 888aa1cb6e544ffa
+ * Because of the kernel update the host x.x.x.x requires reboot.
+ * Because of the kernel update the host x.x.x.x requires reboot.
+ * The installation log file is available at: /var/tmp/packstack/20150824-002755-FL1Fzg/openstack-setup.log
+ * The generated manifests are available at: /var/tmp/packstack/20150824-002755-FL1Fzg/manifests
+ ```
+
+ 0. `sudo reboot`
+ 0. `ping <Controller IP>` - Wait for it to fail and then come back
+ 0. `ssh centos@<Controller IP> -i student.pem`
+
+ 0. Fix the Horizon Dashboard ServerAlias configuration
+
+ * `sudo nano /etc/httpd/conf.d/15-horizon_vhost.conf`
+
+ ```
+ # Please add another ServerAlias like the ones shown below, replacing x.x.x.x with the Controller Public IP address
+ # ---- snip ---- #
+ ## Server aliases
+ ServerAlias 192.168.0.195
+ ServerAlias ip-192-168-0-195.ec2.internal
+ ServerAlias localhost
+ ServerAlias x.x.x.x
+ # -- end-snip -- #
+ ```
+
+ * `sudo systemctl restart httpd.service`
+
+ 0. Fix the nova vnc proxy service
+
+ * `sudo vim /etc/nova/nova.conf` or `sudo nano /etc/nova/nova.conf`
+
+ ```
+ # Please replace x.x.x.x below with the Controller Public IP address
+ novncproxy_base_url=http://x.x.x.x:6080/vnc_auto.html
+ vncserver_proxyclient_address=x.x.x.x
+
+ ```
+ * `sudo systemctl restart openstack-nova-novncproxy.service`
+ * `sudo systemctl restart openstack-nova-compute.service`
+
+
+ Official reference documentation: [RDO Quick Start](https://www.rdoproject.org/Quickstart)
+
+#### [Continue to the next lab](../lab-02)
openstack/lab-02/img/.DS_Store
Binary file
openstack/lab-02/img/horizon-dashboard.png
Binary file
openstack/lab-02/img/horizon-login.png
Binary file
openstack/lab-02/.DS_Store
Binary file
openstack/lab-02/Instructor-README.md
@@ -0,0 +1,20 @@
+# Locations to find items
+
+| Info to find | Location |
+| -------------| -------- |
+| Available VCPUs | System > Hypervisors |
+| Available Memory | System > Hypervisors |
+| Available Disk | System > Hypervisors |
+| Total Glance Images | System > Resource View > Glance > image.upload |
+| Total Glance Disk Size | System > Resource View > Glance > image.size |
+| Total number of Flavors | System > Flavors |
+| Total number of OpenStack Services | System > System Information > Services |
+| Total number of Network Agents | System > System Information > Network Agents |
+| Total number of Users | Identity > Users |
+| Total number of Projects | Identity > Projects |
+| Default Project RAM Quota | System > Defaults |
+| Default Project VCPU Quota | System > Defaults |
+| Demo Private Subnet Address Range | System > Network > Demo |
+| Demo Router IP address | System > Network > Demo > Ports |
+| Demo DHCP IP address | System > Network > Demo > Ports |
+
openstack/lab-02/README.md
@@ -0,0 +1,36 @@
+# Lab 2 - Horizon Interface
+
+ Lab Objectives:
+
+ 0. Become familiar with the OpenStack Horizon Interface
+
+## Login to the Horizon Interface
+
+ 0. Login to the OpenStack Horizon Web Interface by navigating your browser to the public IP address of your instance, explore the accessible pages and fill in the table of information
+
+ 
+
+ 
+
+
+ 0. Navigate around the interface, find and document the below values
+
+ | Info to find | Value |
+ | ---------------------------------- | ----- |
+ | Available VCPUs | |
+ | Available Memory | |
+ | Available Disk | |
+ | Total Glance Images | |
+ | Total Glance Disk Size | |
+ | Total number of Flavors | |
+ | Total number of OpenStack Services | |
+ | Total number of Network Agents | |
+ | Total number of Users | |
 | Total number of Projects           |       |
+ | Default Project RAM Quota | |
+ | Default Project VCPU Quota | |
+ | Demo Private Subnet Address Range | |
+ | Demo Router IP address | |
+ | Demo DHCP IP address | |
+
+#### [Continue to the next lab](../lab-03)
openstack/lab-03/img/.DS_Store
Binary file
openstack/lab-03/img/create-project.png
Binary file
openstack/lab-03/img/create-project2.png
Binary file
openstack/lab-03/img/create-project3.png
Binary file
openstack/lab-03/img/create-user.png
Binary file
openstack/lab-03/img/create-user2.png
Binary file
openstack/lab-03/img/create-user3.png
Binary file
openstack/lab-03/img/project-quota.png
Binary file
openstack/lab-03/README.md
@@ -0,0 +1,34 @@
+# Lab 3 - Projects and Users
+
+ Lab Objectives:
+
+ 0. Demonstrate the ability to create Projects and Users from the Horizon Web Interface
+
+## Add a Project:
+
+ 0. Navigate to: Identity > Project > +Create Project
+
+ 
+ 
+ 
+
+## Add a User:
+
+ 0. Navigate to: Identity > Users > +Create User
+
+ 
+ 
+ 
+
+## Member Interface
+
+ 0. Logout from the `admin` account and login as `student`
+ 0. Explore :red_circle: TODO: table of things to find/document
+
+## Extra Credit
+
+ 0. Set the student's project quota to a realistic level, given the available VCPUs and Memory on our system
+ 0. View change from the student login on the Overview page
+ 
+
+#### [Continue to the next lab](../lab-04)
openstack/lab-04/img/create-network.png
Binary file
openstack/lab-04/img/create-network2.png
Binary file
openstack/lab-04/img/create-network3.png
Binary file
openstack/lab-04/img/create-network4.png
Binary file
openstack/lab-04/img/create-network5.png
Binary file
openstack/lab-04/img/create-router.png
Binary file
openstack/lab-04/img/create-router10.png
Binary file
openstack/lab-04/img/create-router11.png
Binary file
openstack/lab-04/img/create-router2.png
Binary file
openstack/lab-04/img/create-router3.png
Binary file
openstack/lab-04/img/create-router4.png
Binary file
openstack/lab-04/img/create-router5.png
Binary file
openstack/lab-04/img/create-router6.png
Binary file
openstack/lab-04/img/create-router7.png
Binary file
openstack/lab-04/img/create-router8.png
Binary file
openstack/lab-04/img/create-router9.png
Binary file
openstack/lab-04/README.md
@@ -0,0 +1,57 @@
+# Lab 4 - Networking
+
+ Lab Objectives:
+
+ 0. Demonstrate the ability to create Networks and Routers from the Horizon Web Interface
+
+## Login as student
+
+ 0. Login using the credentials created in the [previous lab](../lab-03)
+
+## Add a Private Network
+
+ 0. Navigate to: Project > Network > Networks > +Create Network
+
+ :red_circle: TODO need a new screenshot showing 2 default shared networks?
+ 
+ 
+ 
+
+ > :white_check_mark: **Additional Info**:
+ >
+ > The Allocation Pools is the `start,end` addresses for the pools.
+ > The entry form for this field does not parse spaces.
+ > Example Pool: `192.168.1.100,192.168.1.120`
+
+ 
+ 
+
+## Add a Router
+
+ 0. Navigate to: Project > Network > Network Topology > +Create Router
+
+ 
+ 
+
+ 0. Either click on "View Router Details" or choose our new router from the list on Project > Network > Routers
+
+ 
+ 
+
+ 0. Add an interface to the new router
+
+ 
+ 
+ 
+
+ 0. Set the Gateway for this network
+
+ 
+ 
+
+ 0. View the Network Topology and Networks page and verify configurations
+
+ 
+ 
+
+#### [Continue to the next lab](../lab-05)
openstack/rdo/lab-01/img/horizon-dashboard.png
Binary file
openstack/rdo/lab-01/img/horizon-login.png
Binary file
openstack/rdo/lab-01/README.md
@@ -0,0 +1,78 @@
+# Lab 1 - RDO Controller
+
+ Lab Objectives:
+
+ 0. Install a PackStack (RDO) controller
+
+## Connect to your _controller_ instance:
+ 0. `chmod 400 student.pem`
+ 0. `ssh centos@<CONTROLLER IP> -i student.pem`
+ 0. `sudo whoami`
+
+## Install PackStack:
+
+:red_circle: TODO: brief paragraph about packstack (i.e. that it is puppet based)
+
+ 0. `sudo yum update -y`
+ 0. `sudo yum install -y https://rdoproject.org/repos/rdo-release.rpm`
+ 0. `sudo yum install openstack-packstack vim htop -y`
+ 0. Enable root ssh access
+
+ 0. Alter SSH Daemon config to permit root login and restart it to take effect
+
+ * `sudo vim /etc/ssh/sshd_config` or `sudo nano /etc/ssh/sshd_config`
+
+ ```
+ ...
+ # Authentication:
+
+ #LoginGraceTime 2m
+ PermitRootLogin yes
+ #StrictModes yes
+ #MaxAuthTries 6
+ #MaxSessions 10
+ ...
+ ```
+
+ * `sudo service sshd restart`
+
+ 0. Setup root private key login
+ * `ssh root@localhost` - expected failure
+ * `cat /root/.ssh/authorized_keys` - the keys currently allowed
+ * `cat /home/centos/.ssh/id_rsa.pub` - the controller's public key (if absent generate with `ssh-keygen`)
+ * `cat /home/centos/.ssh/id_rsa.pub | sudo tee -a /root/.ssh/authorized_keys` - append controllers key
+ * `ssh localhost` - expected success
+
+ 0. Generate a PackStack config file (answers)
+
+ * `packstack --gen-answer-file packstack-answers.txt`
+
+ 0. Edit packstack answers to tweak install configuration
+
+ * `vim packstack-answers.txt` or `nano packstack-answers.txt`
+
+ ```
+ CONFIG_CONTROLLER_HOST=<CONTROLLER IP>
+ CONFIG_KEYSTONE_ADMIN_PW=supersecret
+
+ ```
+
+ 0. `packstack --answer-file packstack-answers.txt`
+
+ 0. Login to the OpenStack Horizon Web Interface by navigating your browser to the public IP address of your instance, explore the accessible pages and fill in the table of information
+
+ 
+
+ 
+
+ :red_circle: TODO items from interface
+
+ | Info to find | Value |
+ | -------------| ----- |
+ | Item 1 | |
+ | Item 2 | |
+
+
+ For future reference see [RDO Quick Start](https://www.rdoproject.org/Quickstart)
+
+#### [Next Lab](../lab-02)
openstack/rdo/lab-02/img/.DS_Store
Binary file
openstack/rdo/lab-02/img/create-network.png
Binary file
openstack/rdo/lab-02/img/create-network2.png
Binary file
openstack/rdo/lab-02/img/create-network3.png
Binary file
openstack/rdo/lab-02/img/create-network4.png
Binary file
openstack/rdo/lab-02/img/create-network5.png
Binary file
openstack/rdo/lab-02/img/create-project.png
Binary file
openstack/rdo/lab-02/img/create-project2.png
Binary file
openstack/rdo/lab-02/img/create-project3.png
Binary file
openstack/rdo/lab-02/img/create-router.png
Binary file
openstack/rdo/lab-02/img/create-router10.png
Binary file
openstack/rdo/lab-02/img/create-router11.png
Binary file
openstack/rdo/lab-02/img/create-router2.png
Binary file
openstack/rdo/lab-02/img/create-router3.png
Binary file
openstack/rdo/lab-02/img/create-router4.png
Binary file
openstack/rdo/lab-02/img/create-router5.png
Binary file
openstack/rdo/lab-02/img/create-router6.png
Binary file
openstack/rdo/lab-02/img/create-router7.png
Binary file
openstack/rdo/lab-02/img/create-router8.png
Binary file
openstack/rdo/lab-02/img/create-router9.png
Binary file
openstack/rdo/lab-02/img/create-user.png
Binary file
openstack/rdo/lab-02/img/create-user2.png
Binary file
openstack/rdo/lab-02/img/create-user3.png
Binary file
openstack/rdo/lab-02/.DS_Store
Binary file
openstack/rdo/lab-02/README.md
@@ -0,0 +1,73 @@
+# Lab 2 - RDO Horizon Interface
+
+ Lab Objectives:
+
+ 0. Become familiar with the OpenStack Horizon Interface
+
+## Add a Project:
+
+ 0. Navigate to: Identity > Project > +Create Project
+
+ 
+ 
+ 
+
+## Add a User:
+
+ 0. Navigate to: Identity > Users > +Create User
+
+ 
+ 
+ 
+
+## Member Interface
+
+ 0. Logout from the `admin` account and login as `student`
+ 0. Explore :red_circle: TODO: table of things to find/document
+
+## Add a Private Network
+
+ 0. Navigate to: Project > Network > Networks > +Create Network
+
+ 
+ 
+ 
+ 
+
+ > :white_check_mark: **Additional Info**:
+ >
+ > The Allocation Pools field is the `start,end` addresses for the pools.
+ > The entry form for this field does not parse spaces.
+ > Example Pool: `192.168.1.100,192.168.1.120`
+
+ 
+
+## Add a Router
+
+ 0. Navigate to: Project > Network > Network Topology > +Create Router
+
+ 
+ 
+
+ 0. Either click on "View Router Details" or choose our new router from the list on Project > Network > Routers
+
+ 
+ 
+
+ 0. Add an interface to the new router
+
+ 
+ 
+ 
+
+ 0. Set the Gateway for this network
+
+ 
+ 
+
+ 0. View the Network Topology and Networks page and verify configurations
+
+ 
+ 
+
+#### [Next Lab](../lab-03)
openstack/rdo/lab-03/.README.md.swp
Binary file
openstack/rdo/lab-03/README.md
@@ -0,0 +1,32 @@
+# Lab 3 - RDO Horizon Interface, Part Deux
+
+ Lab Objectives:
+
+ 0. Launch an instance from the Horizon interface
+
+## Launch an Instance:
+
+ 0. Open the Horizon interface (navigate your browser to the public IP address of the controller)
+ 0. Login to the Horizon interface as the user created in [Lab 2](../lab-02)
+ 0. Navigate to: Project > Compute > Instances > Launch Instance
+
+ 
+
+ 0. Set details and Launch
+
+ 
+ 
+
+ 0. Open console, login and interact with the newly launched instance
+
+ :red_circle: TODO: haven't cracked the nut yet on RDO's VNC PROXY
+ 
+ 
+ 
+
+
+ 0. Use `ssh` to access and interact with the newly launched instance
+
+ * Ensure you are currently ssh'ed into the controller instance
+ * If not follow steps from previous lab
+ * `ssh cirros@10.0.0.2` (password `cubswin:)`)
openstack/rdo/.DS_Store
Binary file
openstack/rdo/README.md
@@ -0,0 +1,29 @@
+# RDO Lab
+
+:red_circle: TODO A well-thought-out overview paragraph about what this lab covers should go here.
+
+Lab Objectives:
+
+## Lab 0 - Document Lab IP addresses
+
+Your instructor will provide two IP addresses for this lab.
+
+|          | Controller    | Compute     |
+| -------- | ------------- | ----------- |
+| Public   | CONTROLLER IP | COMPUTE IP  |
+| Internal | A.B.C.D       | A.B.C.D     |
+
+0. Edit this README.md file and add the ip addresses to your forked repository.
+  * You can make these changes in the github webpage or from your checked-out copy.
+ * Make sure your changes are committed and pushed to github.com and then refresh the page.
+
+0. Record your instance's internal IP address in the table
+
+ * `ssh centos@<IP> -i student.pem`
+ * `ip addr show dev eth0`
+
+0. Set the hostname on each to help with command line differentiation
+
+ * `sudo hostname <controller or compute>`
+ * `bash` to show the result
+
openstack/answers.txt
@@ -0,0 +1,1097 @@
+[general]
+
+# Path to a public key to install on servers. If a usable key has not
+# been installed on the remote servers, the user is prompted for a
+# password and this key is installed so the password will not be
+# required again.
+CONFIG_SSH_KEY=/root/.ssh/id_rsa.pub
+
+# Default password to be used everywhere (overridden by passwords set
+# for individual services or users).
+CONFIG_DEFAULT_PASSWORD=ravellosystems
+
+# Specify 'y' to install MariaDB. ['y', 'n']
+CONFIG_MARIADB_INSTALL=y
+
+# Specify 'y' to install OpenStack Image Service (glance). ['y', 'n']
+CONFIG_GLANCE_INSTALL=y
+
+# Specify 'y' to install OpenStack Block Storage (cinder). ['y', 'n']
+CONFIG_CINDER_INSTALL=y
+
+# Specify 'y' to install OpenStack Shared File System (manila). ['y',
+# 'n']
+CONFIG_MANILA_INSTALL=n
+
+# Specify 'y' to install OpenStack Compute (nova). ['y', 'n']
+CONFIG_NOVA_INSTALL=y
+
+# Specify 'y' to install OpenStack Networking (neutron); otherwise,
+# Compute Networking (nova) will be used. ['y', 'n']
+CONFIG_NEUTRON_INSTALL=y
+
+# Specify 'y' to install OpenStack Dashboard (horizon). ['y', 'n']
+CONFIG_HORIZON_INSTALL=y
+
+# Specify 'y' to install OpenStack Object Storage (swift). ['y', 'n']
+CONFIG_SWIFT_INSTALL=y
+
+# Specify 'y' to install OpenStack Metering (ceilometer). ['y', 'n']
+CONFIG_CEILOMETER_INSTALL=y
+
+# Specify 'y' to install OpenStack Orchestration (heat). ['y', 'n']
+CONFIG_HEAT_INSTALL=n
+
+# Specify 'y' to install OpenStack Data Processing (sahara). ['y',
+# 'n']
+CONFIG_SAHARA_INSTALL=n
+
+# Specify 'y' to install OpenStack Database (trove) ['y', 'n']
+CONFIG_TROVE_INSTALL=n
+
+# Specify 'y' to install OpenStack Bare Metal Provisioning (ironic).
+# ['y', 'n']
+CONFIG_IRONIC_INSTALL=n
+
+# Specify 'y' to install the OpenStack Client packages (command-line
+# tools). An admin "rc" file will also be installed. ['y', 'n']
+CONFIG_CLIENT_INSTALL=y
+
+# Comma-separated list of NTP servers. Leave plain if Packstack
+# should not install ntpd on instances.
+CONFIG_NTP_SERVERS=
+
+# Specify 'y' to install Nagios to monitor OpenStack hosts. Nagios
+# provides additional tools for monitoring the OpenStack environment.
+# ['y', 'n']
+CONFIG_NAGIOS_INSTALL=y
+
+# Comma-separated list of servers to be excluded from the
+# installation. This is helpful if you are running Packstack a second
+# time with the same answer file and do not want Packstack to
+# overwrite these server's configurations. Leave empty if you do not
+# need to exclude any servers.
+EXCLUDE_SERVERS=
+
+# Specify 'y' if you want to run OpenStack services in debug mode;
+# otherwise, specify 'n'. ['y', 'n']
+CONFIG_DEBUG_MODE=n
+
+# IP address of the server on which to install OpenStack services
+# specific to the controller role (for example, API servers or
+# dashboard).
+CONFIG_CONTROLLER_HOST=192.168.0.10
+
+# List of IP addresses of the servers on which to install the Compute
+# service.
+CONFIG_COMPUTE_HOSTS=192.168.0.12,192.168.0.13
+
+# List of IP addresses of the server on which to install the network
+# service such as Compute networking (nova network) or OpenStack
+# Networking (neutron).
+CONFIG_NETWORK_HOSTS=192.168.0.11
+
+# Specify 'y' if you want to use VMware vCenter as hypervisor and
+# storage; otherwise, specify 'n'. ['y', 'n']
+CONFIG_VMWARE_BACKEND=n
+
+# Specify 'y' if you want to use unsupported parameters. This should
+# be used only if you know what you are doing. Issues caused by using
+# unsupported options will not be fixed before the next major release.
+# ['y', 'n']
+CONFIG_UNSUPPORTED=n
+
+# IP address of the VMware vCenter server.
+CONFIG_VCENTER_HOST=
+
+# User name for VMware vCenter server authentication.
+CONFIG_VCENTER_USER=
+
+# Password for VMware vCenter server authentication.
+CONFIG_VCENTER_PASSWORD=
+
+# Name of the VMware vCenter cluster.
+CONFIG_VCENTER_CLUSTER_NAME=
+
+# (Unsupported!) IP address of the server on which to install
+# OpenStack services specific to storage servers such as Image or
+# Block Storage services.
+CONFIG_STORAGE_HOST=192.168.0.10
+
+# (Unsupported!) IP address of the server on which to install
+# OpenStack services specific to OpenStack Data Processing (sahara).
+CONFIG_SAHARA_HOST=192.168.0.10
+
+# Specify 'y' to enable the EPEL repository (Extra Packages for
+# Enterprise Linux). ['y', 'n']
+CONFIG_USE_EPEL=y
+
+# Comma-separated list of URLs for any additional yum repositories,
+# to use for installation.
+CONFIG_REPO=
+
+# To subscribe each server with Red Hat Subscription Manager, include
+# this with CONFIG_RH_PW.
+CONFIG_RH_USER=
+
+# To subscribe each server to receive updates from a Satellite
+# server, provide the URL of the Satellite server. You must also
+# provide a user name (CONFIG_SATELLITE_USERNAME) and password
+# (CONFIG_SATELLITE_PASSWORD) or an access key (CONFIG_SATELLITE_AKEY)
+# for authentication.
+CONFIG_SATELLITE_URL=
+
+# To subscribe each server with Red Hat Subscription Manager, include
+# this with CONFIG_RH_USER.
+CONFIG_RH_PW=
+
+# Specify 'y' to enable RHEL optional repositories. ['y', 'n']
+CONFIG_RH_OPTIONAL=y
+
+# HTTP proxy to use with Red Hat Subscription Manager.
+CONFIG_RH_PROXY=
+
+# Port to use for Red Hat Subscription Manager's HTTP proxy.
+CONFIG_RH_PROXY_PORT=
+
+# User name to use for Red Hat Subscription Manager's HTTP proxy.
+CONFIG_RH_PROXY_USER=
+
+# Password to use for Red Hat Subscription Manager's HTTP proxy.
+CONFIG_RH_PROXY_PW=
+
+# User name to authenticate with the RHN Satellite server; if you
+# intend to use an access key for Satellite authentication, leave this
+# blank.
+CONFIG_SATELLITE_USER=
+
+# Password to authenticate with the RHN Satellite server; if you
+# intend to use an access key for Satellite authentication, leave this
+# blank.
+CONFIG_SATELLITE_PW=
+
+# Access key for the Satellite server; if you intend to use a user
+# name and password for Satellite authentication, leave this blank.
+CONFIG_SATELLITE_AKEY=
+
+# Certificate path or URL of the certificate authority to verify that
+# the connection with the Satellite server is secure. If you are not
+# using Satellite in your deployment, leave this blank.
+CONFIG_SATELLITE_CACERT=
+
+# Profile name that should be used as an identifier for the system in
+# RHN Satellite (if required).
+CONFIG_SATELLITE_PROFILE=
+
+# Comma-separated list of flags passed to the rhnreg_ks command.
+# Valid flags are: novirtinfo, norhnsd, nopackages ['novirtinfo',
+# 'norhnsd', 'nopackages']
+CONFIG_SATELLITE_FLAGS=
+
+# HTTP proxy to use when connecting to the RHN Satellite server (if
+# required).
+CONFIG_SATELLITE_PROXY=
+
+# User name to authenticate with the Satellite-server HTTP proxy.
+CONFIG_SATELLITE_PROXY_USER=
+
+# User password to authenticate with the Satellite-server HTTP proxy.
+CONFIG_SATELLITE_PROXY_PW=
+
+# Service to be used as the AMQP broker. Allowed values are: qpid,
+# rabbitmq ['qpid', 'rabbitmq']
+CONFIG_AMQP_BACKEND=rabbitmq
+
+# IP address of the server on which to install the AMQP service.
+CONFIG_AMQP_HOST=192.168.0.10
+
+# Specify 'y' to enable SSL for the AMQP service. ['y', 'n']
+CONFIG_AMQP_ENABLE_SSL=n
+
+# Specify 'y' to enable authentication for the AMQP service. ['y',
+# 'n']
+CONFIG_AMQP_ENABLE_AUTH=n
+
+# Password for the NSS certificate database of the AMQP service.
+CONFIG_AMQP_NSS_CERTDB_PW=PW_PLACEHOLDER
+
+# Port on which the AMQP service listens for SSL connections.
+CONFIG_AMQP_SSL_PORT=5671
+
+# File name of the CA certificate that the AMQP service will use for
+# verification.
+CONFIG_AMQP_SSL_CACERT_FILE=/etc/pki/tls/certs/amqp_selfcert.pem
+
+# File name of the certificate that the AMQP service will use for
+# verification.
+CONFIG_AMQP_SSL_CERT_FILE=/etc/pki/tls/certs/amqp_selfcert.pem
+
+# File name of the private key that the AMQP service will use for
+# verification.
+CONFIG_AMQP_SSL_KEY_FILE=/etc/pki/tls/private/amqp_selfkey.pem
+
+# Specify 'y' to automatically generate a self-signed SSL certificate
+# and key. ['y', 'n']
+CONFIG_AMQP_SSL_SELF_SIGNED=y
+
+# User for AMQP authentication.
+CONFIG_AMQP_AUTH_USER=amqp_user
+
+# Password for AMQP authentication.
+CONFIG_AMQP_AUTH_PASSWORD=PW_PLACEHOLDER
+
+# IP address of the server on which to install MariaDB. If a MariaDB
+# installation was not specified in CONFIG_MARIADB_INSTALL, specify
+# the IP address of an existing database server (a MariaDB cluster can
+# also be specified).
+CONFIG_MARIADB_HOST=192.168.0.10
+
+# User name for the MariaDB administrative user.
+CONFIG_MARIADB_USER=root
+
+# Password for the MariaDB administrative user.
+CONFIG_MARIADB_PW=80320de658b440eb
+
+# Password to use for the Identity service (keystone) to access the
+# database.
+CONFIG_KEYSTONE_DB_PW=ef7e49833f324450
+
+# Default region name to use when creating tenants in the Identity
+# service.
+CONFIG_KEYSTONE_REGION=RegionOne
+
+# Token to use for the Identity service API.
+CONFIG_KEYSTONE_ADMIN_TOKEN=8caecc30f0374052a8e2eb4492221d98
+
+# Email address for the Identity service 'admin' user. Defaults to
+CONFIG_KEYSTONE_ADMIN_EMAIL=root@localhost
+
+# User name for the Identity service 'admin' user. Defaults to
+# 'admin'.
+CONFIG_KEYSTONE_ADMIN_USERNAME=admin
+
+# Password to use for the Identity service 'admin' user.
+CONFIG_KEYSTONE_ADMIN_PW=ravellosystems
+
+# Password to use for the Identity service 'demo' user.
+CONFIG_KEYSTONE_DEMO_PW=ravellosystems
+
+# Identity service API version string. ['v2.0', 'v3']
+CONFIG_KEYSTONE_API_VERSION=v2.0
+
+# Identity service token format (UUID or PKI). The recommended format
+# for new deployments is UUID. ['UUID', 'PKI']
+CONFIG_KEYSTONE_TOKEN_FORMAT=UUID
+
+# Name of service to use to run the Identity service (keystone or
+# httpd). ['keystone', 'httpd']
+CONFIG_KEYSTONE_SERVICE_NAME=httpd
+
+# Type of Identity service backend (sql or ldap). ['sql', 'ldap']
+CONFIG_KEYSTONE_IDENTITY_BACKEND=sql
+
+# URL for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_URL=ldap://192.168.0.10
+
+# User DN for the Identity service LDAP backend. Used to bind to the
+# LDAP server if the LDAP server does not allow anonymous
+# authentication.
+CONFIG_KEYSTONE_LDAP_USER_DN=
+
+# User DN password for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_PASSWORD=
+
+# Base suffix for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_SUFFIX=
+
+# Query scope for the Identity service LDAP backend (base, one, sub).
+# ['base', 'one', 'sub']
+CONFIG_KEYSTONE_LDAP_QUERY_SCOPE=one
+
+# Query page size for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_PAGE_SIZE=-1
+
+# User subtree for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_SUBTREE=
+
+# User query filter for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_FILTER=
+
+# User object class for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_OBJECTCLASS=
+
+# User ID attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_ID_ATTRIBUTE=
+
+# User name attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_NAME_ATTRIBUTE=
+
+# User email address attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_MAIL_ATTRIBUTE=
+
+# User-enabled attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_ENABLED_ATTRIBUTE=
+
+# Bit mask applied to user-enabled attribute for the Identity service
+# LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_ENABLED_MASK=-1
+
+# Value of enabled attribute which indicates user is enabled for the
+# Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_USER_ENABLED_DEFAULT=TRUE
+
+# Specify 'y' if users are disabled (not enabled) in the Identity
+# service LDAP backend. ['n', 'y']
+CONFIG_KEYSTONE_LDAP_USER_ENABLED_INVERT=n
+
+# Comma-separated list of attributes stripped from LDAP user entry
+# upon update.
+CONFIG_KEYSTONE_LDAP_USER_ATTRIBUTE_IGNORE=
+
+# Identity service LDAP attribute mapped to default_project_id for
+# users.
+CONFIG_KEYSTONE_LDAP_USER_DEFAULT_PROJECT_ID_ATTRIBUTE=
+
+# Specify 'y' if you want to be able to create Identity service users
+# through the Identity service interface; specify 'n' if you will
+# create directly in the LDAP backend. ['n', 'y']
+CONFIG_KEYSTONE_LDAP_USER_ALLOW_CREATE=n
+
+# Specify 'y' if you want to be able to update Identity service users
+# through the Identity service interface; specify 'n' if you will
+# update directly in the LDAP backend. ['n', 'y']
+CONFIG_KEYSTONE_LDAP_USER_ALLOW_UPDATE=n
+
+# Specify 'y' if you want to be able to delete Identity service users
+# through the Identity service interface; specify 'n' if you will
+# delete directly in the LDAP backend. ['n', 'y']
+CONFIG_KEYSTONE_LDAP_USER_ALLOW_DELETE=n
+
+# Identity service LDAP attribute mapped to password.
+CONFIG_KEYSTONE_LDAP_USER_PASS_ATTRIBUTE=
+
+# DN of the group entry to hold enabled LDAP users when using enabled
+# emulation.
+CONFIG_KEYSTONE_LDAP_USER_ENABLED_EMULATION_DN=
+
+# List of additional LDAP attributes for mapping additional attribute
+# mappings for users. The attribute-mapping format is
+# <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the
+# LDAP entry and user_attr is the Identity API attribute.
+CONFIG_KEYSTONE_LDAP_USER_ADDITIONAL_ATTRIBUTE_MAPPING=
+
+# Group subtree for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_GROUP_SUBTREE=
+
+# Group query filter for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_GROUP_FILTER=
+
+# Group object class for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_GROUP_OBJECTCLASS=
+
+# Group ID attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_GROUP_ID_ATTRIBUTE=
+
+# Group name attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_GROUP_NAME_ATTRIBUTE=
+
+# Group member attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_GROUP_MEMBER_ATTRIBUTE=
+
+# Group description attribute for the Identity service LDAP backend.
+CONFIG_KEYSTONE_LDAP_GROUP_DESC_ATTRIBUTE=
+
+# Comma-separated list of attributes stripped from LDAP group entry
+# upon update.
+CONFIG_KEYSTONE_LDAP_GROUP_ATTRIBUTE_IGNORE=
+
+# Specify 'y' if you want to be able to create Identity service
+# groups through the Identity service interface; specify 'n' if you
+# will create directly in the LDAP backend. ['n', 'y']
+CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_CREATE=n
+
+# Specify 'y' if you want to be able to update Identity service
+# groups through the Identity service interface; specify 'n' if you
+# will update directly in the LDAP backend. ['n', 'y']
+CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_UPDATE=n
+
+# Specify 'y' if you want to be able to delete Identity service
+# groups through the Identity service interface; specify 'n' if you
+# will delete directly in the LDAP backend. ['n', 'y']
+CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_DELETE=n
+
+# List of additional LDAP attributes used for mapping additional
+# attribute mappings for groups. The attribute=mapping format is
+# <ldap_attr>:<group_attr>, where ldap_attr is the attribute in the
+# LDAP entry and group_attr is the Identity API attribute.
+CONFIG_KEYSTONE_LDAP_GROUP_ADDITIONAL_ATTRIBUTE_MAPPING=
+
+# Specify 'y' if the Identity service LDAP backend should use TLS.
+# ['n', 'y']
+CONFIG_KEYSTONE_LDAP_USE_TLS=n
+
+# CA certificate directory for Identity service LDAP backend (if TLS
+# is used).
+CONFIG_KEYSTONE_LDAP_TLS_CACERTDIR=
+
+# CA certificate file for Identity service LDAP backend (if TLS is
+# used).
+CONFIG_KEYSTONE_LDAP_TLS_CACERTFILE=
+
+# Certificate-checking strictness level for Identity service LDAP
+# backend; valid options are: never, allow, demand. ['never', 'allow',
+# 'demand']
+CONFIG_KEYSTONE_LDAP_TLS_REQ_CERT=demand
+
+# Password to use for the Image service (glance) to access the
+# database.
+CONFIG_GLANCE_DB_PW=441cfebf6e8c4569
+
+# Password to use for the Image service to authenticate with the
+# Identity service.
+CONFIG_GLANCE_KS_PW=5f594609aa4b41a3
+
+# Storage backend for the Image service (controls how the Image
+# service stores disk images). Valid options are: file or swift
+# (Object Storage). The Object Storage service must be enabled to use
+# it as a working backend; otherwise, Packstack falls back to 'file'.
+# ['file', 'swift']
+CONFIG_GLANCE_BACKEND=file
+
+# Password to use for the Block Storage service (cinder) to access
+# the database.
+CONFIG_CINDER_DB_PW=7aa2a2adc5df456d
+
+# Password to use for the Block Storage service to authenticate with
+# the Identity service.
+CONFIG_CINDER_KS_PW=f697ca4660e54eda
+
+# Storage backend to use for the Block Storage service; valid options
+# are: lvm, gluster, nfs, vmdk, netapp. ['lvm', 'gluster', 'nfs',
+# 'vmdk', 'netapp']
+CONFIG_CINDER_BACKEND=lvm
+
+# Specify 'y' to create the Block Storage volumes group. That is,
+# Packstack creates a raw disk image in /var/lib/cinder, and mounts it
+# using a loopback device. This should only be used for testing on a
+# proof-of-concept installation of the Block Storage service (a file-
+# backed volume group is not suitable for production usage). ['y',
+# 'n']
+CONFIG_CINDER_VOLUMES_CREATE=y
+
+# Size of Block Storage volumes group. Actual volume size will be
+# extended with 3% more space for VG metadata. Remember that the size
+# of the volume group will restrict the amount of disk space that you
+# can expose to Compute instances, and that the specified amount must
+# be available on the device used for /var/lib/cinder.
+CONFIG_CINDER_VOLUMES_SIZE=20G
+
+# A single or comma-separated list of Red Hat Storage (gluster)
+# volume shares to mount. Example: 'ip-address:/vol-name', 'domain
+# :/vol-name'
+CONFIG_CINDER_GLUSTER_MOUNTS=
+
+# A single or comma-separated list of NFS exports to mount. Example:
+# 'ip-address:/export-name'
+CONFIG_CINDER_NFS_MOUNTS=
+
+# Administrative user account name used to access the NetApp storage
+# system or proxy server.
+CONFIG_CINDER_NETAPP_LOGIN=
+
+# Password for the NetApp administrative user account specified in
+# the CONFIG_CINDER_NETAPP_LOGIN parameter.
+CONFIG_CINDER_NETAPP_PASSWORD=
+
+# Hostname (or IP address) for the NetApp storage system or proxy
+# server.
+CONFIG_CINDER_NETAPP_HOSTNAME=
+
+# The TCP port to use for communication with the storage system or
+# proxy. If not specified, Data ONTAP drivers will use 80 for HTTP and
+# 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS.
+# Defaults to 80.
+CONFIG_CINDER_NETAPP_SERVER_PORT=80
+
+# Storage family type used on the NetApp storage system; valid
+# options are ontap_7mode for using Data ONTAP operating in 7-Mode,
+# ontap_cluster for using clustered Data ONTAP, or E-Series for NetApp
+# E-Series. Defaults to ontap_cluster. ['ontap_7mode',
+# 'ontap_cluster', 'eseries']
+CONFIG_CINDER_NETAPP_STORAGE_FAMILY=ontap_cluster
+
+# The transport protocol used when communicating with the NetApp
+# storage system or proxy server. Valid values are http or https.
+# Defaults to 'http'. ['http', 'https']
+CONFIG_CINDER_NETAPP_TRANSPORT_TYPE=http
+
+# Storage protocol to be used on the data path with the NetApp
+# storage system; valid options are iscsi, fc, nfs. Defaults to nfs.
+# ['iscsi', 'fc', 'nfs']
+CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL=nfs
+
+# Quantity to be multiplied by the requested volume size to ensure
+# enough space is available on the virtual storage server (Vserver) to
+# fulfill the volume creation request. Defaults to 1.0.
+CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER=1.0
+
+# Time period (in minutes) that is allowed to elapse after the image
+# is last accessed, before it is deleted from the NFS image cache.
+# When a cache-cleaning cycle begins, images in the cache that have
+# not been accessed in the last M minutes, where M is the value of
+# this parameter, are deleted from the cache to create free space on
+# the NFS share. Defaults to 720.
+CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES=720
+
+# If the percentage of available space for an NFS share has dropped
+# below the value specified by this parameter, the NFS image cache is
+# cleaned. Defaults to 20.
+CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START=20
+
+# When the percentage of available space on an NFS share has reached
+# the percentage specified by this parameter, the driver stops
+# clearing files from the NFS image cache that have not been accessed
+# in the last M minutes, where M is the value of the
+# CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES parameter. Defaults to 60.
+CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP=60
+
+# Single or comma-separated list of NetApp NFS shares for Block
+# Storage to use. Format: ip-address:/export-name. Defaults to ''.
+CONFIG_CINDER_NETAPP_NFS_SHARES=
+
+# File with the list of available NFS shares. Defaults to
+# '/etc/cinder/shares.conf'.
+CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG=/etc/cinder/shares.conf
+
+# This parameter is only utilized when the storage protocol is
+# configured to use iSCSI or FC. This parameter is used to restrict
+# provisioning to the specified controller volumes. Specify the value
+# of this parameter to be a comma separated list of NetApp controller
+# volume names to be used for provisioning. Defaults to ''.
+CONFIG_CINDER_NETAPP_VOLUME_LIST=
+
+# The vFiler unit on which provisioning of block storage volumes will
+# be done. This parameter is only used by the driver when connecting
+# to an instance with a storage family of Data ONTAP operating in
+# 7-Mode Only use this parameter when utilizing the MultiStore feature
+# on the NetApp storage system. Defaults to ''.
+CONFIG_CINDER_NETAPP_VFILER=
+
+# The name of the config.conf stanza for a Data ONTAP (7-mode) HA
+# partner. This option is only used by the driver when connecting to
+# an instance with a storage family of Data ONTAP operating in 7-Mode,
+# and it is required if the storage protocol selected is FC. Defaults
+# to ''.
+CONFIG_CINDER_NETAPP_PARTNER_BACKEND_NAME=
+
+# This option specifies the virtual storage server (Vserver) name on
+# the storage cluster on which provisioning of block storage volumes
+# should occur. Defaults to ''.
+CONFIG_CINDER_NETAPP_VSERVER=
+
+# Restricts provisioning to the specified controllers. Value must be
+# a comma-separated list of controller hostnames or IP addresses to be
+# used for provisioning. This option is only utilized when the storage
+# family is configured to use E-Series. Defaults to ''.
+CONFIG_CINDER_NETAPP_CONTROLLER_IPS=
+
+# Password for the NetApp E-Series storage array. Defaults to ''.
+CONFIG_CINDER_NETAPP_SA_PASSWORD=
+
+# This option is used to define how the controllers in the E-Series
+# storage array will work with the particular operating system on the
+# hosts that are connected to it. Defaults to 'linux_dm_mp'
+CONFIG_CINDER_NETAPP_ESERIES_HOST_TYPE=linux_dm_mp
+
+# Path to the NetApp E-Series proxy application on a proxy server.
+# The value is combined with the value of the
+# CONFIG_CINDER_NETAPP_TRANSPORT_TYPE, CONFIG_CINDER_NETAPP_HOSTNAME,
+# and CONFIG_CINDER_NETAPP_HOSTNAME options to create the URL used by
+# the driver to connect to the proxy application. Defaults to
+# '/devmgr/v2'.
+CONFIG_CINDER_NETAPP_WEBSERVICE_PATH=/devmgr/v2
+
+# Restricts provisioning to the specified storage pools. Only dynamic
+# disk pools are currently supported. The value must be a comma-
+# separated list of disk pool names to be used for provisioning.
+# Defaults to ''.
+CONFIG_CINDER_NETAPP_STORAGE_POOLS=
+
+# Password to use for the OpenStack File Share service (manila) to
+# access the database.
+CONFIG_MANILA_DB_PW=PW_PLACEHOLDER
+
+# Password to use for the OpenStack File Share service (manila) to
+# authenticate with the Identity service.
+CONFIG_MANILA_KS_PW=PW_PLACEHOLDER
+
+# Backend for the OpenStack File Share service (manila); valid
+# options are: generic or netapp. ['generic', 'netapp']
+CONFIG_MANILA_BACKEND=generic
+
+# Denotes whether the driver should handle the responsibility of
+# managing share servers. This must be set to false if the driver is
+# to operate without managing share servers. Defaults to 'false'
+# ['true', 'false']
+CONFIG_MANILA_NETAPP_DRV_HANDLES_SHARE_SERVERS=false
+
+# The transport protocol used when communicating with the storage
+# system or proxy server. Valid values are 'http' and 'https'.
+# Defaults to 'https'. ['https', 'http']
+CONFIG_MANILA_NETAPP_TRANSPORT_TYPE=https
+
+# Administrative user account name used to access the NetApp storage
+# system. Defaults to ''.
+CONFIG_MANILA_NETAPP_LOGIN=admin
+
+# Password for the NetApp administrative user account specified in
+# the CONFIG_MANILA_NETAPP_LOGIN parameter. Defaults to ''.
+CONFIG_MANILA_NETAPP_PASSWORD=
+
+# Hostname (or IP address) for the NetApp storage system or proxy
+# server. Defaults to ''.
+CONFIG_MANILA_NETAPP_SERVER_HOSTNAME=
+
+# The storage family type used on the storage system; valid values
+# are ontap_cluster for clustered Data ONTAP. Defaults to
+# 'ontap_cluster'. ['ontap_cluster']
+CONFIG_MANILA_NETAPP_STORAGE_FAMILY=ontap_cluster
+
+# The TCP port to use for communication with the storage system or
+# proxy server. If not specified, Data ONTAP drivers will use 80 for
+# HTTP and 443 for HTTPS. Defaults to '443'.
+CONFIG_MANILA_NETAPP_SERVER_PORT=443
+
+# Pattern for searching available aggregates for NetApp provisioning.
+# Defaults to '(.*)'.
+CONFIG_MANILA_NETAPP_AGGREGATE_NAME_SEARCH_PATTERN=(.*)
+
+# Name of aggregate on which to create the NetApp root volume. This
+# option only applies when the option
+# CONFIG_MANILA_NETAPP_DRV_HANDLES_SHARE_SERVERS is set to True.
+CONFIG_MANILA_NETAPP_ROOT_VOLUME_AGGREGATE=
+
+# NetApp root volume name. Defaults to 'root'.
+CONFIG_MANILA_NETAPP_ROOT_VOLUME_NAME=root
+
+# This option specifies the storage virtual machine (previously
+# called a Vserver) name on the storage cluster on which provisioning
+# of shared file systems should occur. This option only applies when
+# the option driver_handles_share_servers is set to False. Defaults to
+# ''.
+CONFIG_MANILA_NETAPP_VSERVER=
+
+# Denotes whether the driver should handle the responsibility of
+# managing share servers. This must be set to false if the driver is
+# to operate without managing share servers. Defaults to 'true'.
+# ['true', 'false']
+CONFIG_MANILA_GENERIC_DRV_HANDLES_SHARE_SERVERS=true
+
+# Volume name template for Manila service. Defaults to 'manila-share-%s'.
+CONFIG_MANILA_GENERIC_VOLUME_NAME_TEMPLATE=manila-share-%s
+
+# Share mount path for Manila service. Defaults to '/shares'.
+CONFIG_MANILA_GENERIC_SHARE_MOUNT_PATH=/shares
+
+# Location of disk image for Manila service instance. Defaults to the
+# Ubuntu 12.04 NFS/CIFS qcow2 image at the URL below.
+CONFIG_MANILA_SERVICE_IMAGE_LOCATION=https://www.dropbox.com/s/vi5oeh10q1qkckh/ubuntu_1204_nfs_cifs.qcow2
+
+# User in Manila service instance.
+CONFIG_MANILA_SERVICE_INSTANCE_USER=ubuntu
+
+# Password to service instance user.
+CONFIG_MANILA_SERVICE_INSTANCE_PASSWORD=ubuntu
+
+# Type of networking that the backend will use. A more detailed
+# description of each option is available in the Manila docs. Defaults
+# to 'neutron'. ['neutron', 'nova-network', 'standalone']
+CONFIG_MANILA_NETWORK_TYPE=neutron
+
+# Gateway IPv4 address that should be used. Required. Defaults to ''.
+CONFIG_MANILA_NETWORK_STANDALONE_GATEWAY=
+
+# Network mask that will be used. Can be either decimal like '24' or
+# binary like '255.255.255.0'. Required. Defaults to ''.
+CONFIG_MANILA_NETWORK_STANDALONE_NETMASK=
+
+# Set it if network has segmentation (VLAN, VXLAN, etc). It will be
+# assigned to share-network and share drivers will be able to use this
+# for network interfaces within provisioned share servers. Optional.
+# Example: 1001. Defaults to ''.
+CONFIG_MANILA_NETWORK_STANDALONE_SEG_ID=
+
+# Can be IP address, range of IP addresses or list of addresses or
+# ranges. Contains addresses from IP network that are allowed to be
+# used. If empty, then will be assumed that all host addresses from
+# network can be used. Optional. Examples: 10.0.0.10 or
+# 10.0.0.10-10.0.0.20 or
+# 10.0.0.10-10.0.0.20,10.0.0.30-10.0.0.40,10.0.0.50. Defaults to ''.
+CONFIG_MANILA_NETWORK_STANDALONE_IP_RANGE=
+
+# IP version of network. Optional. Defaults to '4'. ['4', '6']
+CONFIG_MANILA_NETWORK_STANDALONE_IP_VERSION=4
+
+# Password to use for OpenStack Bare Metal Provisioning (ironic) to
+# access the database.
+CONFIG_IRONIC_DB_PW=PW_PLACEHOLDER
+
+# Password to use for OpenStack Bare Metal Provisioning to
+# authenticate with the Identity service.
+CONFIG_IRONIC_KS_PW=PW_PLACEHOLDER
+
+# Password to use for the Compute service (nova) to access the
+# database.
+CONFIG_NOVA_DB_PW=5dc7786f29f7444a
+
+# Password to use for the Compute service to authenticate with the
+# Identity service.
+CONFIG_NOVA_KS_PW=ec0e7532149c4c1b
+
+# Overcommitment ratio for virtual to physical CPUs. Specify 1.0 to
+# disable CPU overcommitment.
+CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO=16.0
+
+# Overcommitment ratio for virtual to physical RAM. Specify 1.0 to
+# disable RAM overcommitment.
+CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO=1.5
+
+# Protocol used for instance migration. Valid options are: tcp and
+# ssh. Note that by default, the Compute user is created with the
+# /sbin/nologin shell so that the SSH protocol will not work. To make
+# the SSH protocol work, you must configure the Compute user on
+# compute hosts manually. ['tcp', 'ssh']
+CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL=tcp
+
+# Manager that runs the Compute service.
+CONFIG_NOVA_COMPUTE_MANAGER=nova.compute.manager.ComputeManager
+
+# Private interface for flat DHCP on the Compute servers.
+CONFIG_NOVA_COMPUTE_PRIVIF=eth1
+
+# Compute Network Manager. ['^nova\.network\.manager\.\w+Manager$']
+CONFIG_NOVA_NETWORK_MANAGER=nova.network.manager.FlatDHCPManager
+
+# Public interface on the Compute network server.
+CONFIG_NOVA_NETWORK_PUBIF=eth0
+
+# Private interface for flat DHCP on the Compute network server.
+CONFIG_NOVA_NETWORK_PRIVIF=eth1
+
+# IP Range for flat DHCP. ['^[\:\.\da-fA-f]+(\/\d+){0,1}$']
+CONFIG_NOVA_NETWORK_FIXEDRANGE=192.168.32.0/22
+
+# IP Range for floating IP addresses. ['^[\:\.\da-fA-f]+(\/\d+){0,1}$']
+CONFIG_NOVA_NETWORK_FLOATRANGE=10.3.4.0/22
+
+# Specify 'y' to automatically assign a floating IP to new instances.
+# ['y', 'n']
+CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP=n
+
+# First VLAN for private networks (Compute networking).
+CONFIG_NOVA_NETWORK_VLAN_START=100
+
+# Number of networks to support (Compute networking).
+CONFIG_NOVA_NETWORK_NUMBER=1
+
+# Number of addresses in each private subnet (Compute networking).
+CONFIG_NOVA_NETWORK_SIZE=255
+
+# Password to use for OpenStack Networking (neutron) to authenticate
+# with the Identity service.
+CONFIG_NEUTRON_KS_PW=b80e24e5e8fe4939
+
+# The password to use for OpenStack Networking to access the
+# database.
+CONFIG_NEUTRON_DB_PW=ef787571117c4ea2
+
+# The name of the Open vSwitch bridge (or empty for linuxbridge) for
+# the OpenStack Networking L3 agent to use for external traffic.
+# Specify 'provider' if you intend to use a provider network to handle
+# external traffic.
+CONFIG_NEUTRON_L3_EXT_BRIDGE=br-ex
+
+# Password for the OpenStack Networking metadata agent.
+CONFIG_NEUTRON_METADATA_PW=4f67c8d5ca4e41b7
+
+# Specify 'y' to install OpenStack Networking's Load-Balancing-
+# as-a-Service (LBaaS). ['y', 'n']
+CONFIG_LBAAS_INSTALL=n
+
+# Specify 'y' to install OpenStack Networking's L3 Metering agent
+# ['y', 'n']
+CONFIG_NEUTRON_METERING_AGENT_INSTALL=n
+
+# Specify 'y' to configure OpenStack Networking's Firewall-
+# as-a-Service (FWaaS). ['y', 'n']
+CONFIG_NEUTRON_FWAAS=n
+
+# Comma-separated list of network-type driver entry points to be
+# loaded from the neutron.ml2.type_drivers namespace. ['local',
+# 'flat', 'vlan', 'gre', 'vxlan']
+CONFIG_NEUTRON_ML2_TYPE_DRIVERS=vlan
+
+# Comma-separated, ordered list of network types to allocate as
+# tenant networks. The 'local' value is only useful for single-box
+# testing and provides no connectivity between hosts. ['local',
+# 'vlan', 'gre', 'vxlan']
+CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES=vlan
+
+# Comma-separated ordered list of networking mechanism driver entry
+# points to be loaded from the neutron.ml2.mechanism_drivers
+# namespace. ['logger', 'test', 'linuxbridge', 'openvswitch',
+# 'hyperv', 'ncs', 'arista', 'cisco_nexus', 'mlnx', 'l2population']
+CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS=openvswitch
+
+# Comma-separated list of physical_network names with which flat
+# networks can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+CONFIG_NEUTRON_ML2_FLAT_NETWORKS=*
+
+# Comma-separated list of <physical_network>:<vlan_min>:<vlan_max> or
+# <physical_network> specifying physical_network names usable for VLAN
+# provider and tenant networks, as well as ranges of VLAN tags on each
+# available for allocation to tenant networks.
+CONFIG_NEUTRON_ML2_VLAN_RANGES=physnet1:1:1000
+
+# Comma-separated list of <tun_min>:<tun_max> tuples enumerating
+# ranges of GRE tunnel IDs that are available for tenant-network
+# allocation. A tuple must be an array with tun_max +1 - tun_min >
+# 1000000.
+CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES=
+
+# Comma-separated list of addresses for VXLAN multicast group. If
+# left empty, disables VXLAN from sending allocate broadcast traffic
+# (disables multicast VXLAN mode). Should be a Multicast IP (v4 or v6)
+# address.
+CONFIG_NEUTRON_ML2_VXLAN_GROUP=
+
+# Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network
+# allocation. Minimum value is 0 and maximum value is 16777215.
+CONFIG_NEUTRON_ML2_VNI_RANGES=10:100
+
+# Name of the L2 agent to be used with OpenStack Networking.
+# ['linuxbridge', 'openvswitch']
+CONFIG_NEUTRON_L2_AGENT=openvswitch
+
+# Comma-separated list of interface mappings for the OpenStack
+# Networking linuxbridge plugin. Each tuple in the list must be in the
+# format <physical_network>:<net_interface>. Example:
+# physnet1:eth1,physnet2:eth2,physnet3:eth3.
+CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS=
+
+# Comma-separated list of bridge mappings for the OpenStack
+# Networking Open vSwitch plugin. Each tuple in the list must be in
+# the format <physical_network>:<ovs_bridge>. Example: physnet1:br-
+# eth1,physnet2:br-eth2,physnet3:br-eth3
+CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS=physnet1:br-eth1
+
+# Comma-separated list of colon-separated Open vSwitch
+# <bridge>:<interface> pairs. The interface will be added to the
+# associated bridge.
+CONFIG_NEUTRON_OVS_BRIDGE_IFACES=br-eth1:eth1
+
+# Interface for the Open vSwitch tunnel. Packstack overrides the IP
+# address used for tunnels on this hypervisor to the IP found on the
+# specified interface (for example, eth1).
+CONFIG_NEUTRON_OVS_TUNNEL_IF=eth1
+
+# VXLAN UDP port.
+CONFIG_NEUTRON_OVS_VXLAN_UDP_PORT=4789
+
+# Specify 'y' to set up Horizon communication over https. ['y', 'n']
+CONFIG_HORIZON_SSL=y
+
+# PEM-encoded certificate to be used for SSL connections on the https
+# server (the certificate should not require a passphrase). To
+# generate a certificate, leave blank.
+CONFIG_SSL_CERT=
+
+# SSL keyfile corresponding to the certificate if one was specified.
+CONFIG_SSL_KEY=
+
+# PEM-encoded CA certificates from which the certificate chain of the
+# server certificate can be assembled.
+CONFIG_SSL_CACHAIN=
+
+# Password to use for the Object Storage service to authenticate with
+# the Identity service.
+CONFIG_SWIFT_KS_PW=703fab01b1344d12
+
+# Comma-separated list of devices to use as storage device for Object
+# Storage. Each entry must take the format /path/to/dev (for example,
+# specifying /dev/vdb installs /dev/vdb as the Object Storage storage
+# device; Packstack does not create the filesystem, you must do this
+# first). If left empty, Packstack creates a loopback device for test
+# setup.
+CONFIG_SWIFT_STORAGES=
+
+# Number of Object Storage storage zones; this number MUST be no
+# larger than the number of configured storage devices.
+CONFIG_SWIFT_STORAGE_ZONES=1
+
+# Number of Object Storage storage replicas; this number MUST be no
+# larger than the number of configured storage zones.
+CONFIG_SWIFT_STORAGE_REPLICAS=1
+
+# File system type for storage nodes. ['xfs', 'ext4']
+CONFIG_SWIFT_STORAGE_FSTYPE=ext4
+
+# Custom seed number to use for swift_hash_path_suffix in
+# /etc/swift/swift.conf. If you do not provide a value, a seed number
+# is automatically generated.
+CONFIG_SWIFT_HASH=8a0a14b3a8cd436c
+
+# Size of the Object Storage loopback file storage device.
+CONFIG_SWIFT_STORAGE_SIZE=2G
+
+# Password used by Orchestration service user to authenticate against
+# the database.
+CONFIG_HEAT_DB_PW=PW_PLACEHOLDER
+
+# Encryption key to use for authentication in the Orchestration
+# database (16, 24, or 32 chars).
+CONFIG_HEAT_AUTH_ENC_KEY=e8cd8c873bb24e92
+
+# Password to use for the Orchestration service to authenticate with
+# the Identity service.
+CONFIG_HEAT_KS_PW=PW_PLACEHOLDER
+
+# Specify 'y' to install the Orchestration CloudWatch API. ['y', 'n']
+CONFIG_HEAT_CLOUDWATCH_INSTALL=n
+
+# Specify 'y' to install the Orchestration CloudFormation API. ['y',
+# 'n']
+CONFIG_HEAT_CFN_INSTALL=n
+
+# Name of the Identity domain for Orchestration.
+CONFIG_HEAT_DOMAIN=heat
+
+# Name of the Identity domain administrative user for Orchestration.
+CONFIG_HEAT_DOMAIN_ADMIN=heat_admin
+
+# Password for the Identity domain administrative user for
+# Orchestration.
+CONFIG_HEAT_DOMAIN_PASSWORD=PW_PLACEHOLDER
+
+# Specify 'y' to provision for demo usage and testing. ['y', 'n']
+CONFIG_PROVISION_DEMO=y
+
+# Specify 'y' to configure the OpenStack Integration Test Suite
+# (tempest) for testing. The test suite requires OpenStack Networking
+# to be installed. ['y', 'n']
+CONFIG_PROVISION_TEMPEST=n
+
+# CIDR network address for the floating IP subnet.
+CONFIG_PROVISION_DEMO_FLOATRANGE=172.24.4.224/28
+
+# The name to be assigned to the demo image in Glance (default
+# "cirros").
+CONFIG_PROVISION_IMAGE_NAME=cirros
+
+# A URL or local file location for an image to download and provision
+# in Glance (defaults to a URL for a recent "cirros" image).
+CONFIG_PROVISION_IMAGE_URL=http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
+
+# Format for the demo image (default "qcow2").
+CONFIG_PROVISION_IMAGE_FORMAT=qcow2
+
+# User to use when connecting to instances booted from the demo
+# image.
+CONFIG_PROVISION_IMAGE_SSH_USER=cirros
+
+# Name of the Integration Test Suite provisioning user. If you do not
+# provide a user name, Tempest is configured in a standalone mode.
+CONFIG_PROVISION_TEMPEST_USER=
+
+# Password to use for the Integration Test Suite provisioning user.
+CONFIG_PROVISION_TEMPEST_USER_PW=PW_PLACEHOLDER
+
+# CIDR network address for the floating IP subnet.
+CONFIG_PROVISION_TEMPEST_FLOATRANGE=172.24.4.224/28
+
+# URI of the Integration Test Suite git repository.
+CONFIG_PROVISION_TEMPEST_REPO_URI=https://github.com/openstack/tempest.git
+
+# Revision (branch) of the Integration Test Suite git repository.
+CONFIG_PROVISION_TEMPEST_REPO_REVISION=master
+
+# Specify 'y' to configure the Open vSwitch external bridge for an
+# all-in-one deployment (the L3 external bridge acts as the gateway
+# for virtual machines). ['y', 'n']
+CONFIG_PROVISION_ALL_IN_ONE_OVS_BRIDGE=n
+
+# Secret key for signing Telemetry service (ceilometer) messages.
+CONFIG_CEILOMETER_SECRET=97da677da18747a6
+
+# Password to use for Telemetry to authenticate with the Identity
+# service.
+CONFIG_CEILOMETER_KS_PW=c4c84e0e70d74dc3
+
+# Backend driver for Telemetry's group membership coordination.
+# ['redis', 'none']
+CONFIG_CEILOMETER_COORDINATION_BACKEND=redis
+
+# IP address of the server on which to install MongoDB.
+CONFIG_MONGODB_HOST=192.168.0.10
+
+# IP address of the server on which to install the Redis master
+# server.
+CONFIG_REDIS_MASTER_HOST=192.168.0.10
+
+# Port on which the Redis server(s) listens.
+CONFIG_REDIS_PORT=6379
+
+# Specify 'y' to have Redis try to use HA. ['y', 'n']
+CONFIG_REDIS_HA=n
+
+# Hosts on which to install Redis slaves.
+CONFIG_REDIS_SLAVE_HOSTS=
+
+# Hosts on which to install Redis sentinel servers.
+CONFIG_REDIS_SENTINEL_HOSTS=
+
+# Host to configure as the Redis coordination sentinel.
+CONFIG_REDIS_SENTINEL_CONTACT_HOST=
+
+# Port on which Redis sentinel servers listen.
+CONFIG_REDIS_SENTINEL_PORT=26379
+
+# Quorum value for Redis sentinel servers.
+CONFIG_REDIS_SENTINEL_QUORUM=2
+
+# Name of the master server watched by the Redis sentinel. ['[a-z]+']
+CONFIG_REDIS_MASTER_NAME=mymaster
+
+# Password to use for OpenStack Data Processing (sahara) to access
+# the database.
+CONFIG_SAHARA_DB_PW=PW_PLACEHOLDER
+
+# Password to use for OpenStack Data Processing to authenticate with
+# the Identity service.
+CONFIG_SAHARA_KS_PW=PW_PLACEHOLDER
+
+# Password to use for OpenStack Database-as-a-Service (trove) to
+# access the database.
+CONFIG_TROVE_DB_PW=PW_PLACEHOLDER
+
+# Password to use for OpenStack Database-as-a-Service to authenticate
+# with the Identity service.
+CONFIG_TROVE_KS_PW=PW_PLACEHOLDER
+
+# User name to use when OpenStack Database-as-a-Service connects to
+# the Compute service.
+CONFIG_TROVE_NOVA_USER=admin
+
+# Tenant to use when OpenStack Database-as-a-Service connects to the
+# Compute service.
+CONFIG_TROVE_NOVA_TENANT=services
+
+# Password to use when OpenStack Database-as-a-Service connects to
+# the Compute service.
+CONFIG_TROVE_NOVA_PW=PW_PLACEHOLDER
+
+# Password of the nagiosadmin user on the Nagios server.
+CONFIG_NAGIOS_PW=7461b2cc0c6b4dc7
openstack/lab-hosts
@@ -0,0 +1,10 @@
+[openstack-lab]
+controller ansible_ssh_host=153.92.35.188 prompt_color=38 ansible_ssh_private_key_file=~/openstack-labs/student.pem ansible_ssh_user=root
+neutron ansible_ssh_host=31.220.67.166 prompt_color=36 ansible_ssh_private_key_file=~/openstack-labs/student.pem ansible_ssh_user=root
+compute1 ansible_ssh_host=153.92.34.135 prompt_color=35 ansible_ssh_private_key_file=~/openstack-labs/student.pem ansible_ssh_user=root
+compute2 ansible_ssh_host=31.220.64.107 prompt_color=34 ansible_ssh_private_key_file=~/openstack-labs/student.pem ansible_ssh_user=root
+
+[ceph-lab]
+storage1 ansible_ssh_host= prompt_color=33 ansible_ssh_private_key_file=~/openstack-labs/student.pem ansible_ssh_user=root
+storage2 ansible_ssh_host= prompt_color=32 ansible_ssh_private_key_file=~/openstack-labs/student.pem ansible_ssh_user=root
+storage3 ansible_ssh_host= prompt_color=31 ansible_ssh_private_key_file=~/openstack-labs/student.pem ansible_ssh_user=root
openstack/out.txt
@@ -0,0 +1,39 @@
+
+PLAY [controller] *************************************************************
+
+GATHERING FACTS ***************************************************************
+ok: [controller]
+
+TASK: [Record public IP] ******************************************************
+changed: [controller]
+
+TASK: [Deploy Nova Compute Template] ******************************************
+changed: [controller]
+
+PLAY [compute1;compute2] ******************************************************
+
+GATHERING FACTS ***************************************************************
+ok: [compute2]
+ok: [compute1]
+
+TASK: [Deploy Nova Compute Template] ******************************************
+ok: [compute1]
+ok: [compute2]
+
+TASK: [Restart Nova Compute Service] ******************************************
+changed: [compute2] => (item=openstack-nova-compute)
+changed: [compute1] => (item=openstack-nova-compute)
+
+PLAY [controller] *************************************************************
+
+GATHERING FACTS ***************************************************************
+ok: [controller]
+
+TASK: [Restart Nova Compute and VNC Proxy Service] ****************************
+changed: [controller] => (item=openstack-nova-novncproxy)
+
+PLAY RECAP ********************************************************************
+compute1 : ok=3 changed=1 unreachable=0 failed=0
+compute2 : ok=3 changed=1 unreachable=0 failed=0
+controller : ok=5 changed=3 unreachable=0 failed=0
+
openstack/student.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAr97AHhV5EIPGKXg6g8sA68VsHdt66Eu5Od6+Yv/hVtBREfgZ
+1bRdTro+Nq3cfw+kqmyYaz3ohujyQsIfjhN1z10vbIwGwsKOSbn0oGCzpAStuUgJ
+C4En4a2DJ6FbbvN/z45VX3j2NdCKMiVTWITwfBEaZgwS9qaLHbecQrY0s3wPo0Jx
+7o27pQM47GPjRwi+nC9uI9hpTJck87a3au1BgB6pvTZcN7aXotZPNUOVqQuwb5eo
+c36820TkXQvszlHBML7aoW6E3YhuEPJGIwUbImxToYNTRkbPaS7o6DNEq/ONlrTF
+fal5Uu3btd2YhSck2ZpLiwKWJuX6l7xNVMC+KwIDAQABAoIBAQCRk2ijflLAANO+
+7+4/uYIBp2FfNZX/Qu8HoAEoIXZ9TeJNZXi9FJdH25Iz4KZr5p7/EvT3kD8xrREb
+gWRtTl5uT0w6JY5TGIh/LpX3SxRn6BeFDsOt6jMA84dk/oOYy6Y4QDEa3h2vXfeG
+mq7qzt/hUqbvYX8w+5qb9qg58GifR+pxFzx9EXdkuSYVvhu/3Zw86u3eDmGOPn8r
+QU/QxGyo1YfLehdhIbNcgQTMbVLg83vlZfDdsMb45bSEzzE1mNecpUH7CjYGSxsB
++5yW9m0hfOQReGrShTZiBL1tRi48uMX/i/Zp+K4Qlf95UkM28aYrOdUdCPHZYbov
+dZdJdpVBAoGBAN1uRhhnrE1x17DveYR7Q0LtdirfX09UUxmRV5+dLEDZktnF88wQ
+gSiJ9myGFK60juI6oWqXLTAPmPa8e0iwkeVNZnBzQNIhLlAAJtj8rr8oWaS+BUmw
+JjE9PDygahjJu7XU5MBPbtTihuDdxzA2Wxm/ywIwjOs6erF6M7EIWHltAoGBAMtT
+mFrKR020F6j9KPT8M5aDPJc5d44wI6o9G4BeU36bInEqGpHWOXPJddUa/pUKR0mK
+T8yCpgSm6pJnutuXAD19gPes7oh8/3eqh43U0LvUueaU1SZqXz68o4ItRDpnmkA3
+hAiziZ7YgS4KLU1Fcv+tMhVV8EFk6IgeZyrumi73AoGAXuvsxH180V4h9klSMEae
+2MvuwgYrV1hwnx89DsFTvSEH4K8JCctxcq1ho7f+mb3xGm3D0GuqBVZKXksCFnEK
+ud9qMH5XpTxwG/9Up+P6DRwBAawrnCEU3jq/3cVudd3Ux+fRf9cNTdkltLOdtb83
+YlxjbDbf0d6pxfb6vcM0GikCgYBByBrrjLQC9LgO6VDd6dZ65JyuxF3kt7hiNID4
+SAw1UtWSzS9pbWiLCGvB9YjWCUxFA6WLod06dhJAKaUvtCvUuQ0HlIBb0i0ItzhB
+789UZZ97dT18vz9vWbKAxEplpu0lx5BFDGkC5kLs9Ufwzv5Er/EbLAGd9pqwpz1z
+i1yAwwKBgQDcY7aA1Di5Ti6RgW+UY+yUY8JMq0anNW4GDQLUyUY39Aw9pOW+I+dC
+mS+QR2FfLadLMU+EMeagzY0JDUWZyCxxseypC2k90sfhVeZnMQZDVm383eQAGy/f
+Wpj6R3sh0KsOJoUvCrJrxLIMZuh1fMVLoTpedyXY9Ty5BWBc8Gio6Q==
+-----END RSA PRIVATE KEY-----