blob: 6789a56e9175ac24186552429cca84104ce53c1d [file] [log] [blame]
#!/usr/bin/python
#
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tool to upgrade the configuration file.
This code handles only the types supported by simplejson. For
example, a 'set' is serialized as a 'list'.
"""
import os
import os.path
import sys
import optparse
import logging
import time
from cStringIO import StringIO
from ganeti import constants
from ganeti import serializer
from ganeti import utils
from ganeti import cli
from ganeti import bootstrap
from ganeti import config
from ganeti import netutils
from ganeti import pathutils
from ganeti.utils import version
# Parsed command line options and positional arguments; set by main()
options = None
args = None
#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
TARGET_MINOR = 13
#: Target major version for downgrade
DOWNGRADE_MAJOR = 2
#: Target minor version for downgrade
DOWNGRADE_MINOR = 12
# map of legacy device types
# (mapping differing old LD_* constants to new DT_* constants)
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
# (mapping differing new DT_* constants to old LD_* constants)
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
class Error(Exception):
  """Base exception for all errors raised by this tool."""
def SetupLogging():
  """Configure the root logger to report to stderr.

  The stderr handler threshold is derived from the --debug/--verbose
  flags stored on the module-level ``options``.
  """
  handler = logging.StreamHandler()
  handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
  if options.debug:
    level = logging.NOTSET
  elif options.verbose:
    level = logging.INFO
  else:
    level = logging.WARNING
  handler.setLevel(level)
  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)
  root_logger.addHandler(handler)
def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file
  @return: C{True} if the name stored in the ssconf file matches this
    machine's hostname, C{False} otherwise (a warning is logged)

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name
  if ssconf_master_node == hostname:
    return True
  # Not fatal here; the caller decides (see --ignore-hostname handling)
  logging.warning("Warning: ssconf says master node is '%s', but this"
                  " machine's name is '%s'; this tool must be run on"
                  " the master node", ssconf_master_node, hostname)
  return False
def _FillIPolicySpecs(default_ipolicy, ipolicy):
if "minmax" in ipolicy:
for (key, spec) in ipolicy["minmax"][0].items():
for (par, val) in default_ipolicy["minmax"][0][key].items():
if par not in spec:
spec[par] = val
def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
  """Convert a legacy instance policy to the "minmax" layout in place.

  Top-level "min"/"max" specs are folded into a single-element "minmax"
  list; group-level policies additionally lose their "std" spec.
  Missing spec parameters are then filled from *default_ipolicy*.
  """
  if "min" in ipolicy or "max" in ipolicy:
    minmax = {}
    for key in ("min", "max"):
      if key in ipolicy:
        # drop the legacy key; keep the spec only if it is non-empty
        spec = ipolicy.pop(key)
        if spec:
          minmax[key] = spec
    if minmax:
      ipolicy["minmax"] = [minmax]
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)
def UpgradeNetworks(config_data):
  """Ensure the configuration contains a (possibly empty) networks dict."""
  if not config_data.get("networks"):
    config_data["networks"] = {}
def UpgradeCluster(config_data):
  """Bring the cluster section of the configuration up to date.

  Adds every cluster field introduced up to 2.13 with its default value
  and upgrades the cluster-level instance policy.

  @raise Error: if the configuration has no "cluster" section

  """
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  # NB: setdefault inserts an explicit "ipolicy" key (value None) when absent
  ipolicy = cluster.setdefault("ipolicy", None)
  if ipolicy:
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
  if not cluster.get("default_iallocator_params"):
    cluster["default_iallocator_params"] = {}
  if "candidate_certs" not in cluster:
    cluster["candidate_certs"] = {}
  cluster.setdefault("instance_communication_network", "")
  cluster.setdefault("install_image", "")
  cluster.setdefault("zeroing_image", "")
  cluster.setdefault("compression_tools", constants.IEC_DEFAULT_TOOLS)
  if "enabled_user_shutdown" not in cluster:
    cluster["enabled_user_shutdown"] = False
  collectors = cluster.setdefault("data_collectors", {})
  for name in constants.DATA_COLLECTOR_NAMES:
    collectors.setdefault(
      name, dict(active=True, interval=constants.MOND_TIME_INTERVAL * 1e6))
def UpgradeGroups(config_data):
  """Upgrade every nodegroup: networks dict and instance policy.

  @raise Error: if a group defines an instance policy while the cluster
    has none (group policies are deltas against the cluster policy)

  """
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    networks = group.get("networks", None)
    if not networks:
      group["networks"] = {}
    ipolicy = group.get("ipolicy", None)
    if ipolicy:
      if cl_ipolicy is None:
        raise Error("A group defines an instance policy but there is no"
                    " instance policy at cluster level")
      UpgradeIPolicy(ipolicy, cl_ipolicy, True)
def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  @return: C{True} if the cluster or at least one nodegroup has the
    flag set in its ndparams, C{False} otherwise

  """
  def _HasFlag(obj):
    # ndparams may be missing entirely; treat that as the flag being unset
    params = obj.get("ndparams")
    return params is not None and bool(params.get("exclusive_storage"))

  if _HasFlag(config_data["cluster"]):
    return True
  return any(_HasFlag(group)
             for group in config_data["nodegroups"].values())
def RemovePhysicalId(disk):
  """Recursively drop the legacy "physical_id" attribute from a disk."""
  for child in disk.get("children", []):
    RemovePhysicalId(child)
  disk.pop("physical_id", None)
def ChangeDiskDevType(disk, dev_type_map):
  """Replaces disk's dev_type attributes according to the given map.

  Works in both directions (upgrade and downgrade); child disks are
  converted recursively.
  """
  dtype = disk["dev_type"]
  if dtype in dev_type_map:
    disk["dev_type"] = dev_type_map[dtype]
  for child in disk.get("children", []):
    ChangeDiskDevType(child, dev_type_map)
def UpgradeDiskDevType(disk):
  """Upgrades the disks' device type.

  Maps the legacy LD_* names ("lvm", "drbd8") to their DT_* equivalents.
  """
  ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
def _ConvertNicNameToUuid(iobj, network2uuid):
for nic in iobj["nics"]:
name = nic.get("network", None)
if name:
uuid = network2uuid.get(name, None)
if uuid:
print("NIC with network name %s found."
" Substituting with uuid %s." % (name, uuid))
nic["network"] = uuid
def AssignUuid(disk):
  """Recursively assign a fresh UUID to *disk* and its children if missing."""
  if not "uuid" in disk:
    disk["uuid"] = utils.io.NewUUID()
  if "children" in disk:
    for d in disk["children"]:
      AssignUuid(d)
def _ConvertDiskAndCheckMissingSpindles(iobj, instance):
  """Upgrade one instance's disk objects in place.

  Removes legacy "physical_id" fields, fixes up "iv_name", maps legacy
  device types and assigns missing UUIDs.  Skipped entirely when the
  disk entries are plain strings, i.e. already top-level UUID references.

  @param iobj: the instance configuration dict
  @param instance: the instance name (used in messages only)
  @return: C{True} if at least one disk lacks the "spindles" attribute
  @raise Error: if the instance has no "disks" entry

  """
  missing_spindles = False
  if "disks" not in iobj:
    raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
  disks = iobj["disks"]
  if not all(isinstance(d, str) for d in disks):
    # Disks are not top level citizens
    for idx, dobj in enumerate(disks):
      RemovePhysicalId(dobj)
      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected
      if "dev_type" in dobj:
        UpgradeDiskDevType(dobj)
      if not "spindles" in dobj:
        missing_spindles = True
      AssignUuid(dobj)
  return missing_spindles
def UpgradeInstances(config_data):
  """Upgrades the instances' configuration.

  Converts NIC network names to UUIDs, upgrades each instance's disk
  objects and adds the "admin_state_source" field; warns when spindles
  may need updating while exclusive storage might be enabled.

  @raise Error: if the configuration has no "instances" key

  """
  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")
  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    _ConvertNicNameToUuid(iobj, network2uuid)
    if _ConvertDiskAndCheckMissingSpindles(iobj, instance):
      missing_spindles = True
    if "admin_state_source" not in iobj:
      iobj["admin_state_source"] = constants.ADMIN_SOURCE
  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks could be needing to update the"
                    " spindles parameter; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")
def UpgradeRapiUsers():
  """Move the pre-2.4 RAPI users file and create a compatibility symlink.

  Paths and the dry-run flag come from the module-level ``options``;
  with --dry-run the actions are only logged.

  @raise Error: if both the old and the new users file exist

  """
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0750)
  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)
def UpgradeWatcher():
  """Remove the old watcher state file if it exists (honours --dry-run)."""
  # Remove old watcher state file if it exists
  if os.path.exists(options.WATCHER_STATEFILE):
    logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
    if not options.dry_run:
      utils.RemoveFile(options.WATCHER_STATEFILE)
def UpgradeFileStoragePaths(config_data):
  """Seed the file-storage whitelist file if it does not exist yet.

  Ganeti 2.7+ only allows whitelisted directories for file storage; the
  whitelist is seeded from the directories found in the cluster
  configuration.  The dry-run flag is passed through to utils.WriteFile.
  """
  # Write file storage paths
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    cluster = config_data["cluster"]
    file_storage_dir = cluster.get("file_storage_dir")
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
    del cluster
    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                 " for file storage; writing existing configuration values"
                 " into '%s'",
                 options.FILE_STORAGE_PATHS_FILE)
    if file_storage_dir:
      logging.info("File storage directory: %s", file_storage_dir)
    if shared_file_storage_dir:
      logging.info("Shared file storage directory: %s",
                   shared_file_storage_dir)
    buf = StringIO()
    buf.write("# List automatically generated from configuration by\n")
    buf.write("# cfgupgrade at %s\n" % time.asctime())
    if file_storage_dir:
      buf.write("%s\n" % file_storage_dir)
    if shared_file_storage_dir:
      buf.write("%s\n" % shared_file_storage_dir)
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                    data=buf.getvalue(),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)
def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
  """Translate a node key into its new form.

  @return: the node's *new_key_field* value, or *old_key* unchanged
    (with a warning) when the node is unknown, which presumably means
    the key is already up-to-date

  """
  try:
    node = nodes_by_old_key[old_key]
  except KeyError:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return node[new_key_field]
def ChangeNodeIndices(config_data, old_key_field, new_key_field):
  """Re-key the nodes dict and update every node reference accordingly.

  Rewrites the cluster master node, every instance's primary node and
  the node pair at the start of each DRBD disk's logical_id from the
  *old_key_field* value to the *new_key_field* value.
  """
  def ChangeDiskNodeIndices(disk):
    # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
    # considered when up/downgrading from/to any versions touching 2.9 on the
    # way.
    drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
    if disk["dev_type"] in drbd_disk_types:
      # the first two elements of a DRBD logical_id are the node keys
      for i in range(0, 2):
        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
                                                disk["logical_id"][i],
                                                new_key_field)
    if "children" in disk:
      for child in disk["children"]:
        ChangeDiskNodeIndices(child)
  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for (_, node) in config_data["nodes"].items():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node
  config_data["nodes"] = nodes_by_new_key
  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)
  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
  for disk in config_data["disks"].values():
    ChangeDiskNodeIndices(disk)
def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
  """Re-key the instances dict from *old_key_field* to *new_key_field*."""
  instances = list(config_data["instances"].values())
  config_data["instances"] = dict(
    (inst[new_key_field], inst) for inst in instances)
def UpgradeNodeIndices(config_data):
  """Re-key nodes (and node references) from names to UUIDs."""
  ChangeNodeIndices(config_data, "name", "uuid")
def UpgradeInstanceIndices(config_data):
  """Re-key the instances dict from names to UUIDs."""
  ChangeInstanceIndices(config_data, "name", "uuid")
def UpgradeFilters(config_data):
  """Ensure the configuration contains a (possibly empty) filters dict."""
  if not config_data.get("filters"):
    config_data["filters"] = {}
def UpgradeTopLevelDisks(config_data):
  """Move instance disks to the top level of the configuration.

  Each disk object is registered under config_data["disks"] keyed by
  its UUID, and every per-instance disk list is replaced by a list of
  those UUIDs.  A no-op when top-level disks already exist.

  @raise Error: if the configuration has no "instances" key

  """
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")
  if "disks" in config_data:
    # Disks are already top level citizens
    return
  top_level = {}
  config_data["disks"] = top_level
  for iobj in config_data["instances"].values():
    uuids = []
    for disk in iobj["disks"]:
      disk["serial_no"] = 1
      # Instances may not have the ctime value, and the Haskell
      # serialization will have set it to zero.
      disk["ctime"] = disk["mtime"] = iobj.get("ctime", 0)
      uuids.append(disk["uuid"])
      top_level[disk["uuid"]] = disk
    iobj["disks"] = uuids
def UpgradeAll(config_data):
  """Run every upgrade step and bump the config version to 2.13.

  Note: UpgradeNetworks must run before UpgradeInstances (NIC name to
  UUID conversion reads config_data["networks"]), and disks/instances
  must be converted before the node/instance index re-keying.
  """
  config_data["version"] = version.BuildVersion(TARGET_MAJOR, TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
  UpgradeTopLevelDisks(config_data)
  UpgradeNodeIndices(config_data)
  UpgradeInstanceIndices(config_data)
  UpgradeFilters(config_data)
# DOWNGRADE ------------------------------------------------------------
def DowngradeExtAccess(config_data):
  """Drop the 'access' ext-storage diskparam from cluster and groups."""
  # Remove 'access' for ext storage from cluster diskparams,
  # then from every nodegroup's diskparams
  all_diskparams = [config_data["cluster"]["diskparams"]]
  all_diskparams.extend(group["diskparams"]
                        for group in config_data["nodegroups"].values())
  for diskparams in all_diskparams:
    extparams = diskparams.get("ext")
    if extparams is not None and "access" in extparams:
      del extparams["access"]
def DowngradeDataCollectors(config_data):
  """Remove the 2.13-only data_collectors cluster entry, if present."""
  config_data["cluster"].pop("data_collectors", None)
def DowngradeFilters(config_data):
  """Remove the 2.13-only top-level filters entry, if present."""
  config_data.pop("filters", None)
def DowngradeLxcParams(hvparams):
  """Strip LXC hypervisor parameters unknown to the downgrade target.

  @param hvparams: hvparams dict (hypervisor name -> params dict),
    modified in place; a no-op when there is no "lxc" entry

  """
  if "lxc" not in hvparams:
    return
  lxc_params = hvparams["lxc"]
  for param in ("devices", "drop_capabilities", "extra_cgroups",
                "extra_config", "num_ttys", "startup_timeout"):
    lxc_params.pop(param, None)
def DowngradeAllLxcParams(config_data):
  """Strip 2.13-only LXC hypervisor parameters from the whole config.

  Downgrades the cluster-wide hvparams as well as every instance's
  hvparams override.
  """
  cluster = config_data["cluster"]
  if "hvparams" in cluster:
    DowngradeLxcParams(cluster["hvparams"])
  # FIX: instances live at the top level of config_data (see
  # UpgradeInstances), not inside the cluster dict, and we must iterate
  # the instance objects (values), not the key strings; the previous
  # code silently never downgraded any instance's hvparams.
  for iobj in config_data.get("instances", {}).values():
    if "hvparams" in iobj:
      DowngradeLxcParams(iobj["hvparams"])
def DowngradeAll(config_data):
  """Run every downgrade step, setting the config version to 2.12."""
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
                                                DOWNGRADE_MINOR, 0)
  DowngradeExtAccess(config_data)
  DowngradeDataCollectors(config_data)
  DowngradeFilters(config_data)
  DowngradeAllLxcParams(config_data)
def _ParseOptions():
  """Build the command line parser and parse sys.argv.

  @return: the (options, args) tuple as returned by optparse

  """
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  return parser.parse_args()
def _ComposePaths():
  """Derive all file paths from --path/--confdir and store them on options."""
  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.CLIENT_PEM_PATH = options.data_dir + "/client.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"
def _AskUser():
  """Ask for confirmation unless --force was given; exit on refusal."""
  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
                  " Some configuration data might be removed if they don't fit"
                  " in the old format. Please make sure you have read the"
                  " upgrade notes (available in the UPGRADE file and included"
                  " in other documentation formats) to understand what they"
                  " are. Continue with *DOWNGRADING* the configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)
def _Downgrade(config_major, config_minor, config_version, config_data,
               config_revision):
  """Downgrade the configuration in place.

  @raise Error: when the source version is neither the current target
    version (2.13) nor the downgrade version (2.12) itself

  """
  if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
          (config_major == DOWNGRADE_MAJOR and
           config_minor == DOWNGRADE_MINOR)):
    raise Error("Downgrade supported only from the latest version (%s.%s),"
                " found %s (%s.%s.%s) instead" %
                (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                 config_minor, config_revision))
  DowngradeAll(config_data)
def _TestLoadingConfigFile():
  """Verify the written configuration by loading it with ConfigWriter.

  Skipped for --dry-run and --no-verify; the final outcome of the
  up/downgrade is reported on stderr either way.
  """
  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg
  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)
def main():
  """Main program.

  Parses options, performs sanity checks, loads the configuration,
  runs the up/downgrade, writes the result back and verifies it.
  """
  global options, args # pylint: disable=W0603
  (options, args) = _ParseOptions()
  _ComposePaths()
  SetupLogging()
  # Option checking
  if args:
    raise Error("No arguments expected")
  if options.downgrade and not options.no_verify:
    # the downgraded config cannot be verified with the current tool
    options.no_verify = True
  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)
  _AskUser()
  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)
  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)
  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))
  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")
  (config_major, config_minor, config_revision) = \
    version.SplitVersion(config_version)
  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)
  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")
  # Downgrade to the previous stable version
  if options.downgrade:
    _Downgrade(config_major, config_minor, config_version, config_data,
               config_revision)
  # Upgrade from 2.{0..12} to 2.13
  elif config_major == 2 and config_minor in range(0, 13):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)
  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")
  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))
  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)
    if not options.dry_run:
      # This creates the cluster certificate if it does not exist yet.
      # In this case, we do not automatically create a client certificate
      # as well, because if the cluster certificate did not exist before,
      # no client certificate will exist on any node yet. In this case
      # all client certificate should be renewed by 'gnt-cluster
      # renew-crypto --new-node-certificates'. This will be enforced
      # by a nagging warning in 'gnt-cluster verify'.
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False, False, None,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)
  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise
  _TestLoadingConfigFile()
# Standard script entry point guard
if __name__ == "__main__":
  main()