Merge branch 'stable-2.16' into stable-2.17
* stable-2.16
Improve error reporting in _VerifyClientCertificates
Simplify some inscrutable map/map/ifilter/zip code
Avoid overuse of operator in watcher *.py
Sprinkle some more list comprehensions
Replace map/partial with list comprehension
Replace uses of map/lambda with more Pythonic code
Replace map(operator.attrgetter, ...) uses
Fix typos in gnt-cluster man page
Hide errors for expected inotify failures in unittest
Add gnt-instance rename --force option
Improve documentation for gnt-instance failover
Allow master failover to ignore offline nodes
Fix LogicalVolume code to work with older /sbin/lvs
Shorten verifyMasterVote failure message
Adding a confirmation before gnt-node --offline no
Removed unnecessary dependency from rpc in cli
Refactor cli exception to its appropriate module
Clean-up of code and fix of pylint warnings
Use fork instead of spawnv in the watcher
Make 'make pep8' happy
Manually fix merge conflicts in src/Ganeti/Utils.py
Signed-off-by: Brian Foley <bpfoley@google.com>
Reviewed-by: Viktor Bachraty <vbachraty@google.com>
diff --git a/lib/backend.py b/lib/backend.py
index e597410..58c8b3a 100644
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -3822,7 +3822,7 @@
lvs_cache = None
is_plain_disk = compat.any([_CheckForPlainDisk(d) for d in disks])
if is_plain_disk:
- lvs_cache = bdev.LogicalVolume._GetLvGlobalInfo()
+ lvs_cache = bdev.LogicalVolume.GetLvGlobalInfo()
for disk in disks:
try:
rbd = _RecursiveFindBD(disk, lvs_cache=lvs_cache)
diff --git a/lib/bootstrap.py b/lib/bootstrap.py
index fc19a06..8eb0b4c 100644
--- a/lib/bootstrap.py
+++ b/lib/bootstrap.py
@@ -961,6 +961,11 @@
current master to cease being master, and the non-master to become
new master.
+ Note: The call to MasterFailover from lib/client/gnt_cluster.py checks that
+ a majority of nodes are healthy and responding before calling this. If this
+ function is called from somewhere else, the caller should also verify that a
+ majority of nodes are healthy.
+
@type no_voting: boolean
@param no_voting: force the operation without remote nodes agreement
(dangerous)
@@ -989,14 +994,10 @@
errors.ECODE_STATE)
if not no_voting:
- vote_list = GatherMasterVotes(node_names)
-
+ vote_list = _GatherMasterVotes(node_names)
if vote_list:
voted_master = vote_list[0][0]
- if voted_master is None:
- raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
- " not respond.", errors.ECODE_ENVIRON)
- elif voted_master != old_master:
+ if voted_master != old_master:
raise errors.OpPrereqError("I have a wrong configuration, I believe"
" the master is %s but the other nodes"
" voted %s. Please resync the configuration"
@@ -1152,11 +1153,11 @@
return old_master
-def GatherMasterVotes(node_names):
+def _GatherMasterVotes(node_names):
"""Check the agreement on who is the master.
This function will return a list of (node, number of votes), ordered
- by the number of votes. Errors will be denoted by the key 'None'.
+ by the number of votes.
Note that the sum of votes is the number of nodes this machine
knows, whereas the number of entries in the list could be different
@@ -1176,32 +1177,28 @@
# this should not happen (unless internal error in rpc)
logging.critical("Can't complete rpc call, aborting master startup")
return [(None, len(node_names))]
- votes = {}
- for node_name in results:
- nres = results[node_name]
- msg = nres.fail_msg
+ votes = {}
+ for (node_name, nres) in results.iteritems():
+ msg = nres.fail_msg
if msg:
logging.warning("Error contacting node %s: %s", node_name, msg)
- node = None
- else:
- node = nres.payload
-
+ continue
+ node = nres.payload
+    if not node:
+      logging.warning(('Expected a Node, encountered a None. Skipping this'
+                       ' voting result.'))
+      continue
if node not in votes:
votes[node] = 1
else:
votes[node] += 1
- vote_list = [v for v in votes.items()]
- # sort first on number of votes then on name, since we want None
- # sorted later if we have the half of the nodes not responding, and
- # half voting all for the same master
- vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)
-
+ vote_list = votes.items()
+ vote_list.sort(key=lambda x: x[1], reverse=True)
return vote_list
-def MajorityHealthy():
+def MajorityHealthy(ignore_offline_nodes=False):
"""Check if the majority of nodes is healthy
Gather master votes from all nodes known to this node;
@@ -1210,13 +1207,32 @@
not guarantee any node to win an election but it ensures that
a standard master-failover is still possible.
+ @return: tuple of (boolean, [str]); the first is if a majority of nodes are
+ healthy, the second is a list of the node names that are not considered
+ healthy.
"""
- node_names = ssconf.SimpleStore().GetNodeList()
+ if ignore_offline_nodes:
+ node_names = ssconf.SimpleStore().GetOnlineNodeList()
+ else:
+ node_names = ssconf.SimpleStore().GetNodeList()
+
node_count = len(node_names)
- vote_list = GatherMasterVotes(node_names)
- if vote_list is None:
- return False
+ vote_list = _GatherMasterVotes(node_names)
+
+ if not vote_list:
+    logging.warning(('Voting list was empty; cannot determine if a majority'
+                     ' of nodes are healthy'))
+ return (False, node_names)
+
total_votes = sum([count for (node, count) in vote_list if node is not None])
+ majority_healthy = 2 * total_votes > node_count
+
+ # The list of nodes that did not vote is calculated to provide useful
+ # debugging information to the client.
+ voting_nodes = [node for (node, _) in vote_list]
+ nonvoting_nodes = [node for node in node_names if node not in voting_nodes]
+
logging.info("Total %d nodes, %d votes: %s", node_count, total_votes,
vote_list)
- return 2 * total_votes > node_count
+
+ return (majority_healthy, nonvoting_nodes)
diff --git a/lib/cli.py b/lib/cli.py
index 1a1815c..73c9b96 100644
--- a/lib/cli.py
+++ b/lib/cli.py
@@ -46,7 +46,6 @@
from ganeti import constants
from ganeti import opcodes
import ganeti.rpc.errors as rpcerr
-import ganeti.rpc.node as rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
@@ -76,14 +75,12 @@
"GetNodesSshPorts",
"GetNodeUUIDs",
"JobExecutor",
- "JobSubmittedException",
"ParseTimespec",
"RunWhileClusterStopped",
"RunWhileDaemonsStopped",
"SubmitOpCode",
"SubmitOpCodeToDrainedQueue",
"SubmitOrSend",
- "UsesRPC",
# Formatting functions
"ToStderr", "ToStdout",
"ToStdoutAndLoginfo",
@@ -616,9 +613,6 @@
return selected.split(",")
-UsesRPC = rpc.RunWithRPC
-
-
def AskUser(text, choices=None):
"""Ask the user a question.
@@ -676,17 +670,6 @@
return answer
-class JobSubmittedException(Exception):
- """Job was submitted, client should exit.
-
- This exception has one argument, the ID of the job that was
- submitted. The handler should print this ID.
-
- This is not an error, just a structured way to exit from clients.
-
- """
-
-
def SendJob(ops, cl=None):
"""Function to submit an opcode without waiting for the results.
@@ -1055,7 +1038,7 @@
job_id = SendJob(job, cl=cl)
if opts.print_jobid:
ToStdout("%d" % job_id)
- raise JobSubmittedException(job_id)
+ raise errors.JobSubmittedException(job_id)
else:
return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
@@ -1193,7 +1176,7 @@
obuf.write("\n".join(err.GetDetails()))
elif isinstance(err, errors.GenericError):
obuf.write("Unhandled Ganeti error: %s" % msg)
- elif isinstance(err, JobSubmittedException):
+ elif isinstance(err, errors.JobSubmittedException):
obuf.write("JobID: %s\n" % err.args[0])
retcode = 0
else:
@@ -1269,7 +1252,7 @@
try:
result = func(options, args)
except (errors.GenericError, rpcerr.ProtocolError,
- JobSubmittedException), err:
+ errors.JobSubmittedException), err:
result, err_msg = FormatError(err)
logging.exception("Error during command processing")
ToStderr(err_msg)
@@ -2358,10 +2341,10 @@
@rtype: a list of tuples
"""
- return map(lambda t: t[0],
+ return [t[0] for t in
cl.QueryNodes(names=nodes,
fields=["ndp/ssh_port"],
- use_locking=False))
+ use_locking=False)]
def GetNodeUUIDs(nodes, cl):
@@ -2375,10 +2358,10 @@
@rtype: a list of tuples
"""
- return map(lambda t: t[0],
+ return [t[0] for t in
cl.QueryNodes(names=nodes,
fields=["uuid"],
- use_locking=False))
+ use_locking=False)]
def _ToStream(stream, txt, *args):
diff --git a/lib/cli_opts.py b/lib/cli_opts.py
index 73a2ca9..c81355d 100644
--- a/lib/cli_opts.py
+++ b/lib/cli_opts.py
@@ -861,7 +861,8 @@
dest="ignore_consistency",
action="store_true", default=False,
help="Ignore the consistency of the disks on"
- " the secondary")
+ " the secondary. The source node must be "
+ "marked offline first for this to succeed.")
IGNORE_HVVERSIONS_OPT = cli_option("--ignore-hvversions",
dest="ignore_hvversions",
diff --git a/lib/client/gnt_cluster.py b/lib/client/gnt_cluster.py
index 5e5f6ff..2cc8328 100644
--- a/lib/client/gnt_cluster.py
+++ b/lib/client/gnt_cluster.py
@@ -53,6 +53,7 @@
from ganeti import opcodes
from ganeti import pathutils
from ganeti import qlang
+from ganeti.rpc.node import RunWithRPC
from ganeti import serializer
from ganeti import ssconf
from ganeti import ssh
@@ -74,6 +75,10 @@
help="Override interactive check for --no-voting",
default=False, action="store_true")
+IGNORE_OFFLINE_NODES_FAILOVER = cli_option(
+ "--ignore-offline-nodes", dest="ignore_offline_nodes",
+ help="Ignores offline nodes for master failover voting", default=True)
+
FORCE_DISTRIBUTION = cli_option("--yes-do-it", dest="yes_do_it",
help="Unconditionally distribute the"
" configuration, even if the queue"
@@ -152,7 +157,7 @@
return opts.drbd_helper
-@UsesRPC
+@RunWithRPC
def InitCluster(opts, args):
"""Initialize the cluster.
@@ -355,7 +360,7 @@
return 0
-@UsesRPC
+@RunWithRPC
def DestroyCluster(opts, args):
"""Destroy the cluster.
@@ -774,15 +779,8 @@
results = jex.GetResults()
- (bad_jobs, bad_results) = \
- map(len,
- # Convert iterators to lists
- map(list,
- # Count errors
- map(compat.partial(itertools.ifilterfalse, bool),
- # Convert result to booleans in a tuple
- zip(*((job_success, len(op_results) == 1 and op_results[0])
- for (job_success, op_results) in results)))))
+ bad_jobs = sum(1 for (job_success, _) in results if not job_success)
+ bad_results = sum(1 for (_, op_res) in results if not (op_res and op_res[0]))
if bad_jobs == 0 and bad_results == 0:
rcode = constants.EXIT_SUCCESS
@@ -882,7 +880,7 @@
SubmitOpCode(op, opts=opts)
-@UsesRPC
+@RunWithRPC
def MasterFailover(opts, args):
"""Failover the master node.
@@ -897,26 +895,31 @@
@return: the desired exit code
"""
- if not opts.no_voting:
- # Verify that a majority of nodes is still healthy
- if not bootstrap.MajorityHealthy():
+ if opts.no_voting:
+ # Don't ask for confirmation if the user provides the confirmation flag.
+ if not opts.yes_do_it:
+ usertext = ("This will perform the failover even if most other nodes"
+ " are down, or if this node is outdated. This is dangerous"
+ " as it can lead to a non-consistent cluster. Check the"
+ " gnt-cluster(8) man page before proceeding. Continue?")
+ if not AskUser(usertext):
+ return 1
+ else:
+ # Verify that a majority of nodes are still healthy
+ (majority_healthy, unhealthy_nodes) = bootstrap.MajorityHealthy(
+ opts.ignore_offline_nodes)
+ if not majority_healthy:
ToStderr("Master-failover with voting is only possible if the majority"
- " of nodes is still healthy; use the --no-voting option after"
+ " of nodes are still healthy; use the --no-voting option after"
" ensuring by other means that you won't end up in a dual-master"
- " scenario.")
- return 1
- if opts.no_voting and not opts.yes_do_it:
- usertext = ("This will perform the failover even if most other nodes"
- " are down, or if this node is outdated. This is dangerous"
- " as it can lead to a non-consistent cluster. Check the"
- " gnt-cluster(8) man page before proceeding. Continue?")
- if not AskUser(usertext):
+ " scenario. Unhealthy nodes: %s" % unhealthy_nodes)
return 1
- rvlaue, msgs = bootstrap.MasterFailover(no_voting=opts.no_voting)
+ rvalue, msgs = bootstrap.MasterFailover(no_voting=opts.no_voting)
for msg in msgs:
ToStderr(msg)
- return rvlaue
+
+ return rvalue
def MasterPing(opts, args):
@@ -2530,7 +2533,8 @@
RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
"[instance...]", "Updates mismatches in recorded disk sizes"),
"master-failover": (
- MasterFailover, ARGS_NONE, [NOVOTING_OPT, FORCE_FAILOVER],
+ MasterFailover, ARGS_NONE,
+ [NOVOTING_OPT, FORCE_FAILOVER, IGNORE_OFFLINE_NODES_FAILOVER],
"", "Makes the current node the master"),
"master-ping": (
MasterPing, ARGS_NONE, [],
diff --git a/lib/client/gnt_instance.py b/lib/client/gnt_instance.py
index cc5b851..dd1013f 100644
--- a/lib/client/gnt_instance.py
+++ b/lib/client/gnt_instance.py
@@ -458,10 +458,11 @@
@return: the desired exit code
"""
- if not opts.name_check:
- if not AskUser("As you disabled the check of the DNS entry, please verify"
- " that '%s' is a FQDN. Continue?" % args[1]):
- return 1
+ if not opts.force:
+ if not opts.name_check:
+ if not AskUser("As you disabled the check of the DNS entry, please verify"
+ " that '%s' is a FQDN. Continue?" % args[1]):
+ return 1
op = opcodes.OpInstanceRename(instance_name=args[0],
new_name=args[1],
@@ -736,6 +737,7 @@
"""
cl = GetClient()
instance_name = args[0]
+ ignore_consistency = opts.ignore_consistency
force = opts.force
iallocator = opts.iallocator
target_node = opts.dst_node
@@ -753,8 +755,14 @@
if not AskUser(usertext):
return 1
+ if ignore_consistency:
+ usertext = ("To failover instance %s, the source node must be marked"
+                " offline first. Is this already the case?") % instance_name
+ if not AskUser(usertext):
+ return 1
+
op = opcodes.OpInstanceFailover(instance_name=instance_name,
- ignore_consistency=opts.ignore_consistency,
+ ignore_consistency=ignore_consistency,
shutdown_timeout=opts.shutdown_timeout,
iallocator=iallocator,
target_node=target_node,
@@ -1641,7 +1649,7 @@
"rename": (
RenameInstance,
[ArgInstance(min=1, max=1), ArgHost(min=1, max=1)],
- [NOIPCHECK_OPT, NONAMECHECK_OPT] + SUBMIT_OPTS
+ [FORCE_OPT, NOIPCHECK_OPT, NONAMECHECK_OPT] + SUBMIT_OPTS
+ [DRY_RUN_OPT, PRIORITY_OPT],
"<instance> <new_name>", "Rename the instance"),
"replace-disks": (
diff --git a/lib/client/gnt_node.py b/lib/client/gnt_node.py
index 59b7a77..bac95e9 100644
--- a/lib/client/gnt_node.py
+++ b/lib/client/gnt_node.py
@@ -47,6 +47,7 @@
from ganeti import errors
from ganeti import netutils
from ganeti import pathutils
+from ganeti.rpc.node import RunWithRPC
from ganeti import ssh
from ganeti import compat
@@ -259,7 +260,7 @@
ssh.AddPublicKey(node, pub_key)
-@UsesRPC
+@RunWithRPC
def AddNode(opts, args):
"""Add a node to the cluster.
@@ -991,6 +992,18 @@
else:
disk_state = {}
+ # Comparing explicitly to false to distinguish between a parameter
+ # modification that doesn't set the node online (where the value will be None)
+ # and modifying the node to bring it online.
+ if opts.offline is False:
+ usertext = ("You are setting this node online manually. If the"
+ " configuration has changed, this can cause issues such as"
+ " split brain. To safely bring a node back online, please use"
+ " --readd instead. If you are confident that the configuration"
+ " hasn't changed, continue?")
+ if not AskUser(usertext):
+ return 1
+
hv_state = dict(opts.hv_state)
op = opcodes.OpNodeSetParams(node_name=args[0],
diff --git a/lib/cmdlib/cluster/__init__.py b/lib/cmdlib/cluster/__init__.py
index 74d109c..8182910 100644
--- a/lib/cmdlib/cluster/__init__.py
+++ b/lib/cmdlib/cluster/__init__.py
@@ -667,8 +667,10 @@
if self.wanted_names is None:
self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
- self.wanted_instances = \
- map(compat.snd, self.cfg.GetMultiInstanceInfoByName(self.wanted_names))
+ self.wanted_instances = [
+ info
+ for (_, info) in self.cfg.GetMultiInstanceInfoByName(self.wanted_names)
+ ]
def _EnsureChildSizes(self, disk):
"""Ensure children of the disk have the needed disk size.
diff --git a/lib/cmdlib/cluster/verify.py b/lib/cmdlib/cluster/verify.py
index b5cbf1f..8c68039 100644
--- a/lib/cmdlib/cluster/verify.py
+++ b/lib/cmdlib/cluster/verify.py
@@ -786,7 +786,7 @@
if constants.NV_MASTERIP not in nresult:
self._ErrorMsg(constants.CV_ENODENET, ninfo.name,
"node hasn't returned node master IP reachability data")
- elif nresult[constants.NV_MASTERIP] == False: # be explicit, could be None
+ elif nresult[constants.NV_MASTERIP] is False: # be explicit, could be None
if ninfo.uuid == self.master_node:
msg = "the master node cannot reach the master IP (not configured?)"
else:
@@ -1032,6 +1032,11 @@
" should node %s fail (%dMiB needed, %dMiB available)",
self.cfg.GetNodeName(prinode), needed_mem, n_img.mfree)
+ def _CertError(self, *args):
+ """Helper function for _VerifyClientCertificates."""
+ self._Error(constants.CV_ECLUSTERCLIENTCERT, None, *args)
+ self._cert_error_found = True
+
def _VerifyClientCertificates(self, nodes, all_nvinfo):
"""Verifies the consistency of the client certificates.
@@ -1046,20 +1051,25 @@
all nodes
"""
- candidate_certs = self.cfg.GetClusterInfo().candidate_certs
- if candidate_certs is None or len(candidate_certs) == 0:
- self._ErrorIf(
- True, constants.CV_ECLUSTERCLIENTCERT, None,
- "The cluster's list of master candidate certificates is empty."
- " If you just updated the cluster, please run"
+
+ rebuild_certs_msg = (
+ "To rebuild node certificates, please run"
" 'gnt-cluster renew-crypto --new-node-certificates'.")
+
+ self._cert_error_found = False
+
+ candidate_certs = self.cfg.GetClusterInfo().candidate_certs
+ if not candidate_certs:
+ self._CertError(
+ "The cluster's list of master candidate certificates is empty."
+ " This may be because you just updated the cluster. " +
+ rebuild_certs_msg)
return
- self._ErrorIf(
- len(candidate_certs) != len(set(candidate_certs.values())),
- constants.CV_ECLUSTERCLIENTCERT, None,
- "There are at least two master candidates configured to use the same"
- " certificate.")
+ if len(candidate_certs) != len(set(candidate_certs.values())):
+ self._CertError(
+ "There are at least two master candidates configured to use the same"
+ " certificate.")
# collect the client certificate
for node in nodes:
@@ -1072,45 +1082,42 @@
(errcode, msg) = nresult.payload.get(constants.NV_CLIENT_CERT, None)
- self._ErrorIf(
- errcode is not None, constants.CV_ECLUSTERCLIENTCERT, None,
- "Client certificate of node '%s' failed validation: %s (code '%s')",
- node.uuid, msg, errcode)
-
+ if errcode is not None:
+ self._CertError(
+ "Client certificate of node '%s' failed validation: %s (code '%s')",
+ node.uuid, msg, errcode)
if not errcode:
digest = msg
if node.master_candidate:
if node.uuid in candidate_certs:
- self._ErrorIf(
- digest != candidate_certs[node.uuid],
- constants.CV_ECLUSTERCLIENTCERT, None,
- "Client certificate digest of master candidate '%s' does not"
- " match its entry in the cluster's map of master candidate"
- " certificates. Expected: %s Got: %s", node.uuid,
- digest, candidate_certs[node.uuid])
+ if digest != candidate_certs[node.uuid]:
+ self._CertError(
+ "Client certificate digest of master candidate '%s' does not"
+ " match its entry in the cluster's map of master candidate"
+ " certificates. Expected: %s Got: %s", node.uuid,
+ digest, candidate_certs[node.uuid])
else:
- self._ErrorIf(
- True, constants.CV_ECLUSTERCLIENTCERT, None,
+ self._CertError(
"The master candidate '%s' does not have an entry in the"
" map of candidate certificates.", node.uuid)
- self._ErrorIf(
- digest in candidate_certs.values(),
- constants.CV_ECLUSTERCLIENTCERT, None,
- "Master candidate '%s' is using a certificate of another node.",
- node.uuid)
+ if digest in candidate_certs.values():
+ self._CertError(
+ "Master candidate '%s' is using a certificate of another node.",
+ node.uuid)
else:
- self._ErrorIf(
- node.uuid in candidate_certs,
- constants.CV_ECLUSTERCLIENTCERT, None,
- "Node '%s' is not a master candidate, but still listed in the"
- " map of master candidate certificates.", node.uuid)
- self._ErrorIf(
- (node.uuid not in candidate_certs) and
- (digest in candidate_certs.values()),
- constants.CV_ECLUSTERCLIENTCERT, None,
- "Node '%s' is not a master candidate and is incorrectly using a"
- " certificate of another node which is master candidate.",
- node.uuid)
+ if node.uuid in candidate_certs:
+ self._CertError(
+ "Node '%s' is not a master candidate, but still listed in the"
+ " map of master candidate certificates.", node.uuid)
+ if (node.uuid not in candidate_certs and
+ digest in candidate_certs.values()):
+ self._CertError(
+ "Node '%s' is not a master candidate and is incorrectly using a"
+ " certificate of another node which is master candidate.",
+ node.uuid)
+
+ if self._cert_error_found:
+ self._CertError(rebuild_certs_msg)
def _VerifySshSetup(self, nodes, all_nvinfo):
"""Evaluates the verification results of the SSH setup and clutter test.
@@ -1158,8 +1165,7 @@
filenodes = nodes
else:
filenodes = filter(fn, nodes)
- nodefiles.update((filename,
- frozenset(map(operator.attrgetter("uuid"), filenodes)))
+ nodefiles.update((filename, frozenset(fn.uuid for fn in filenodes))
for filename in files)
assert set(nodefiles) == (files_all | files_mc | files_vm)
@@ -1214,23 +1220,22 @@
"File %s is optional, but it must exist on all or no"
" nodes (not found on %s)",
filename,
- utils.CommaJoin(
- utils.NiceSort(
- map(self.cfg.GetNodeName, missing_file))))
+ utils.CommaJoin(utils.NiceSort(
+ self.cfg.GetNodeName(n) for n in missing_file)))
else:
self._ErrorIf(missing_file, constants.CV_ECLUSTERFILECHECK, None,
"File %s is missing from node(s) %s", filename,
- utils.CommaJoin(
- utils.NiceSort(
- map(self.cfg.GetNodeName, missing_file))))
+ utils.CommaJoin(utils.NiceSort(
+ self.cfg.GetNodeName(n) for n in missing_file)))
# Warn if a node has a file it shouldn't
unexpected = with_file - expected_nodes
self._ErrorIf(unexpected,
constants.CV_ECLUSTERFILECHECK, None,
"File %s should not exist on node(s) %s",
- filename, utils.CommaJoin(
- utils.NiceSort(map(self.cfg.GetNodeName, unexpected))))
+ filename,
+ utils.CommaJoin(utils.NiceSort(
+ self.cfg.GetNodeName(n) for n in unexpected)))
# See if there are multiple versions of the file
test = len(checksums) > 1
@@ -1238,7 +1243,7 @@
variants = ["variant %s on %s" %
(idx + 1,
utils.CommaJoin(utils.NiceSort(
- map(self.cfg.GetNodeName, node_uuids))))
+ self.cfg.GetNodeName(n) for n in node_uuids)))
for (idx, (checksum, node_uuids)) in
enumerate(sorted(checksums.items()))]
else:
@@ -1761,7 +1766,7 @@
keyfunc = operator.attrgetter("group")
return map(itertools.cycle,
- [sorted(map(operator.attrgetter("name"), names))
+ [sorted(n.name for n in names)
for _, names in itertools.groupby(sorted(nodes, key=keyfunc),
keyfunc)])
@@ -1932,10 +1937,10 @@
node_verify_param = {
constants.NV_FILELIST:
- map(vcluster.MakeVirtualPath,
- utils.UniqueSequence(filename
- for files in filemap
- for filename in files)),
+ [vcluster.MakeVirtualPath(f)
+ for f in utils.UniqueSequence(filename
+ for files in filemap
+ for filename in files)],
constants.NV_NODELIST:
self._SelectSshCheckNodes(node_data_list, self.group_uuid,
self.all_node_info.values()),
diff --git a/lib/cmdlib/common.py b/lib/cmdlib/common.py
index a15f95e..6ee86b9 100644
--- a/lib/cmdlib/common.py
+++ b/lib/cmdlib/common.py
@@ -35,7 +35,6 @@
import os
import urllib2
-from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import hypervisor
@@ -1000,9 +999,13 @@
(name, _NodeEvacDest(use_nodes, group, node_names))
for (name, group, node_names) in moved))
- return [map(compat.partial(_SetOpEarlyRelease, early_release),
- map(opcodes.OpCode.LoadOpCode, ops))
- for ops in jobs]
+ return [
+ [
+ _SetOpEarlyRelease(early_release, opcodes.OpCode.LoadOpCode(o))
+ for o in ops
+ ]
+ for ops in jobs
+ ]
def _NodeEvacDest(use_nodes, group, node_names):
diff --git a/lib/cmdlib/instance_query.py b/lib/cmdlib/instance_query.py
index 5aec4c1..1a8f954 100644
--- a/lib/cmdlib/instance_query.py
+++ b/lib/cmdlib/instance_query.py
@@ -32,7 +32,6 @@
import itertools
-from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti import utils
@@ -194,10 +193,10 @@
dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)
if dev.children:
- dev_children = map(compat.partial(self._ComputeDiskStatusInner,
- instance, snode_uuid,
- node_uuid2name_fn),
- dev.children)
+ dev_children = [
+ self._ComputeDiskStatusInner(instance, snode_uuid, node_uuid2name_fn, d)
+ for d in dev.children
+ ]
else:
dev_children = []
@@ -274,9 +273,8 @@
node_uuid2name_fn = lambda uuid: nodes[uuid].name
disk_objects = self.cfg.GetInstanceDisks(instance.uuid)
- output_disks = map(compat.partial(self._ComputeDiskStatus, instance,
- node_uuid2name_fn),
- disk_objects)
+ output_disks = [self._ComputeDiskStatus(instance, node_uuid2name_fn, d)
+ for d in disk_objects]
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
snodes_group_uuids = [nodes[snode_uuid].group
diff --git a/lib/cmdlib/instance_storage.py b/lib/cmdlib/instance_storage.py
index d92a9e8..513d61b 100644
--- a/lib/cmdlib/instance_storage.py
+++ b/lib/cmdlib/instance_storage.py
@@ -1347,7 +1347,7 @@
if disks is None:
disks = [(0, inst_disks[0])]
else:
- disks = map(lambda idx: (idx, inst_disks[idx]), disks)
+ disks = [(idx, inst_disks[idx]) for idx in disks]
logging.info("Pausing synchronization of disks of instance '%s'",
instance.name)
diff --git a/lib/config/__init__.py b/lib/config/__init__.py
index 346ac26..16b6ee1 100644
--- a/lib/config/__init__.py
+++ b/lib/config/__init__.py
@@ -1131,8 +1131,7 @@
if self._offline:
raise errors.ProgrammerError("Can't call ComputeDRBDMap in offline mode")
else:
- return dict(map(lambda (k, v): (k, dict(v)),
- self._wconfd.ComputeDRBDMap()))
+ return dict((k, dict(v)) for (k, v) in self._wconfd.ComputeDRBDMap())
def AllocateDRBDMinor(self, node_uuids, disk_uuid):
"""Allocate a drbd minor.
@@ -1591,8 +1590,8 @@
dictionaries.
"""
- return dict(map(lambda (uuid, ng): (uuid, ng.ToDict()),
- self._UnlockedGetAllNodeGroupsInfo().items()))
+ return dict((uuid, ng.ToDict()) for (uuid, ng) in
+ self._UnlockedGetAllNodeGroupsInfo().items())
@ConfigSync(shared=1)
def GetNodeGroupList(self):
diff --git a/lib/errors.py b/lib/errors.py
index e8671a2..826e761 100644
--- a/lib/errors.py
+++ b/lib/errors.py
@@ -469,6 +469,17 @@
"""
+class JobSubmittedException(Exception):
+ """Job was submitted, client should exit.
+
+ This exception has one argument, the ID of the job that was
+ submitted. The handler should print this ID.
+
+ This is not an error, just a structured way to exit from clients.
+
+ """
+
+
# errors should be added above
diff --git a/lib/hypervisor/hv_xen.py b/lib/hypervisor/hv_xen.py
index f7410f4..fc4f7b1 100644
--- a/lib/hypervisor/hv_xen.py
+++ b/lib/hypervisor/hv_xen.py
@@ -224,7 +224,7 @@
]
def _RunningWithSuffix(suffix):
- return map(lambda x: x + suffix, allowable_running_prefixes)
+ return [x + suffix for x in allowable_running_prefixes]
# The shutdown suspend ("ss") state is encountered during migration, where
# the instance is still considered to be running.
@@ -347,7 +347,7 @@
if len(fields) < 2:
continue
- (key, val) = map(lambda s: s.strip(), fields)
+ (key, val) = (s.strip() for s in fields)
# Note: in Xen 3, memory has changed to total_memory
if key in ("memory", "total_memory"):
diff --git a/lib/jqueue/__init__.py b/lib/jqueue/__init__.py
index b4cedb4..9384f55 100644
--- a/lib/jqueue/__init__.py
+++ b/lib/jqueue/__init__.py
@@ -1399,8 +1399,8 @@
if archived:
archive_path = pathutils.JOB_QUEUE_ARCHIVE_DIR
- result.extend(map(compat.partial(utils.PathJoin, archive_path),
- utils.ListVisibleFiles(archive_path)))
+ result.extend(utils.PathJoin(archive_path, job_file) for job_file in
+ utils.ListVisibleFiles(archive_path))
return result
diff --git a/lib/luxi.py b/lib/luxi.py
index 0c50c99..4391a4f 100644
--- a/lib/luxi.py
+++ b/lib/luxi.py
@@ -116,9 +116,9 @@
return self.CallMethod(REQ_PICKUP_JOB, (job,))
def SubmitJob(self, ops):
- ops_state = map(lambda op: op.__getstate__()
+ ops_state = [op.__getstate__()
if not isinstance(op, objects.ConfigObject)
- else op.ToDict(_with_private=True), ops)
+ else op.ToDict(_with_private=True) for op in ops]
return self.CallMethod(REQ_SUBMIT_JOB, (ops_state, ))
def SubmitJobToDrainedQueue(self, ops):
diff --git a/lib/mcpu.py b/lib/mcpu.py
index 0ab5cc0..41021ef 100644
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -52,7 +52,6 @@
from ganeti import cmdlib
from ganeti import locking
from ganeti import utils
-from ganeti import compat
from ganeti import wconfd
@@ -232,9 +231,8 @@
"""
if isinstance(result, cmdlib.ResultWithJobs):
# Copy basic parameters (e.g. priority)
- map(compat.partial(_SetBaseOpParams, op,
- "Submitted by %s" % op.OP_ID),
- itertools.chain(*result.jobs))
+ for op2 in itertools.chain(*result.jobs):
+ _SetBaseOpParams(op, "Submitted by %s" % op.OP_ID, op2)
# Submit jobs
job_submission = submit_fn(result.jobs)
diff --git a/lib/qlang.py b/lib/qlang.py
index 9194cf5..e424abe 100644
--- a/lib/qlang.py
+++ b/lib/qlang.py
@@ -48,7 +48,6 @@
from ganeti import constants
from ganeti import errors
from ganeti import utils
-from ganeti import compat
OP_OR = constants.QLANG_OP_OR
@@ -323,8 +322,10 @@
result = ParseFilter(filter_text)
elif args:
- result = [OP_OR] + map(compat.partial(_MakeFilterPart, namefield,
- isnumeric=isnumeric), args)
+ result = [OP_OR] + [
+ _MakeFilterPart(namefield, arg, isnumeric=isnumeric)
+ for arg in args
+ ]
else:
result = None
diff --git a/lib/query.py b/lib/query.py
index 6cea103..86c72b6 100644
--- a/lib/query.py
+++ b/lib/query.py
@@ -2179,18 +2179,21 @@
(_MakeField("snodes", "Secondary_Nodes", QFT_OTHER,
"Secondary nodes; usually this will just be one node"),
IQ_NODES, 0,
- lambda ctx, inst: map(compat.partial(_GetNodeName, ctx, None),
- inst.secondary_nodes)),
+ lambda ctx, inst: [
+ _GetNodeName(ctx, None, uuid) for uuid in inst.secondary_nodes
+ ]),
(_MakeField("snodes.group", "SecondaryNodesGroups", QFT_OTHER,
"Node groups of secondary nodes"),
IQ_NODES, 0,
- lambda ctx, inst: map(compat.partial(_GetInstNodeGroupName, ctx, None),
- inst.secondary_nodes)),
+ lambda ctx, inst: [
+ _GetInstNodeGroupName(ctx, None, uuid) for uuid in inst.secondary_nodes
+ ]),
(_MakeField("snodes.group.uuid", "SecondaryNodesGroupsUUID", QFT_OTHER,
"Node group UUIDs of secondary nodes"),
IQ_NODES, 0,
- lambda ctx, inst: map(compat.partial(_GetInstNodeGroup, ctx, None),
- inst.secondary_nodes)),
+ lambda ctx, inst: [
+ _GetInstNodeGroup(ctx, None, uuid) for uuid in inst.secondary_nodes
+ ]),
(_MakeField("admin_state", "InstanceState", QFT_TEXT,
"Desired state of the instance"),
IQ_CONFIG, 0, _GetItemAttr("admin_state")),
diff --git a/lib/rpc/node.py b/lib/rpc/node.py
index 43f843f..e0f4659 100644
--- a/lib/rpc/node.py
+++ b/lib/rpc/node.py
@@ -393,7 +393,7 @@
assert isinstance(body, dict)
assert len(body) == len(hosts)
assert compat.all(isinstance(v, str) for v in body.values())
- assert frozenset(map(lambda x: x[2], hosts)) == frozenset(body.keys()), \
+ assert frozenset(h[2] for h in hosts) == frozenset(body.keys()), \
"%s != %s" % (hosts, body.keys())
for (name, ip, original_name) in hosts:
@@ -519,8 +519,8 @@
# encode the arguments for each node individually, pass them and the node
# name to the prep_fn, and serialise its return value
- encode_args_fn = lambda node: map(compat.partial(self._encoder, node),
- zip(map(compat.snd, argdefs), args))
+ encode_args_fn = lambda node: [self._encoder(node, (argdef[1], val)) for
+ (argdef, val) in zip(argdefs, args)]
pnbody = dict(
(n,
serializer.DumpJson(prep_fn(n, encode_args_fn(n)),
@@ -532,8 +532,7 @@
req_resolver_opts)
if postproc_fn:
- return dict(map(lambda (key, value): (key, postproc_fn(value)),
- result.items()))
+ return dict((k, postproc_fn(v)) for (k, v) in result.items())
else:
return result
@@ -551,7 +550,7 @@
"""Converts a list of L{objects} to dictionaries.
"""
- return map(compat.partial(_ObjectToDict, node), value)
+ return [_ObjectToDict(node, v) for v in value]
def _PrepareFileUpload(getents_fn, node, filename):
diff --git a/lib/storage/base.py b/lib/storage/base.py
index 461fdad..c81c608 100644
--- a/lib/storage/base.py
+++ b/lib/storage/base.py
@@ -96,7 +96,7 @@
def __eq__(self, other):
if not isinstance(self, type(other)):
return False
- return (self._children == other._children and
+ return (self._children == other._children and # pylint: disable=W0212
self.dev_path == other.dev_path and
self.unique_id == other.unique_id and
self.major == other.major and
@@ -120,7 +120,7 @@
"""
pass
- def Attach(self):
+ def Attach(self, **kwargs):
"""Find a device which matches our config and attach to it.
"""
diff --git a/lib/storage/bdev.py b/lib/storage/bdev.py
index 7c1897c..81e60d0 100644
--- a/lib/storage/bdev.py
+++ b/lib/storage/bdev.py
@@ -137,7 +137,7 @@
empty_pvs = filter(objects.LvmPvInfo.IsEmpty, pvs_info)
if max_pvs is not None:
empty_pvs = empty_pvs[:max_pvs]
- return map((lambda pv: pv.name), empty_pvs)
+ return [pv.name for pv in empty_pvs]
@classmethod
def Create(cls, unique_id, children, size, spindles, params, excl_stor,
@@ -444,7 +444,7 @@
@staticmethod
def _ParseLvInfoLine(line, sep):
- """Parse one line of the lvs output used in L{_GetLvGlobalInfo}.
+ """Parse one line of the lvs output used in L{GetLvGlobalInfo}.
"""
elems = line.strip().split(sep)
@@ -453,13 +453,14 @@
# separator to the right of the output. The PV info might be empty for
# thin volumes, so stripping off the separators might cut off the last
# empty element - do this instead.
- if len(elems) == 8 and elems[-1] == "":
+ if len(elems) == 9 and elems[-1] == "":
elems.pop()
- if len(elems) != 7:
- base.ThrowError("Can't parse LVS output, len(%s) != 7", str(elems))
+ if len(elems) != 8:
+ base.ThrowError("Can't parse LVS output, len(%s) != 8", str(elems))
- (path, status, major, minor, pe_size, stripes, pvs) = elems
+ (vg_name, lv_name, status, major, minor, pe_size, stripes, pvs) = elems
+ path = os.path.join(os.environ.get('DM_DEV_DIR', '/dev'), vg_name, lv_name)
if len(status) < 6:
base.ThrowError("lvs lv_attr is not at least 6 characters (%s)", status)
@@ -490,7 +491,7 @@
return (path, (status, major, minor, pe_size, stripes, pv_names))
@staticmethod
- def _GetLvGlobalInfo(_run_cmd=utils.RunCmd):
+ def GetLvGlobalInfo(_run_cmd=utils.RunCmd):
"""Obtain the current state of the existing LV disks.
@return: a dict containing the state of each disk with the disk path as key
@@ -499,8 +500,8 @@
sep = "|"
result = _run_cmd(["lvs", "--noheadings", "--separator=%s" % sep,
"--units=k", "--nosuffix",
- "-olv_path,lv_attr,lv_kernel_major,lv_kernel_minor,"
- "vg_extent_size,stripes,devices"])
+ "-ovg_name,lv_name,lv_attr,lv_kernel_major,"
+ "lv_kernel_minor,vg_extent_size,stripes,devices"])
if result.failed:
logging.warning("lvs command failed, the LV cache will be empty!")
logging.info("lvs failure: %r", result.stderr)
@@ -512,7 +513,7 @@
return {}
return dict([LogicalVolume._ParseLvInfoLine(line, sep) for line in out])
- def Attach(self, lv_info=None):
+ def Attach(self, lv_info=None, **kwargs):
"""Attach to an existing LV.
This method will try to see if an existing and active LV exists
@@ -522,7 +523,7 @@
"""
self.attached = False
if not lv_info:
- lv_info = LogicalVolume._GetLvGlobalInfo().get(self.dev_path)
+ lv_info = LogicalVolume.GetLvGlobalInfo().get(self.dev_path)
if not lv_info:
return False
(status, major, minor, pe_size, stripes, pv_names) = lv_info
@@ -1101,7 +1102,7 @@
lines = output.splitlines()
# Try parsing the new output format (ceph >= 0.55).
- splitted_lines = map(lambda l: l.split(), lines)
+ splitted_lines = [l.split() for l in lines]
# Check for empty output.
if not splitted_lines:
@@ -1112,7 +1113,7 @@
if field_cnt != allfields:
# Parsing the new format failed. Fallback to parsing the old output
# format (< 0.55).
- splitted_lines = map(lambda l: l.split("\t"), lines)
+ splitted_lines = [l.split("\t") for l in lines]
if field_cnt != allfields:
base.ThrowError("Cannot parse rbd showmapped output expected %s fields,"
" found %s", allfields, field_cnt)
diff --git a/lib/storage/drbd.py b/lib/storage/drbd.py
index 8f4bc54..c7b8912 100644
--- a/lib/storage/drbd.py
+++ b/lib/storage/drbd.py
@@ -792,7 +792,7 @@
dual_pri=multimaster, hmac=constants.DRBD_HMAC_ALG,
secret=self._secret)
- def Attach(self):
+ def Attach(self, **kwargs):
"""Check if our minor is configured.
This doesn't do any device configurations - it only checks if the
diff --git a/lib/storage/extstorage.py b/lib/storage/extstorage.py
index ea750d3..6004ac7 100644
--- a/lib/storage/extstorage.py
+++ b/lib/storage/extstorage.py
@@ -119,7 +119,7 @@
"""
pass
- def Attach(self):
+ def Attach(self, **kwargs):
"""Attach to an existing extstorage device.
This method maps the extstorage volume that matches our name with
diff --git a/lib/storage/filestorage.py b/lib/storage/filestorage.py
index aac83b4..75961fc 100644
--- a/lib/storage/filestorage.py
+++ b/lib/storage/filestorage.py
@@ -265,7 +265,7 @@
return
self.file.Grow(amount, dryrun, backingstore, excl_stor)
- def Attach(self):
+ def Attach(self, **kwargs):
"""Attach to an existing file.
Check if this file already exists.
@@ -350,8 +350,8 @@
])
for prefix in ["", "/usr", "/usr/local"]:
- paths.update(map(lambda s: "%s/%s" % (prefix, s),
- ["bin", "lib", "lib32", "lib64", "sbin"]))
+ paths.update(["%s/%s" % (prefix, s) for s in
+ ["bin", "lib", "lib32", "lib64", "sbin"]])
return compat.UniqueFrozenset(map(os.path.normpath, paths))
diff --git a/lib/storage/gluster.py b/lib/storage/gluster.py
index b352d61..9c41f6b 100644
--- a/lib/storage/gluster.py
+++ b/lib/storage/gluster.py
@@ -381,7 +381,7 @@
"""
self.file.Grow(amount, dryrun, backingstore, excl_stor)
- def Attach(self):
+ def Attach(self, **kwargs):
"""Attach to an existing file.
Check if this file already exists.
diff --git a/lib/tools/node_cleanup.py b/lib/tools/node_cleanup.py
index 08a9548..1324db8 100644
--- a/lib/tools/node_cleanup.py
+++ b/lib/tools/node_cleanup.py
@@ -93,9 +93,9 @@
(pathutils.CLUSTER_CONF_FILE, True),
(pathutils.CLUSTER_DOMAIN_SECRET_FILE, True),
]
- clean_files.extend(map(lambda s: (s, True), pathutils.ALL_CERT_FILES))
- clean_files.extend(map(lambda s: (s, False),
- ssconf.SimpleStore().GetFileList()))
+ clean_files.extend((s, True) for s in pathutils.ALL_CERT_FILES)
+ clean_files.extend((s, False) for s in
+ ssconf.SimpleStore().GetFileList())
if not opts.yes_do_it:
cli.ToStderr("Cleaning a node is irreversible. If you really want to"
diff --git a/lib/watcher/__init__.py b/lib/watcher/__init__.py
index 700c843..5a557c8 100644
--- a/lib/watcher/__init__.py
+++ b/lib/watcher/__init__.py
@@ -39,9 +39,9 @@
import os
import os.path
import sys
+import signal
import time
import logging
-import operator
import errno
from optparse import OptionParser
@@ -533,8 +533,7 @@
filename, len(data))
utils.WriteFile(filename,
- data="".join(map(compat.partial(operator.mod, "%s %s\n"),
- sorted(data))))
+ data="\n".join("%s %s" % (n, s) for (n, s) in sorted(data)))
def _UpdateInstanceStatus(filename, instances):
@@ -660,34 +659,35 @@
children = []
for (idx, (name, uuid)) in enumerate(result):
- args = sys.argv + [cli.NODEGROUP_OPT_NAME, uuid]
-
if idx > 0:
# Let's not kill the system
time.sleep(CHILD_PROCESS_DELAY)
- logging.debug("Spawning child for group '%s' (%s), arguments %s",
- name, uuid, args)
+ logging.debug("Spawning child for group %r (%s).", name, uuid)
+ signal.signal(signal.SIGCHLD, signal.SIG_IGN)
try:
- # TODO: Should utils.StartDaemon be used instead?
- pid = os.spawnv(os.P_NOWAIT, args[0], args)
- except Exception: # pylint: disable=W0703
- logging.exception("Failed to start child for group '%s' (%s)",
- name, uuid)
+ pid = os.fork()
+ except OSError:
+ logging.exception("Failed to fork for group %r (%s)", name, uuid)
+
+ if pid == 0:
+ (options, _) = ParseOptions()
+ options.nodegroup = uuid
+ _GroupWatcher(options)
+ return
else:
logging.debug("Started with PID %s", pid)
children.append(pid)
if wait:
- for pid in children:
- logging.debug("Waiting for child PID %s", pid)
+ for child in children:
+ logging.debug("Waiting for child PID %s", child)
try:
- result = utils.RetryOnSignal(os.waitpid, pid, 0)
+ result = utils.RetryOnSignal(os.waitpid, child, 0)
except EnvironmentError, err:
result = str(err)
-
- logging.debug("Child PID %s exited with status %s", pid, result)
+ logging.debug("Child PID %s exited with status %s", child, result)
def _ArchiveJobs(cl, age):
@@ -789,14 +789,14 @@
[qlang.OP_EQUAL, "group.uuid", uuid]),
]
- results = []
- for what, fields, qfilter in queries:
- results.append(qcl.Query(what, fields, qfilter))
-
- results_data = map(operator.attrgetter("data"), results)
+ results_data = [
+ qcl.Query(what, field, qfilter).data
+ for (what, field, qfilter) in queries
+ ]
# Ensure results are tuples with two values
- assert compat.all(map(ht.TListOf(ht.TListOf(ht.TIsLength(2))), results_data))
+ assert compat.all(
+ ht.TListOf(ht.TListOf(ht.TIsLength(2)))(d) for d in results_data)
# Extract values ignoring result status
(raw_instances, raw_nodes) = [[map(compat.snd, values)
@@ -838,7 +838,7 @@
result = list(line.split(None, 1)[0] for line in groups
if line.strip())
- if not compat.all(map(utils.UUID_RE.match, result)):
+ if not compat.all(utils.UUID_RE.match(r) for r in result):
raise errors.GenericError("Ssconf contains invalid group UUID")
return result
@@ -933,7 +933,10 @@
logging.debug("Pause has been set, exiting")
return constants.EXIT_SUCCESS
- # Try to acquire global watcher lock in shared mode
+ # Try to acquire global watcher lock in shared mode.
+ # In case we are in the global watcher process, this lock will be held by all
+# child processes (one for each nodegroup) and will only be released when
+ # all of them have finished running.
lock = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
try:
lock.Shared(blocking=False)
@@ -941,7 +944,6 @@
logging.error("Can't acquire lock on %s: %s",
pathutils.WATCHER_LOCK_FILE, err)
return constants.EXIT_SUCCESS
-
if options.nodegroup is None:
fn = _GlobalWatcher
else:
diff --git a/man/gnt-cluster.rst b/man/gnt-cluster.rst
index cdcc474..9d247e0 100644
--- a/man/gnt-cluster.rst
+++ b/man/gnt-cluster.rst
@@ -127,7 +127,7 @@
individual nodes.
Please note that the master node will not be turned down or up
-automatically. It will just be left in a state, where you can manully
+automatically. It will just be left in a state where you can manually
perform the shutdown of that one node. If the master is in the list of
affected nodes and this is not a complete cluster emergency power-off
(e.g. using ``--all``), you're required to do a master failover to
@@ -608,7 +608,7 @@
the cluster. It is also not possible to disable a disk template when there
are still instances using it. The first disk template in the list of
enabled disk template is the default disk template. It will be used for
-instance creation, if no disk template is requested explicitely.
+instance creation, if no disk template is requested explicitly.
The ``--install-image`` option specifies the location of the OS image to
use to run the OS scripts inside a virtualized environment. This can be
@@ -654,7 +654,8 @@
MASTER-FAILOVER
~~~~~~~~~~~~~~~
-**master-failover** [\--no-voting] [\--yes-do-it]
+| **master-failover** [\--no-voting] [\--yes-do-it]
+| [\--ignore-offline-nodes]
Failover the master role to the current node.
@@ -680,6 +681,12 @@
the ``--no-voting`` option verifies a healthy majority of nodes and refuses
the operation otherwise.
+The ``--ignore-offline-nodes`` flag ignores offline nodes when the
+cluster is voting on the master. Any nodes that are offline are not
+counted towards the vote or towards the healthy nodes required for a
+majority, as they will be brought into sync with the rest of the cluster
+during a node readd operation.
+
MASTER-PING
~~~~~~~~~~~
@@ -808,7 +815,7 @@
The ``--enabled-data-collectors`` and ``--data-collector-interval``
options are to control the behavior of the **ganeti-mond**\(8). The
-first expects a list name=bool pairs to activate or decative the mentioned
+first expects a list of name=bool pairs to activate or deactivate the mentioned
data collector. The second option expects similar pairs of collector name
and number of seconds specifying the interval at which the collector
shall be collected.
@@ -924,7 +931,7 @@
cluster-internal server SSL certificate. The option
``--new-node-certificates`` will generate new node SSL
certificates for all nodes. Note that for the regeneration of
-of the server SSL certficate will invoke a regeneration of the
+the server SSL certificate, Ganeti will regenerate the
node certificates as well, because node certificates are signed
by the server certificate and thus have to be recreated and
signed by the new server certificate. Nodes which are offline
@@ -1056,7 +1063,7 @@
operations that modify the configuration.
The ``--verify-ssh-clutter`` option checks if more than one SSH key for the
-same 'user@hostname' pair exists in the 'authorizied_keys' file. This is only
+same 'user@hostname' pair exists in the 'authorized_keys' file. This is only
checked for hostnames of nodes which belong to the cluster. This check is
optional, because there might be other systems manipulating the
'authorized_keys' files, which would cause too many false positives
diff --git a/man/gnt-instance.rst b/man/gnt-instance.rst
index caad1d0..283392c 100644
--- a/man/gnt-instance.rst
+++ b/man/gnt-instance.rst
@@ -1541,7 +1541,8 @@
RENAME
^^^^^^
-| **rename** [\--no-ip-check] [\--no-name-check] [\--submit] [\--print-jobid]
+| **rename** [\--no-ip-check] [\--no-name-check] [\--force]
+| [\--submit] [\--print-jobid]
| {*instance*} {*new\_name*}
Renames the given instance. The instance must be stopped when running
@@ -1561,6 +1562,9 @@
is used to compute the IP address, if you pass this option you must also
pass the ``--no-ip-check`` option.
+The ``--force`` option is used to skip the interactive confirmation
+when ``--no-name-check`` is passed.
+
See **ganeti**\(7) for a description of ``--submit`` and other common
options.
@@ -2043,7 +2047,8 @@
for this purpose. Note that this option can be dangerous as errors in
shutting down the instance will be ignored, resulting in possibly
having the instance running on two machines in parallel (on
-disconnected DRBD drives).
+disconnected DRBD drives). This flag requires the source node to be
+marked offline first to succeed.
The ``--shutdown-timeout`` is used to specify how much time to wait
before forcing the shutdown (xm destroy in xen, killing the kvm
diff --git a/qa/qa_instance_utils.py b/qa/qa_instance_utils.py
index 1ae9448..7c99bbe 100644
--- a/qa/qa_instance_utils.py
+++ b/qa/qa_instance_utils.py
@@ -32,8 +32,6 @@
"""
-import operator
-
from ganeti import utils
from ganeti import constants
from ganeti import pathutils
@@ -132,8 +130,7 @@
"""
assert len(nodes) > 1
return _CreateInstanceByDiskTemplateRaw(
- ":".join(map(operator.attrgetter("primary"), nodes)),
- constants.DT_DRBD8, fail=fail)
+ ":".join(n.primary for n in nodes), constants.DT_DRBD8, fail=fail)
def CreateInstanceByDiskTemplateOneNode(nodes, disk_template, fail=False):
diff --git a/qa/qa_rapi.py b/qa/qa_rapi.py
index 9830066..18142f6 100644
--- a/qa/qa_rapi.py
+++ b/qa/qa_rapi.py
@@ -33,7 +33,6 @@
"""
import copy
-import functools
import itertools
import os.path
import random
@@ -981,8 +980,7 @@
instance_two = qa_config.AcquireInstance()
instance_list = [instance_one, instance_two]
try:
- rapi_dicts = map(functools.partial(_GenInstanceAllocationDict, node),
- instance_list)
+ rapi_dicts = [_GenInstanceAllocationDict(node, i) for i in instance_list]
job_id = _rapi_client.InstancesMultiAlloc(rapi_dicts)
diff --git a/src/Ganeti/Daemon/Utils.hs b/src/Ganeti/Daemon/Utils.hs
index bd4c3ed..34bea4b 100644
--- a/src/Ganeti/Daemon/Utils.hs
+++ b/src/Ganeti/Daemon/Utils.hs
@@ -51,6 +51,7 @@
import Ganeti.Logging
import Ganeti.Objects
import qualified Ganeti.Path as Path
+import Ganeti.Utils (frequency)
import Ganeti.Rpc
-- | Gather votes from all nodes and verify that we we are
@@ -73,14 +74,14 @@
unknown = length missing
liftIO . unless (null noDataNodes) . logWarning
. (++) "No voting RPC result from " $ show noDataNodes
- liftIO . logDebug . (++) "Valid votes: " $ show validVotes
+ liftIO . logDebug . (++) "Valid votes: " $ show (frequency validVotes)
if 2 * inFavor > voters
then return True
else if 2 * (inFavor + unknown) > voters
then return False
else fail $ "Voting cannot be won by " ++ myName
++ ", valid votes of " ++ show voters
- ++ " are " ++ show validVotes
+ ++ " are " ++ show (frequency validVotes)
-- | Verify, by voting, that this node is the master. Bad if we're not.
-- Allow the given number of retries to wait for not available nodes.
diff --git a/src/Ganeti/Utils.hs b/src/Ganeti/Utils.hs
index 42a8db9..0c2a0ac 100644
--- a/src/Ganeti/Utils.hs
+++ b/src/Ganeti/Utils.hs
@@ -102,6 +102,7 @@
, monotoneFind
, iterateJust
, partitionM
+ , frequency
) where
import Prelude ()
@@ -122,7 +123,9 @@
import Data.List ( intercalate
, find
, foldl'
+ , group
, transpose
+ , sort
, sortBy
, isPrefixOf
, maximumBy)
@@ -884,3 +887,8 @@
where f (a, b) x = do
pv <- p x
return $ if pv then (x : a, b) else (a, x : b)
+
+-- | Return a list of (count, element) tuples, one for each distinct
+-- element of the input list
+frequency :: Ord t => [t] -> [(Int, t)]
+frequency xs = map (\x -> (length x, head x)) . group . sort $ xs
diff --git a/test/py/cmdlib/cluster_unittest.py b/test/py/cmdlib/cluster_unittest.py
index 225c40d..1bdac3f 100644
--- a/test/py/cmdlib/cluster_unittest.py
+++ b/test/py/cmdlib/cluster_unittest.py
@@ -36,7 +36,6 @@
import copy
import unittest
-import operator
import re
import shutil
import os
@@ -99,7 +98,7 @@
objects.Node(name="node50", group="aaa", offline=False,
master_candidate=True),
] + mygroupnodes
- assert not utils.FindDuplicates(map(operator.attrgetter("name"), nodes))
+ assert not utils.FindDuplicates(n.name for n in nodes)
(online, perhost, _) = fn(mygroupnodes, "my", nodes)
self.assertEqual(online, ["node%s" % i for i in range(20, 26)])
@@ -126,7 +125,7 @@
objects.Node(name="node4", group="default", offline=True,
master_candidate=True),
]
- assert not utils.FindDuplicates(map(operator.attrgetter("name"), nodes))
+ assert not utils.FindDuplicates(n.name for n in nodes)
(online, perhost, _) = fn(nodes, "default", nodes)
self.assertEqual(online, ["node2", "node3"])
@@ -1232,8 +1231,13 @@
.Build()
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
- self.mcpu.assertLogContainsRegex("Client certificate")
- self.mcpu.assertLogContainsRegex("failed validation")
+ regexps = (
+ "Client certificate",
+ "failed validation",
+ "gnt-cluster renew-crypto --new-node-certificates",
+ )
+ for r in regexps:
+ self.mcpu.assertLogContainsRegex(r)
def testVerifyNoMasterCandidateMap(self):
client_cert = "client-cert-digest"
@@ -1247,6 +1251,8 @@
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex(
"list of master candidate certificates is empty")
+ self.mcpu.assertLogContainsRegex(
+ "gnt-cluster renew-crypto --new-node-certificates")
def testVerifyNoSharingMasterCandidates(self):
client_cert = "client-cert-digest"
@@ -1262,6 +1268,8 @@
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex(
"two master candidates configured to use the same")
+ self.mcpu.assertLogContainsRegex(
+ "gnt-cluster renew-crypto --new-node-certificates")
def testVerifyMasterCandidateCertMismatch(self):
client_cert = "client-cert-digest"
@@ -1274,6 +1282,8 @@
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("does not match its entry")
+ self.mcpu.assertLogContainsRegex(
+ "gnt-cluster renew-crypto --new-node-certificates")
def testVerifyMasterCandidateUnregistered(self):
client_cert = "client-cert-digest"
@@ -1286,6 +1296,8 @@
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("does not have an entry")
+ self.mcpu.assertLogContainsRegex(
+ "gnt-cluster renew-crypto --new-node-certificates")
def testVerifyMasterCandidateOtherNodesCert(self):
client_cert = "client-cert-digest"
@@ -1298,6 +1310,8 @@
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("using a certificate of another node")
+ self.mcpu.assertLogContainsRegex(
+ "gnt-cluster renew-crypto --new-node-certificates")
def testNormalNodeStillInList(self):
self._AddNormalNode()
@@ -1315,8 +1329,13 @@
.Build()
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
- self.mcpu.assertLogContainsRegex("not a master candidate")
- self.mcpu.assertLogContainsRegex("still listed")
+ regexps = (
+ "not a master candidate",
+ "still listed",
+ "gnt-cluster renew-crypto --new-node-certificates",
+ )
+ for r in regexps:
+ self.mcpu.assertLogContainsRegex(r)
def testNormalNodeStealingMasterCandidateCert(self):
self._AddNormalNode()
@@ -1332,9 +1351,13 @@
.Build()
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
- self.mcpu.assertLogContainsRegex("not a master candidate")
- self.mcpu.assertLogContainsRegex(
- "certificate of another node which is master candidate")
+ regexps = (
+ "not a master candidate",
+ "certificate of another node which is master candidate",
+ "gnt-cluster renew-crypto --new-node-certificates",
+ )
+ for r in regexps:
+ self.mcpu.assertLogContainsRegex(r)
class TestLUClusterVerifyGroupMethods(CmdlibTestCase):
@@ -1891,7 +1914,7 @@
.AddSuccessfulNode(node4, {}) \
.AddOfflineNode(node5) \
.Build()
- assert set(nvinfo.keys()) == set(map(operator.attrgetter("uuid"), nodeinfo))
+ assert set(nvinfo.keys()) == set(ni.uuid for ni in nodeinfo)
lu._VerifyFiles(nodeinfo, self.master_uuid, nvinfo,
(files_all, files_opt, files_mc, files_vm))
diff --git a/test/py/cmdlib/instance_unittest.py b/test/py/cmdlib/instance_unittest.py
index 1facb51..d725015 100644
--- a/test/py/cmdlib/instance_unittest.py
+++ b/test/py/cmdlib/instance_unittest.py
@@ -37,7 +37,6 @@
import re
import unittest
import mock
-import operator
import os
from ganeti import backend
@@ -1314,9 +1313,8 @@
file_driver=NotImplemented):
gdt = instance_storage.GenerateDiskTemplate
- map(lambda params: utils.ForceDictType(params,
- constants.IDISK_PARAMS_TYPES),
- disk_info)
+ for params in disk_info:
+ utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
# Check if non-empty list of secondaries is rejected
self.assertRaises(errors.ProgrammerError, gdt, self.lu,
@@ -1344,7 +1342,7 @@
return result
def _CheckIvNames(self, disks, base_index, end_index):
- self.assertEqual(map(operator.attrgetter("iv_name"), disks),
+ self.assertEqual([d.iv_name for d in disks],
["disk/%s" % i for i in range(base_index, end_index)])
def testPlain(self):
@@ -1360,11 +1358,11 @@
result = self._TestTrivialDisk(constants.DT_PLAIN, disk_info, 3,
constants.DT_PLAIN)
- self.assertEqual(map(operator.attrgetter("logical_id"), result), [
+ self.assertEqual([d.logical_id for d in result], [
("xenvg", "ec1-uq0.disk3"),
("othervg", "ec1-uq1.disk4"),
])
- self.assertEqual(map(operator.attrgetter("nodes"), result), [
+ self.assertEqual([d.nodes for d in result], [
["node21741.example.com"], ["node21741.example.com"]])
@@ -1399,17 +1397,17 @@
expected = [(constants.FD_BLKTAP,
'ganeti/inst21662.example.com.%d' % x)
for x in (2,3,4)]
- self.assertEqual(map(operator.attrgetter("logical_id"), result),
+ self.assertEqual([d.logical_id for d in result],
expected)
- self.assertEqual(map(operator.attrgetter("nodes"), result), [
+ self.assertEqual([d.nodes for d in result], [
[], [], []])
else:
if disk_template == constants.DT_FILE:
- self.assertEqual(map(operator.attrgetter("nodes"), result), [
+ self.assertEqual([d.nodes for d in result], [
["node21741.example.com"], ["node21741.example.com"],
["node21741.example.com"]])
else:
- self.assertEqual(map(operator.attrgetter("nodes"), result), [
+ self.assertEqual([d.nodes for d in result], [
[], [], []])
for (idx, disk) in enumerate(result):
@@ -1429,10 +1427,10 @@
result = self._TestTrivialDisk(constants.DT_BLOCK, disk_info, 10,
constants.DT_BLOCK)
- self.assertEqual(map(operator.attrgetter("logical_id"), result), [
+ self.assertEqual([d.logical_id for d in result], [
(constants.BLOCKDEV_DRIVER_MANUAL, "/tmp/some/block/dev"),
])
- self.assertEqual(map(operator.attrgetter("nodes"), result), [[]])
+ self.assertEqual([d.nodes for d in result], [[]])
def testRbd(self):
disk_info = [{
@@ -1446,11 +1444,11 @@
result = self._TestTrivialDisk(constants.DT_RBD, disk_info, 0,
constants.DT_RBD)
- self.assertEqual(map(operator.attrgetter("logical_id"), result), [
+ self.assertEqual([d.logical_id for d in result], [
("rbd", "ec1-uq0.rbd.disk0"),
("rbd", "ec1-uq1.rbd.disk1"),
])
- self.assertEqual(map(operator.attrgetter("nodes"), result), [[], []])
+ self.assertEqual([d.nodes for d in result], [[], []])
def testDrbd8(self):
gdt = instance_storage.GenerateDiskTemplate
@@ -1487,9 +1485,8 @@
assert len(exp_logical_ids) == len(disk_info)
- map(lambda params: utils.ForceDictType(params,
- constants.IDISK_PARAMS_TYPES),
- disk_info)
+ for params in disk_info:
+ utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
# Check if empty list of secondaries is rejected
self.assertRaises(errors.ProgrammerError, gdt, self.lu, constants.DT_DRBD8,
@@ -1514,7 +1511,7 @@
self.assertTrue(child.children is None)
self.assertEqual(child.nodes, exp_nodes)
- self.assertEqual(map(operator.attrgetter("logical_id"), disk.children),
+ self.assertEqual([d.logical_id for d in disk.children],
exp_logical_ids[idx])
self.assertEqual(disk.nodes, exp_nodes)
@@ -1526,7 +1523,7 @@
_UpdateIvNames(0, result)
self._CheckIvNames(result, 0, len(disk_info))
- self.assertEqual(map(operator.attrgetter("logical_id"), result), [
+ self.assertEqual([d.logical_id for d in result], [
("node1334.example.com", "node12272.example.com",
constants.FIRST_DRBD_PORT, 20, 21, "ec1-secret0"),
("node1334.example.com", "node12272.example.com",
diff --git a/test/py/docs_unittest.py b/test/py/docs_unittest.py
index 6802d48..aa8a971 100755
--- a/test/py/docs_unittest.py
+++ b/test/py/docs_unittest.py
@@ -33,7 +33,6 @@
import unittest
import re
import itertools
-import operator
from ganeti import _constants
from ganeti import utils
@@ -303,8 +302,7 @@
def _CheckTagHandlers(self, handlers):
tag_handlers = filter(lambda x: issubclass(x, rlib2._R_Tags), handlers)
- self.assertEqual(frozenset(map(operator.attrgetter("TAG_LEVEL"),
- tag_handlers)),
+ self.assertEqual(frozenset(tag.TAG_LEVEL for tag in tag_handlers),
constants.VALID_TAG_TYPES)
diff --git a/test/py/ganeti.asyncnotifier_unittest.py b/test/py/ganeti.asyncnotifier_unittest.py
index 2b3098f..b71df8c 100755
--- a/test/py/ganeti.asyncnotifier_unittest.py
+++ b/test/py/ganeti.asyncnotifier_unittest.py
@@ -30,6 +30,7 @@
"""Script for unittesting the asyncnotifier module"""
+import logging
import unittest
import signal
import os
@@ -178,10 +179,15 @@
handler = asyncnotifier.SingleFileEventHandler(wm, None,
utils.PathJoin(self.tmpdir,
"nonexist"))
- self.assertRaises(errors.InotifyError, handler.enable)
- self.assertRaises(errors.InotifyError, handler.enable)
- handler.disable()
- self.assertRaises(errors.InotifyError, handler.enable)
+ logger = logging.getLogger('pyinotify')
+ logger.disabled = True
+ try:
+ self.assertRaises(errors.InotifyError, handler.enable)
+ self.assertRaises(errors.InotifyError, handler.enable)
+ handler.disable()
+ self.assertRaises(errors.InotifyError, handler.enable)
+ finally:
+ logger.disabled = False
if __name__ == "__main__":
diff --git a/test/py/ganeti.cli_unittest.py b/test/py/ganeti.cli_unittest.py
index a3e28af..42cc3a4 100755
--- a/test/py/ganeti.cli_unittest.py
+++ b/test/py/ganeti.cli_unittest.py
@@ -1005,7 +1005,7 @@
self.assertEqual(frozenset(cli._QFT_NAMES), constants.QFT_ALL)
def testUnique(self):
- lcnames = map(lambda s: s.lower(), cli._QFT_NAMES.values())
+ lcnames = [s.lower() for s in cli._QFT_NAMES.values()]
self.assertFalse(utils.FindDuplicates(lcnames))
def testUppercase(self):
diff --git a/test/py/ganeti.config_unittest.py b/test/py/ganeti.config_unittest.py
index 23eac25..2e4c96e 100755
--- a/test/py/ganeti.config_unittest.py
+++ b/test/py/ganeti.config_unittest.py
@@ -462,8 +462,7 @@
uuid="798d0de3-680f-4a0e-b29a-0f54f693b3f1")
grp2_serial = 1
cfg.AddNodeGroup(grp2, "job")
- self.assertEqual(set(map(operator.attrgetter("name"),
- cfg.GetAllNodeGroupsInfo().values())),
+ self.assertEqual(set(ng.name for ng in cfg.GetAllNodeGroupsInfo().values()),
set(["grp1", "grp2", constants.INITIAL_NODE_GROUP_NAME]))
# No-op
@@ -659,7 +658,7 @@
self.assertEqual(instance_disks, [disk])
def _IsErrorInList(err_str, err_list):
- return any(map(lambda e: err_str in e, err_list))
+ return any((err_str in e) for e in err_list)
class TestTRM(unittest.TestCase):
diff --git a/test/py/ganeti.jqueue_unittest.py b/test/py/ganeti.jqueue_unittest.py
index c258f65..05cb7aa 100755
--- a/test/py/ganeti.jqueue_unittest.py
+++ b/test/py/ganeti.jqueue_unittest.py
@@ -38,7 +38,6 @@
import errno
import itertools
import random
-import operator
try:
# pylint: disable=E0611
@@ -266,7 +265,7 @@
self.assertEqual(job.CalcPriority(), constants.OP_PRIO_DEFAULT)
self.assertTrue(compat.all(op.priority == constants.OP_PRIO_DEFAULT
for op in job.ops))
- self.assertEqual(map(operator.attrgetter("status"), job.ops), [
+ self.assertEqual([op.status for op in job.ops], [
constants.OP_STATUS_SUCCESS,
constants.OP_STATUS_SUCCESS,
constants.OP_STATUS_SUCCESS,
@@ -289,7 +288,7 @@
self.assertEqual(job.CalcPriority(), constants.OP_PRIO_DEFAULT)
self.assertTrue(compat.all(op.priority == constants.OP_PRIO_DEFAULT
for op in job.ops))
- self.assertEqual(map(operator.attrgetter("status"), job.ops), [
+ self.assertEqual([op.status for op in job.ops], [
constants.OP_STATUS_SUCCESS,
constants.OP_STATUS_SUCCESS,
constants.OP_STATUS_CANCELING,
@@ -310,9 +309,9 @@
self.assertEqual(job.CalcPriority(), constants.OP_PRIO_DEFAULT)
result = job.ChangePriority(7)
self.assertEqual(job.CalcPriority(), constants.OP_PRIO_DEFAULT)
- self.assertEqual(map(operator.attrgetter("priority"), job.ops),
+ self.assertEqual([op.priority for op in job.ops],
[constants.OP_PRIO_DEFAULT, 7, 7, 7])
- self.assertEqual(map(operator.attrgetter("status"), job.ops), [
+ self.assertEqual([op.status for op in job.ops], [
constants.OP_STATUS_RUNNING,
constants.OP_STATUS_QUEUED,
constants.OP_STATUS_QUEUED,
@@ -337,7 +336,7 @@
self.assertEqual(job.CalcPriority(), constants.OP_PRIO_DEFAULT)
self.assertTrue(compat.all(op.priority == constants.OP_PRIO_DEFAULT
for op in job.ops))
- self.assertEqual(map(operator.attrgetter("status"), job.ops), [
+ self.assertEqual([op.status for op in job.ops], [
constants.OP_STATUS_SUCCESS,
constants.OP_STATUS_SUCCESS,
constants.OP_STATUS_SUCCESS,
@@ -357,10 +356,10 @@
self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_RUNNING)
result = job.ChangePriority(-19)
self.assertEqual(job.CalcPriority(), -19)
- self.assertEqual(map(operator.attrgetter("priority"), job.ops),
+ self.assertEqual([op.priority for op in job.ops],
[constants.OP_PRIO_DEFAULT, constants.OP_PRIO_DEFAULT,
-19, -19])
- self.assertEqual(map(operator.attrgetter("status"), job.ops), [
+ self.assertEqual([op.status for op in job.ops], [
constants.OP_STATUS_SUCCESS,
constants.OP_STATUS_RUNNING,
constants.OP_STATUS_QUEUED,
@@ -1791,7 +1790,7 @@
# Check status
self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_SUCCESS)
self.assertEqual(job.CalcPriority(), constants.OP_PRIO_DEFAULT)
- self.assertEqual(map(operator.attrgetter("priority"), job.ops),
+ self.assertEqual([op.priority for op in job.ops],
[constants.OP_PRIO_DEFAULT, -10, 5])
diff --git a/test/py/ganeti.rpc_unittest.py b/test/py/ganeti.rpc_unittest.py
index 54f199c..346ab46 100755
--- a/test/py/ganeti.rpc_unittest.py
+++ b/test/py/ganeti.rpc_unittest.py
@@ -859,8 +859,9 @@
result = runner._encoder(NotImplemented,
(rpc_defs.ED_OBJECT_DICT_LIST, 5 * [inst]))
- map(_CheckBasics, result)
- map(lambda r: self.assertEqual(len(r["hvparams"]), 2), result)
+ for r in result:
+ _CheckBasics(r)
+ self.assertEqual(len(r["hvparams"]), 2)
# Just an instance
result = runner._encoder(NotImplemented, (rpc_defs.ED_INST_DICT, inst))
diff --git a/test/py/ganeti.storage.bdev_unittest.py b/test/py/ganeti.storage.bdev_unittest.py
index a894e8f..2bfcb04 100755
--- a/test/py/ganeti.storage.bdev_unittest.py
+++ b/test/py/ganeti.storage.bdev_unittest.py
@@ -336,18 +336,18 @@
def testParseLvInfoLine(self):
"""Tests for LogicalVolume._ParseLvInfoLine."""
broken_lines = [
- " toomuch#devpath#-wi-ao#253#3#4096.00#2#/dev/abc(20)",
- " devpath#-wi-ao#253#3#4096.00#/dev/abc(20)",
- " devpath#-wi-a#253#3#4096.00#2#/dev/abc(20)",
- " devpath#-wi-ao#25.3#3#4096.00#2#/dev/abc(20)",
- " devpath#-wi-ao#twenty#3#4096.00#2#/dev/abc(20)",
- " devpath#-wi-ao#253#3.1#4096.00#2#/dev/abc(20)",
- " devpath#-wi-ao#253#three#4096.00#2#/dev/abc(20)",
- " devpath#-wi-ao#253#3#four#2#/dev/abc(20)",
- " devpath#-wi-ao#253#3#4096..00#2#/dev/abc(20)",
- " devpath#-wi-ao#253#3#4096.00#2.0#/dev/abc(20)",
- " devpath#-wi-ao#253#3#4096.00#two#/dev/abc(20)",
- " devpath#-wi-ao#253#3#4096.00#2#/dev/abc20",
+ " toomuch#vg#lv#-wi-ao#253#3#4096.00#2#/dev/abc(20)",
+ " vg#lv#-wi-ao#253#3#4096.00#/dev/abc(20)",
+ " vg#lv#-wi-a#253#3#4096.00#2#/dev/abc(20)",
+ " vg#lv#-wi-ao#25.3#3#4096.00#2#/dev/abc(20)",
+ " vg#lv#-wi-ao#twenty#3#4096.00#2#/dev/abc(20)",
+ " vg#lv#-wi-ao#253#3.1#4096.00#2#/dev/abc(20)",
+ " vg#lv#-wi-ao#253#three#4096.00#2#/dev/abc(20)",
+ " vg#lv#-wi-ao#253#3#four#2#/dev/abc(20)",
+ " vg#lv#-wi-ao#253#3#4096..00#2#/dev/abc(20)",
+ " vg#lv#-wi-ao#253#3#4096.00#2.0#/dev/abc(20)",
+ " vg#lv#-wi-ao#253#3#4096.00#two#/dev/abc(20)",
+ " vg#lv#-wi-ao#253#3#4096.00#2#/dev/abc20",
]
for broken in broken_lines:
self.assertRaises(errors.BlockDeviceError,
@@ -358,49 +358,54 @@
# /dev/something|-wi-ao|253|3|4096.00|2|/dev/sdb(144),/dev/sdc(0)
# /dev/somethingelse|-wi-a-|253|4|4096.00|1|/dev/sdb(208)
true_out = [
- ("/dev/path", ("-wi-ao", 253, 3, 4096.00, 2, ["/dev/abc"])),
- ("/dev/path", ("-wi-a-", 253, 7, 4096.00, 4, ["/dev/abc"])),
- ("/dev/path", ("-ri-a-", 253, 4, 4.00, 5, ["/dev/abc", "/dev/def"])),
- ("/dev/path", ("-wc-ao", 15, 18, 4096.00, 32,
+ (("vg", "lv"), ("-wi-ao", 253, 3, 4096.00, 2, ["/dev/abc"])),
+ (("vg", "lv"), ("-wi-a-", 253, 7, 4096.00, 4, ["/dev/abc"])),
+ (("vg", "lv"), ("-ri-a-", 253, 4, 4.00, 5, ["/dev/abc", "/dev/def"])),
+ (("vg", "lv"), ("-wc-ao", 15, 18, 4096.00, 32,
["/dev/abc", "/dev/def", "/dev/ghi0"])),
# Physical devices might be missing with thin volumes
- ("/dev/path", ("twc-ao", 15, 18, 4096.00, 32, [])),
+ (("vg", "lv"), ("twc-ao", 15, 18, 4096.00, 32, [])),
]
for exp in true_out:
for sep in "#;|":
- devpath = exp[0]
+ # NB We get lvs to return vg_name and lv_name separately, but
+ # _ParseLvInfoLine returns a pathname built from these, so we
+ # need to do some extra munging to round-trip this properly.
+ vg_name, lv_name = exp[0]
+ dev = os.environ.get('DM_DEV_DIR', '/dev')
+ devpath = os.path.join(dev, vg_name, lv_name)
lvs = exp[1]
pvs = ",".join("%s(%s)" % (d, i * 12) for (i, d) in enumerate(lvs[-1]))
- lvs_line = (sep.join((" %s", "%s", "%d", "%d", "%.2f", "%d", "%s")) %
- ((devpath,) + lvs[0:-1] + (pvs,)))
+ fmt_str = sep.join((" %s", "%s", "%s", "%d", "%d", "%.2f", "%d", "%s"))
+ lvs_line = fmt_str % ((vg_name, lv_name) + lvs[0:-1] + (pvs,))
parsed = bdev.LogicalVolume._ParseLvInfoLine(lvs_line, sep)
- self.assertEqual(parsed, exp)
+ self.assertEqual(parsed, (devpath,) + exp[1:])
def testGetLvGlobalInfo(self):
- """Tests for LogicalVolume._GetLvGlobalInfo."""
+ """Tests for LogicalVolume.GetLvGlobalInfo."""
- good_lines="/dev/1|-wi-ao|253|3|4096.00|2|/dev/sda(20)\n" \
- "/dev/2|-wi-ao|253|3|4096.00|2|/dev/sda(21)\n"
- expected_output = {"/dev/1": ("-wi-ao", 253, 3, 4096, 2, ["/dev/sda"]),
- "/dev/2": ("-wi-ao", 253, 3, 4096, 2, ["/dev/sda"])}
+ good_lines="vg|1|-wi-ao|253|3|4096.00|2|/dev/sda(20)\n" \
+ "vg|2|-wi-ao|253|3|4096.00|2|/dev/sda(21)\n"
+ expected_output = {"/dev/vg/1": ("-wi-ao", 253, 3, 4096, 2, ["/dev/sda"]),
+ "/dev/vg/2": ("-wi-ao", 253, 3, 4096, 2, ["/dev/sda"])}
self.assertEqual({},
- bdev.LogicalVolume._GetLvGlobalInfo(
+ bdev.LogicalVolume.GetLvGlobalInfo(
_run_cmd=lambda cmd: _FakeRunCmd(False,
"Fake error msg",
cmd)))
self.assertEqual({},
- bdev.LogicalVolume._GetLvGlobalInfo(
+ bdev.LogicalVolume.GetLvGlobalInfo(
_run_cmd=lambda cmd: _FakeRunCmd(True,
"",
cmd)))
self.assertRaises(errors.BlockDeviceError,
- bdev.LogicalVolume._GetLvGlobalInfo,
+ bdev.LogicalVolume.GetLvGlobalInfo,
_run_cmd=lambda cmd: _FakeRunCmd(True, "BadStdOut", cmd))
fake_cmd = lambda cmd: _FakeRunCmd(True, good_lines, cmd)
- good_res = bdev.LogicalVolume._GetLvGlobalInfo(_run_cmd=fake_cmd)
+ good_res = bdev.LogicalVolume.GetLvGlobalInfo(_run_cmd=fake_cmd)
self.assertEqual(expected_output, good_res)
@testutils.patch_object(bdev.LogicalVolume, "Attach")
@@ -564,7 +569,7 @@
self.test_unique_id, [], 1024, None,
self.test_params, False, {})
- @testutils.patch_object(bdev.LogicalVolume, "_GetLvGlobalInfo")
+ @testutils.patch_object(bdev.LogicalVolume, "GetLvGlobalInfo")
def testAttach(self, info_mock):
"""Test for bdev.LogicalVolume.Attach()"""
info_mock.return_value = {"/dev/fake/path": ("v", 1, 0, 1024, 0, ["test"])}
@@ -573,7 +578,7 @@
self.assertEqual(dev.Attach(), True)
- @testutils.patch_object(bdev.LogicalVolume, "_GetLvGlobalInfo")
+ @testutils.patch_object(bdev.LogicalVolume, "GetLvGlobalInfo")
def testAttachFalse(self, info_mock):
"""Test for bdev.LogicalVolume.Attach() with missing lv_info"""
info_mock.return_value = {}
diff --git a/test/py/ganeti.utils.hash_unittest.py b/test/py/ganeti.utils.hash_unittest.py
index 8210658..945fd8a 100755
--- a/test/py/ganeti.utils.hash_unittest.py
+++ b/test/py/ganeti.utils.hash_unittest.py
@@ -32,7 +32,6 @@
import unittest
import random
-import operator
import tempfile
from ganeti import constants
diff --git a/test/py/ganeti.utils.storage_unittest.py b/test/py/ganeti.utils.storage_unittest.py
index 33b6f8a..8d7438a 100755
--- a/test/py/ganeti.utils.storage_unittest.py
+++ b/test/py/ganeti.utils.storage_unittest.py
@@ -166,9 +166,8 @@
start1 = 27
start2 = 703
result1 = list(storage.GetDiskLabels("", start1))
- result2 = \
- map(lambda x: x[1:],
- list(storage.GetDiskLabels("", start2, start=start2 - start1)))
+ result2 = [x[1:] for x in
+ list(storage.GetDiskLabels("", start2, start=start2 - start1))]
self.assertEqual(result1, result2)
diff --git a/test/py/ganeti.utils_unittest.py b/test/py/ganeti.utils_unittest.py
index 5ebe78a..cf98615 100755
--- a/test/py/ganeti.utils_unittest.py
+++ b/test/py/ganeti.utils_unittest.py
@@ -34,7 +34,6 @@
import fcntl
import glob
import mock
-import operator
import os
import os.path
import random
diff --git a/test/py/qa.qa_config_unittest.py b/test/py/qa.qa_config_unittest.py
index 71de0ba..177e93f 100755
--- a/test/py/qa.qa_config_unittest.py
+++ b/test/py/qa.qa_config_unittest.py
@@ -34,7 +34,6 @@
import tempfile
import shutil
import os
-import operator
from ganeti import utils
from ganeti import serializer
@@ -336,8 +335,7 @@
qa_config._QaNode))
def testAcquireAndReleaseInstance(self):
- self.assertFalse(compat.any(map(operator.attrgetter("used"),
- self.config["instances"])))
+ self.assertFalse(compat.any(i.used for i in self.config["instances"]))
inst = qa_config.AcquireInstance(_cfg=self.config)
self.assertTrue(inst.used)
@@ -348,8 +346,7 @@
self.assertFalse(inst.used)
self.assertTrue(inst.disk_template is None)
- self.assertFalse(compat.any(map(operator.attrgetter("used"),
- self.config["instances"])))
+ self.assertFalse(compat.any(i.used for i in self.config["instances"]))
def testAcquireInstanceTooMany(self):
# Acquire all instances
@@ -363,8 +360,7 @@
qa_config.AcquireInstance, _cfg=self.config)
def testAcquireNodeNoneAdded(self):
- self.assertFalse(compat.any(map(operator.attrgetter("added"),
- self.config["nodes"])))
+ self.assertFalse(compat.any(n.added for n in self.config["nodes"]))
# First call must return master node
node = qa_config.AcquireNode(_cfg=self.config)
@@ -420,7 +416,7 @@
self.assertEqual(acquired, sorted(acquired, key=key_fn))
# Release previously acquired nodes
- qa_config.ReleaseManyNodes(map(operator.itemgetter(2), acquired))
+ qa_config.ReleaseManyNodes([a[2] for a in acquired])
# Check if nodes were actually released
for node in self.config["nodes"]:
diff --git a/test/py/testutils/config_mock.py b/test/py/testutils/config_mock.py
index b79ec29..473bede 100644
--- a/test/py/testutils/config_mock.py
+++ b/test/py/testutils/config_mock.py
@@ -614,7 +614,7 @@
return dict((node_uuid, {}) for node_uuid in self._ConfigData().nodes)
def AllocateDRBDMinor(self, node_uuids, disk_uuid):
- return map(lambda _: 0, node_uuids)
+ return [0] * len(node_uuids)
def ReleaseDRBDMinors(self, disk_uuid):
pass