Replace map/partial with list comprehension
The rather Haskellish pattern
map(compat.partial(fn, arg), xs)
can be replaced by the much more Pythonic
[fn(arg, x) for x in xs]
Signed-off-by: Brian Foley <bpfoley@google.com>
Reviewed-by: Viktor Bachraty <vbachraty@google.com>
diff --git a/lib/cmdlib/common.py b/lib/cmdlib/common.py
index 638abd7..33142ef 100644
--- a/lib/cmdlib/common.py
+++ b/lib/cmdlib/common.py
@@ -35,7 +35,6 @@
import os
import urllib2
-from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import hypervisor
@@ -998,9 +997,13 @@
(name, _NodeEvacDest(use_nodes, group, node_names))
for (name, group, node_names) in moved))
- return [map(compat.partial(_SetOpEarlyRelease, early_release),
- map(opcodes.OpCode.LoadOpCode, ops))
- for ops in jobs]
+ return [
+ [
+ _SetOpEarlyRelease(early_release, opcodes.OpCode.LoadOpCode(o))
+ for o in ops
+ ]
+ for ops in jobs
+ ]
def _NodeEvacDest(use_nodes, group, node_names):
diff --git a/lib/cmdlib/instance_query.py b/lib/cmdlib/instance_query.py
index 5aec4c1..1a8f954 100644
--- a/lib/cmdlib/instance_query.py
+++ b/lib/cmdlib/instance_query.py
@@ -32,7 +32,6 @@
import itertools
-from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti import utils
@@ -194,10 +193,10 @@
dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)
if dev.children:
- dev_children = map(compat.partial(self._ComputeDiskStatusInner,
- instance, snode_uuid,
- node_uuid2name_fn),
- dev.children)
+ dev_children = [
+ self._ComputeDiskStatusInner(instance, snode_uuid, node_uuid2name_fn, d)
+ for d in dev.children
+ ]
else:
dev_children = []
@@ -274,9 +273,8 @@
node_uuid2name_fn = lambda uuid: nodes[uuid].name
disk_objects = self.cfg.GetInstanceDisks(instance.uuid)
- output_disks = map(compat.partial(self._ComputeDiskStatus, instance,
- node_uuid2name_fn),
- disk_objects)
+ output_disks = [self._ComputeDiskStatus(instance, node_uuid2name_fn, d)
+ for d in disk_objects]
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
snodes_group_uuids = [nodes[snode_uuid].group
diff --git a/lib/jqueue/__init__.py b/lib/jqueue/__init__.py
index b4cedb4..9384f55 100644
--- a/lib/jqueue/__init__.py
+++ b/lib/jqueue/__init__.py
@@ -1399,8 +1399,8 @@
if archived:
archive_path = pathutils.JOB_QUEUE_ARCHIVE_DIR
- result.extend(map(compat.partial(utils.PathJoin, archive_path),
- utils.ListVisibleFiles(archive_path)))
+ result.extend(utils.PathJoin(archive_path, job_file) for job_file in
+ utils.ListVisibleFiles(archive_path))
return result
diff --git a/lib/mcpu.py b/lib/mcpu.py
index ff8ef1f..bdcc6af 100644
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -52,7 +52,6 @@
from ganeti import cmdlib
from ganeti import locking
from ganeti import utils
-from ganeti import compat
from ganeti import wconfd
@@ -232,9 +231,8 @@
"""
if isinstance(result, cmdlib.ResultWithJobs):
# Copy basic parameters (e.g. priority)
- map(compat.partial(_SetBaseOpParams, op,
- "Submitted by %s" % op.OP_ID),
- itertools.chain(*result.jobs))
+ for op2 in itertools.chain(*result.jobs):
+ _SetBaseOpParams(op, "Submitted by %s" % op.OP_ID, op2)
# Submit jobs
job_submission = submit_fn(result.jobs)
diff --git a/lib/qlang.py b/lib/qlang.py
index 9194cf5..e424abe 100644
--- a/lib/qlang.py
+++ b/lib/qlang.py
@@ -48,7 +48,6 @@
from ganeti import constants
from ganeti import errors
from ganeti import utils
-from ganeti import compat
OP_OR = constants.QLANG_OP_OR
@@ -323,8 +322,10 @@
result = ParseFilter(filter_text)
elif args:
- result = [OP_OR] + map(compat.partial(_MakeFilterPart, namefield,
- isnumeric=isnumeric), args)
+ result = [OP_OR] + [
+ _MakeFilterPart(namefield, arg, isnumeric=isnumeric)
+ for arg in args
+ ]
else:
result = None
diff --git a/lib/query.py b/lib/query.py
index 43d8fad..dfeccf5 100644
--- a/lib/query.py
+++ b/lib/query.py
@@ -2199,18 +2199,21 @@
(_MakeField("snodes", "Secondary_Nodes", QFT_OTHER,
"Secondary nodes; usually this will just be one node"),
IQ_NODES, 0,
- lambda ctx, inst: map(compat.partial(_GetNodeName, ctx, None),
- inst.secondary_nodes)),
+ lambda ctx, inst: [
+ _GetNodeName(ctx, None, uuid) for uuid in inst.secondary_nodes
+ ]),
(_MakeField("snodes.group", "SecondaryNodesGroups", QFT_OTHER,
"Node groups of secondary nodes"),
IQ_NODES, 0,
- lambda ctx, inst: map(compat.partial(_GetInstNodeGroupName, ctx, None),
- inst.secondary_nodes)),
+ lambda ctx, inst: [
+ _GetInstNodeGroupName(ctx, None, uuid) for uuid in inst.secondary_nodes
+ ]),
(_MakeField("snodes.group.uuid", "SecondaryNodesGroupsUUID", QFT_OTHER,
"Node group UUIDs of secondary nodes"),
IQ_NODES, 0,
- lambda ctx, inst: map(compat.partial(_GetInstNodeGroup, ctx, None),
- inst.secondary_nodes)),
+ lambda ctx, inst: [
+ _GetInstNodeGroup(ctx, None, uuid) for uuid in inst.secondary_nodes
+ ]),
(_MakeField("admin_state", "InstanceState", QFT_TEXT,
"Desired state of the instance"),
IQ_CONFIG, 0, _GetItemAttr("admin_state")),
diff --git a/lib/rpc/node.py b/lib/rpc/node.py
index 3515519..e0f4659 100644
--- a/lib/rpc/node.py
+++ b/lib/rpc/node.py
@@ -519,8 +519,8 @@
# encode the arguments for each node individually, pass them and the node
# name to the prep_fn, and serialise its return value
- encode_args_fn = lambda node: map(compat.partial(self._encoder, node),
- zip(map(compat.snd, argdefs), args))
+ encode_args_fn = lambda node: [self._encoder(node, (argdef[1], val)) for
+ (argdef, val) in zip(argdefs, args)]
pnbody = dict(
(n,
serializer.DumpJson(prep_fn(n, encode_args_fn(n)),
@@ -550,7 +550,7 @@
"""Converts a list of L{objects} to dictionaries.
"""
- return map(compat.partial(_ObjectToDict, node), value)
+ return [_ObjectToDict(node, v) for v in value]
def _PrepareFileUpload(getents_fn, node, filename):
diff --git a/qa/qa_rapi.py b/qa/qa_rapi.py
index a008247..9282587 100644
--- a/qa/qa_rapi.py
+++ b/qa/qa_rapi.py
@@ -33,7 +33,6 @@
"""
import copy
-import functools
import itertools
import os.path
import random
@@ -980,8 +979,7 @@
instance_two = qa_config.AcquireInstance()
instance_list = [instance_one, instance_two]
try:
- rapi_dicts = map(functools.partial(_GenInstanceAllocationDict, node),
- instance_list)
+ rapi_dicts = [_GenInstanceAllocationDict(node, i) for i in instance_list]
job_id = _rapi_client.InstancesMultiAlloc(rapi_dicts)