Replace uses of map/lambda with more Pythonic code
map(lambda x: expr(x), seq)
can be written more simply as
[expr(x) for x in seq]
Where the call is made only for the side effects of expr (note that in
Python 3 map() is lazy, so such side effects would never even run unless
the result were consumed), make the iteration explicit by replacing
map(lambda x: UpdateState(x, ...), seq)
with
for x in seq:
UpdateState(x, ...)
Signed-off-by: Brian Foley <bpfoley@google.com>
Reviewed-by: Viktor Bachraty <vbachraty@google.com>
diff --git a/lib/cli.py b/lib/cli.py
index cf89eb9..67ea375 100644
--- a/lib/cli.py
+++ b/lib/cli.py
@@ -2341,10 +2341,10 @@
@rtype: a list of tuples
"""
- return map(lambda t: t[0],
+ return [t[0] for t in
cl.QueryNodes(names=nodes,
fields=["ndp/ssh_port"],
- use_locking=False))
+ use_locking=False)]
def GetNodeUUIDs(nodes, cl):
@@ -2358,10 +2358,10 @@
@rtype: a list of tuples
"""
- return map(lambda t: t[0],
+ return [t[0] for t in
cl.QueryNodes(names=nodes,
fields=["uuid"],
- use_locking=False))
+ use_locking=False)]
def _ToStream(stream, txt, *args):
diff --git a/lib/cmdlib/instance_storage.py b/lib/cmdlib/instance_storage.py
index d92a9e8..513d61b 100644
--- a/lib/cmdlib/instance_storage.py
+++ b/lib/cmdlib/instance_storage.py
@@ -1347,7 +1347,7 @@
if disks is None:
disks = [(0, inst_disks[0])]
else:
- disks = map(lambda idx: (idx, inst_disks[idx]), disks)
+ disks = [(idx, inst_disks[idx]) for idx in disks]
logging.info("Pausing synchronization of disks of instance '%s'",
instance.name)
diff --git a/lib/config/__init__.py b/lib/config/__init__.py
index 096b213..095fb88 100644
--- a/lib/config/__init__.py
+++ b/lib/config/__init__.py
@@ -1107,8 +1107,7 @@
if self._offline:
raise errors.ProgrammerError("Can't call ComputeDRBDMap in offline mode")
else:
- return dict(map(lambda (k, v): (k, dict(v)),
- self._wconfd.ComputeDRBDMap()))
+ return dict((k, dict(v)) for (k, v) in self._wconfd.ComputeDRBDMap())
def AllocateDRBDMinor(self, node_uuids, disk_uuid):
"""Allocate a drbd minor.
@@ -1561,8 +1560,8 @@
dictionaries.
"""
- return dict(map(lambda (uuid, ng): (uuid, ng.ToDict()),
- self._UnlockedGetAllNodeGroupsInfo().items()))
+ return dict((uuid, ng.ToDict()) for (uuid, ng) in
+ self._UnlockedGetAllNodeGroupsInfo().items())
@ConfigSync(shared=1)
def GetNodeGroupList(self):
diff --git a/lib/hypervisor/hv_xen.py b/lib/hypervisor/hv_xen.py
index f7410f4..fc4f7b1 100644
--- a/lib/hypervisor/hv_xen.py
+++ b/lib/hypervisor/hv_xen.py
@@ -224,7 +224,7 @@
]
def _RunningWithSuffix(suffix):
- return map(lambda x: x + suffix, allowable_running_prefixes)
+ return [x + suffix for x in allowable_running_prefixes]
# The shutdown suspend ("ss") state is encountered during migration, where
# the instance is still considered to be running.
@@ -347,7 +347,7 @@
if len(fields) < 2:
continue
- (key, val) = map(lambda s: s.strip(), fields)
+ (key, val) = (s.strip() for s in fields)
# Note: in Xen 3, memory has changed to total_memory
if key in ("memory", "total_memory"):
diff --git a/lib/luxi.py b/lib/luxi.py
index 0c50c99..4391a4f 100644
--- a/lib/luxi.py
+++ b/lib/luxi.py
@@ -116,9 +116,9 @@
return self.CallMethod(REQ_PICKUP_JOB, (job,))
def SubmitJob(self, ops):
- ops_state = map(lambda op: op.__getstate__()
+ ops_state = [op.__getstate__()
if not isinstance(op, objects.ConfigObject)
- else op.ToDict(_with_private=True), ops)
+ else op.ToDict(_with_private=True) for op in ops]
return self.CallMethod(REQ_SUBMIT_JOB, (ops_state, ))
def SubmitJobToDrainedQueue(self, ops):
diff --git a/lib/rpc/node.py b/lib/rpc/node.py
index 43f843f..3515519 100644
--- a/lib/rpc/node.py
+++ b/lib/rpc/node.py
@@ -393,7 +393,7 @@
assert isinstance(body, dict)
assert len(body) == len(hosts)
assert compat.all(isinstance(v, str) for v in body.values())
- assert frozenset(map(lambda x: x[2], hosts)) == frozenset(body.keys()), \
+ assert frozenset(h[2] for h in hosts) == frozenset(body.keys()), \
"%s != %s" % (hosts, body.keys())
for (name, ip, original_name) in hosts:
@@ -532,8 +532,7 @@
req_resolver_opts)
if postproc_fn:
- return dict(map(lambda (key, value): (key, postproc_fn(value)),
- result.items()))
+ return dict((k, postproc_fn(v)) for (k, v) in result.items())
else:
return result
diff --git a/lib/storage/bdev.py b/lib/storage/bdev.py
index a438fce..81e60d0 100644
--- a/lib/storage/bdev.py
+++ b/lib/storage/bdev.py
@@ -137,7 +137,7 @@
empty_pvs = filter(objects.LvmPvInfo.IsEmpty, pvs_info)
if max_pvs is not None:
empty_pvs = empty_pvs[:max_pvs]
- return map((lambda pv: pv.name), empty_pvs)
+ return [pv.name for pv in empty_pvs]
@classmethod
def Create(cls, unique_id, children, size, spindles, params, excl_stor,
@@ -1102,7 +1102,7 @@
lines = output.splitlines()
# Try parsing the new output format (ceph >= 0.55).
- splitted_lines = map(lambda l: l.split(), lines)
+ splitted_lines = [l.split() for l in lines]
# Check for empty output.
if not splitted_lines:
@@ -1113,7 +1113,7 @@
if field_cnt != allfields:
# Parsing the new format failed. Fallback to parsing the old output
# format (< 0.55).
- splitted_lines = map(lambda l: l.split("\t"), lines)
+ splitted_lines = [l.split("\t") for l in lines]
if field_cnt != allfields:
base.ThrowError("Cannot parse rbd showmapped output expected %s fields,"
" found %s", allfields, field_cnt)
diff --git a/lib/storage/filestorage.py b/lib/storage/filestorage.py
index 271e363..75961fc 100644
--- a/lib/storage/filestorage.py
+++ b/lib/storage/filestorage.py
@@ -350,8 +350,8 @@
])
for prefix in ["", "/usr", "/usr/local"]:
- paths.update(map(lambda s: "%s/%s" % (prefix, s),
- ["bin", "lib", "lib32", "lib64", "sbin"]))
+ paths.update(["%s/%s" % (prefix, s) for s in
+ ["bin", "lib", "lib32", "lib64", "sbin"]])
return compat.UniqueFrozenset(map(os.path.normpath, paths))
diff --git a/lib/tools/node_cleanup.py b/lib/tools/node_cleanup.py
index f8ec076..7a9ff81 100644
--- a/lib/tools/node_cleanup.py
+++ b/lib/tools/node_cleanup.py
@@ -91,9 +91,9 @@
(pathutils.CLUSTER_CONF_FILE, True),
(pathutils.CLUSTER_DOMAIN_SECRET_FILE, True),
]
- clean_files.extend(map(lambda s: (s, True), pathutils.ALL_CERT_FILES))
- clean_files.extend(map(lambda s: (s, False),
- ssconf.SimpleStore().GetFileList()))
+ clean_files.extend((s, True) for s in pathutils.ALL_CERT_FILES)
+ clean_files.extend((s, False) for s in
+ ssconf.SimpleStore().GetFileList())
if not opts.yes_do_it:
cli.ToStderr("Cleaning a node is irreversible. If you really want to"
diff --git a/test/py/cmdlib/instance_unittest.py b/test/py/cmdlib/instance_unittest.py
index cbdd061..d725015 100644
--- a/test/py/cmdlib/instance_unittest.py
+++ b/test/py/cmdlib/instance_unittest.py
@@ -1313,9 +1313,8 @@
file_driver=NotImplemented):
gdt = instance_storage.GenerateDiskTemplate
- map(lambda params: utils.ForceDictType(params,
- constants.IDISK_PARAMS_TYPES),
- disk_info)
+ for params in disk_info:
+ utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
# Check if non-empty list of secondaries is rejected
self.assertRaises(errors.ProgrammerError, gdt, self.lu,
@@ -1486,9 +1485,8 @@
assert len(exp_logical_ids) == len(disk_info)
- map(lambda params: utils.ForceDictType(params,
- constants.IDISK_PARAMS_TYPES),
- disk_info)
+ for params in disk_info:
+ utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
# Check if empty list of secondaries is rejected
self.assertRaises(errors.ProgrammerError, gdt, self.lu, constants.DT_DRBD8,
diff --git a/test/py/ganeti.cli_unittest.py b/test/py/ganeti.cli_unittest.py
index a3e28af..42cc3a4 100755
--- a/test/py/ganeti.cli_unittest.py
+++ b/test/py/ganeti.cli_unittest.py
@@ -1005,7 +1005,7 @@
self.assertEqual(frozenset(cli._QFT_NAMES), constants.QFT_ALL)
def testUnique(self):
- lcnames = map(lambda s: s.lower(), cli._QFT_NAMES.values())
+ lcnames = [s.lower() for s in cli._QFT_NAMES.values()]
self.assertFalse(utils.FindDuplicates(lcnames))
def testUppercase(self):
diff --git a/test/py/ganeti.config_unittest.py b/test/py/ganeti.config_unittest.py
index 7253f66..2e4c96e 100755
--- a/test/py/ganeti.config_unittest.py
+++ b/test/py/ganeti.config_unittest.py
@@ -658,7 +658,7 @@
self.assertEqual(instance_disks, [disk])
def _IsErrorInList(err_str, err_list):
- return any(map(lambda e: err_str in e, err_list))
+ return any((err_str in e) for e in err_list)
class TestTRM(unittest.TestCase):
diff --git a/test/py/ganeti.rpc_unittest.py b/test/py/ganeti.rpc_unittest.py
index 54f199c..346ab46 100755
--- a/test/py/ganeti.rpc_unittest.py
+++ b/test/py/ganeti.rpc_unittest.py
@@ -859,8 +859,9 @@
result = runner._encoder(NotImplemented,
(rpc_defs.ED_OBJECT_DICT_LIST, 5 * [inst]))
- map(_CheckBasics, result)
- map(lambda r: self.assertEqual(len(r["hvparams"]), 2), result)
+ for r in result:
+ _CheckBasics(r)
+ self.assertEqual(len(r["hvparams"]), 2)
# Just an instance
result = runner._encoder(NotImplemented, (rpc_defs.ED_INST_DICT, inst))
diff --git a/test/py/ganeti.utils.storage_unittest.py b/test/py/ganeti.utils.storage_unittest.py
index 33b6f8a..8d7438a 100755
--- a/test/py/ganeti.utils.storage_unittest.py
+++ b/test/py/ganeti.utils.storage_unittest.py
@@ -166,9 +166,8 @@
start1 = 27
start2 = 703
result1 = list(storage.GetDiskLabels("", start1))
- result2 = \
- map(lambda x: x[1:],
- list(storage.GetDiskLabels("", start2, start=start2 - start1)))
+ result2 = [x[1:] for x in
+ list(storage.GetDiskLabels("", start2, start=start2 - start1))]
self.assertEqual(result1, result2)
diff --git a/test/py/testutils/config_mock.py b/test/py/testutils/config_mock.py
index 1d70798..6dbdbda 100644
--- a/test/py/testutils/config_mock.py
+++ b/test/py/testutils/config_mock.py
@@ -613,7 +613,7 @@
return dict((node_uuid, {}) for node_uuid in self._ConfigData().nodes)
def AllocateDRBDMinor(self, node_uuids, disk_uuid):
- return map(lambda _: 0, node_uuids)
+ return [0] * len(node_uuids)
def ReleaseDRBDMinors(self, disk_uuid):
pass