Merge branch 'stable-2.13' into stable-2.14

* stable-2.13
  Bugfix: migrate needs HypervisorClass, not an instance

Signed-off-by: Brian Foley <bpfoley@google.com>
Reviewed-by: Viktor Bachraty <vbachraty@google.com>
diff --git a/.gitignore b/.gitignore
index 2c03279..e653ffc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,13 +25,13 @@
 # /
 /.hsenv
 /Makefile
-/hs-pkg-versions
 /Makefile.ghc
 /Makefile.ghc.bak
 /Makefile.in
 /Makefile.local
 /Session.vim
 /TAGS*
+/apps/
 /aclocal.m4
 /autom4te.cache
 /autotools/install-sh
@@ -39,6 +39,7 @@
 /autotools/py-compile
 /autotools/replace_vars.sed
 /autotools/shell-env-init
+/cabal_macros.h
 /config.log
 /config.status
 /configure
@@ -46,8 +47,12 @@
 /devel/squeeze-amd64.conf
 /devel/wheezy-amd64.tar.gz
 /devel/wheezy-amd64.conf
+/dist/
+/empty-cabal-config
 /epydoc.conf
 /ganeti
+/ganeti.cabal
+/ganeti.depsflags
 /stamp-srclinks
 /stamp-directories
 /vcs-version
diff --git a/INSTALL b/INSTALL
index 05b24e2..145b5fc 100644
--- a/INSTALL
+++ b/INSTALL
@@ -127,6 +127,9 @@
 - or even better, `The Haskell Platform
   <http://hackage.haskell.org/platform/>`_ which gives you a simple way
   to bootstrap Haskell
+- `cabal-install <http://hackage.haskell.org/package/cabal-install>`_ and
+  `Cabal <http://hackage.haskell.org/package/Cabal>`_, the Common Architecture
+  for Building Haskell Applications and Libraries (executable and library)
 - `json <http://hackage.haskell.org/package/json>`_, a JSON library
 - `network <http://hackage.haskell.org/package/network>`_, a basic
   network library
@@ -163,7 +166,8 @@
 
 Some of these are also available as package in Debian/Ubuntu::
 
-  $ apt-get install ghc libghc-json-dev libghc-network-dev \
+  $ apt-get install ghc cabal-install libghc-cabal-dev \
+                    libghc-json-dev libghc-network-dev \
                     libghc-parallel-dev \
                     libghc-utf8-string-dev libghc-curl-dev \
                     libghc-hslogger-dev \
@@ -190,36 +194,19 @@
 need to be installed using ``cabal``.
 
 If using a distribution which does not provide these libraries, first
-install the Haskell platform. You can also install ``cabal`` manually::
+install the Haskell platform. Then run::
 
-  $ apt-get install cabal-install
   $ cabal update
 
 Then install the additional native libraries::
 
   $ apt-get install libpcre3-dev libcurl4-openssl-dev
 
-And finally the libraries required for building the packages (only the
-ones not available in your distribution packages) via ``cabal``::
+And finally the libraries required for building the packages via ``cabal``
+(it will automatically pick only those that are not already installed via your
+distribution packages)::
 
-  $ cabal install json network parallel utf8-string curl hslogger \
-                  Crypto text hinotify==0.3.2 regex-pcre \
-                  attoparsec vector base64-bytestring \
-                  lifted-base==0.2.0.3 lens==3.10
-
-(The specified versions are suitable for Debian Wheezy, for other
-distributions different versions might be needed.)
-
-.. _cabal-order-note:
-.. note::
-  When installing additional libraries using ``cabal``, be sure to first
-  install all the required libraries available in your distribution and
-  only then install the rest using ``cabal``.
-  Otherwise cabal might install different versions of libraries that are
-  available in your distribution, causing conflicts during the
-  compilation.
-  This applies in particular when installing libraries for the optional
-  features.
+  $ cabal install --only-dependencies cabal/ganeti.template.cabal
 
 Haskell optional features
 ~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -242,15 +229,13 @@
 
 or ``cabal``::
 
-  $ cabal install snap-server PSQueue
+  $ cabal install --only-dependencies cabal/ganeti.template.cabal \
+                  --flags="confd mond metad"
 
 to install them.
 
 .. _cabal-note:
 .. note::
-  If one of the cabal packages fails to install due to unfulfilled
-  dependencies, you can try enabling symlinks in ``~/.cabal/config``.
-
   Make sure that your ``~/.cabal/bin`` directory (or whatever else
   is defined as ``bindir``) is in your ``PATH``.
 
diff --git a/Makefile.am b/Makefile.am
index a30d82e..d61df4c 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -89,6 +89,8 @@
 
 clientdir = $(pkgpythondir)/client
 cmdlibdir = $(pkgpythondir)/cmdlib
+cmdlib_clusterdir = $(pkgpythondir)/cmdlib/cluster
+configdir = $(pkgpythondir)/config
 hypervisordir = $(pkgpythondir)/hypervisor
 hypervisor_hv_kvmdir = $(pkgpythondir)/hypervisor/hv_kvm
 jqueuedir = $(pkgpythondir)/jqueue
@@ -185,6 +187,7 @@
 DIRS = \
 	$(HS_DIRS) \
 	autotools \
+	cabal \
 	daemons \
 	devel \
 	devel/data \
@@ -203,7 +206,9 @@
 	lib/build \
 	lib/client \
 	lib/cmdlib \
+	lib/cmdlib/cluster \
 	lib/confd \
+	lib/config \
 	lib/jqueue \
 	lib/http \
 	lib/hypervisor \
@@ -239,6 +244,7 @@
         test/data/cgroup_root/devices/some_group/lxc \
         test/data/cgroup_root/devices/some_group/lxc/instance1 \
 	test/py \
+	test/py/testutils \
 	test/py/cmdlib \
 	test/py/cmdlib/testsupport \
 	tools
@@ -259,6 +265,8 @@
 
 BUILDTIME_DIRS = \
 	$(BUILDTIME_DIR_AUTOCREATE) \
+	apps \
+	dist \
 	doc/html \
 	doc/man-html
 
@@ -294,7 +302,12 @@
 	$(addsuffix /*.o,$(HS_DIRS)) \
 	$(addsuffix /*.$(HTEST_SUFFIX)_hi,$(HS_DIRS)) \
 	$(addsuffix /*.$(HTEST_SUFFIX)_o,$(HS_DIRS)) \
-	hs-pkg-versions \
+	$(HASKELL_PACKAGE_VERSIONS_FILE) \
+	$(CABAL_EXECUTABLES_APPS_STAMPS) \
+	empty-cabal-config \
+	ganeti.cabal \
+	$(HASKELL_PACKAGE_IDS_FILE) \
+	$(HASKELL_PACKAGE_VERSIONS_FILE) \
 	Makefile.ghc \
 	Makefile.ghc.bak \
 	$(PYTHON_BOOTSTRAP) \
@@ -350,6 +363,8 @@
 
 clean-local:
 	rm -rf tools/shebang
+	rm -rf apps
+	rm -rf dist
 
 HS_GENERATED_FILES = $(HS_PROGS) src/hluxid src/ganeti-luxid \
 	src/hconfd src/ganeti-confd
@@ -374,10 +389,6 @@
 	$(nodist_pkgpython_PYTHON) \
 	$(nodist_pkgpython_rpc_stub_PYTHON)
 
-# Generating the RPC wrappers depends on many things, so make sure
-# it's built at the end of the built sources
-lib/_generated_rpc.py: | $(built_base_sources) $(built_python_base_sources)
-
 # these are all built from the underlying %.in sources
 BUILT_EXAMPLES = \
 	doc/examples/ganeti-kvm-poweroff.initd \
@@ -428,7 +439,6 @@
 	lib/cli.py \
 	lib/cli_opts.py \
 	lib/compat.py \
-	lib/config.py \
 	lib/constants.py \
 	lib/daemon.py \
 	lib/errors.py \
@@ -476,13 +486,14 @@
 	lib/cmdlib/__init__.py \
 	lib/cmdlib/backup.py \
 	lib/cmdlib/base.py \
-	lib/cmdlib/cluster.py \
 	lib/cmdlib/common.py \
 	lib/cmdlib/group.py \
 	lib/cmdlib/instance.py \
+	lib/cmdlib/instance_create.py \
 	lib/cmdlib/instance_migration.py \
 	lib/cmdlib/instance_operation.py \
 	lib/cmdlib/instance_query.py \
+	lib/cmdlib/instance_set_params.py \
 	lib/cmdlib/instance_storage.py \
 	lib/cmdlib/instance_utils.py \
 	lib/cmdlib/misc.py \
@@ -493,6 +504,16 @@
 	lib/cmdlib/tags.py \
 	lib/cmdlib/test.py
 
+cmdlib_cluster_PYTHON = \
+	lib/cmdlib/cluster/__init__.py \
+	lib/cmdlib/cluster/verify.py
+
+config_PYTHON = \
+	lib/config/__init__.py \
+	lib/config/verify.py \
+	lib/config/temporary_reservations.py \
+	lib/config/utils.py
+
 hypervisor_PYTHON = \
 	lib/hypervisor/__init__.py \
 	lib/hypervisor/hv_base.py \
@@ -579,7 +600,8 @@
 	lib/tools/node_daemon_setup.py \
 	lib/tools/prepare_node_join.py \
 	lib/tools/ssh_update.py \
-	lib/tools/ssl_update.py
+	lib/tools/ssl_update.py \
+	lib/tools/cfgupgrade.py
 
 utils_PYTHON = \
 	lib/utils/__init__.py \
@@ -622,6 +644,7 @@
 	doc/design-2.11.rst \
 	doc/design-2.12.rst \
 	doc/design-2.13.rst \
+	doc/design-2.14.rst \
 	doc/design-autorepair.rst \
 	doc/design-bulk-create.rst \
 	doc/design-ceph-ganeti-support.rst \
@@ -635,6 +658,7 @@
 	doc/design-disk-conversion.rst \
 	doc/design-disks.rst \
 	doc/design-draft.rst \
+	doc/design-file-based-disks-ownership.rst \
 	doc/design-file-based-storage.rst \
 	doc/design-glusterfs-ganeti-support.rst \
 	doc/design-hotplug.rst \
@@ -761,6 +785,9 @@
 	-O -Wall -isrc \
 	-fwarn-monomorphism-restriction \
 	-fwarn-tabs \
+	-optP-include -optP$(HASKELL_PACKAGE_VERSIONS_FILE) \
+	-hide-all-packages \
+	`cat $(HASKELL_PACKAGE_IDS_FILE)` \
 	$(GHC_BYVERSION_FLAGS)
 if DEVELOPER_MODE
 HFLAGS += -Werror
@@ -913,7 +940,10 @@
 	src/Ganeti/Network.hs \
 	src/Ganeti/Objects.hs \
 	src/Ganeti/Objects/BitArray.hs \
+	src/Ganeti/Objects/Disk.hs \
+	src/Ganeti/Objects/Instance.hs \
 	src/Ganeti/Objects/Lens.hs \
+	src/Ganeti/Objects/Nic.hs \
 	src/Ganeti/OpCodes.hs \
 	src/Ganeti/OpCodes/Lens.hs \
 	src/Ganeti/OpParams.hs \
@@ -936,6 +966,7 @@
 	src/Ganeti/Query/Query.hs \
 	src/Ganeti/Query/Server.hs \
 	src/Ganeti/Query/Types.hs \
+	src/Ganeti/PartialParams.hs \
 	src/Ganeti/Rpc.hs \
 	src/Ganeti/Runtime.hs \
 	src/Ganeti/SlotMap.hs \
@@ -970,6 +1001,7 @@
 	src/Ganeti/Utils/Validate.hs \
 	src/Ganeti/VCluster.hs \
 	src/Ganeti/WConfd/ConfigState.hs \
+	src/Ganeti/WConfd/ConfigModifications.hs \
 	src/Ganeti/WConfd/ConfigVerify.hs \
 	src/Ganeti/WConfd/ConfigWriter.hs \
 	src/Ganeti/WConfd/Client.hs \
@@ -1038,6 +1070,7 @@
 	test/hs/Test/Ganeti/Locking/Locks.hs \
 	test/hs/Test/Ganeti/Locking/Waiting.hs \
 	test/hs/Test/Ganeti/Network.hs \
+	test/hs/Test/Ganeti/PartialParams.hs \
 	test/hs/Test/Ganeti/Objects.hs \
 	test/hs/Test/Ganeti/Objects/BitArray.hs \
 	test/hs/Test/Ganeti/OpCodes.hs \
@@ -1273,41 +1306,15 @@
 
 HS_SRCS = $(HS_LIBTESTBUILT_SRCS)
 
-# select the last line of output and extract the version number,
-# padding with 0s if needed
-hs-pkg-versions:
-	ghc-pkg list --simple-output lens \
-	| sed -r -e '$$!d' \
-	  -e 's/^lens-([0-9]+(\.[0-9]+)*)/\1 0 0 0/' \
-	  -e 's/\./ /g' -e 's/([0-9]+) *([0-9]+) *([0-9]+) .*/\
-	      -DLENS_MAJOR=\1 -DLENS_MINOR=\2 -DLENS_REV=\3/' \
-	  -e 's/^\s*//' \
-	> $@
-	ghc-pkg list --simple-output monad-control \
-	| sed -r -e '$$!d' \
-	  -e 's/^monad-control-([0-9]+(\.[0-9]+)*)/\1 0 0 0/' \
-	  -e 's/\./ /g' -e 's/([0-9]+) *([0-9]+) *([0-9]+) .*/\
-	   -DMONAD_CONTROL_MAJOR=\1 -DMONAD_CONTROL_MINOR=\2 -DMONAD_CONTROL_REV=\3/'\
-	  -e 's/^\s*//' \
-	>> $@
-	ghc-pkg list --simple-output QuickCheck \
-	| sed -r -e '$$!d' \
-	  -e 's/^QuickCheck-([0-9]+(\.[0-9]+)*)/\1 0 0 0/' \
-	  -e 's/\./ /g' -e 's/([0-9]+) *([0-9]+) *([0-9]+) .*/\
-	   -DQUICKCHECK_MAJOR=\1 -DQUICKCHECK_MINOR=\2 -DQUICKCHECK_REV=\3/'\
-	  -e 's/^\s*//' \
-	>> $@
-
 HS_MAKEFILE_GHC_SRCS = $(HS_SRC_PROGS:%=%.hs)
 if WANT_HSTESTS
 HS_MAKEFILE_GHC_SRCS += $(HS_TEST_PROGS:%=%.hs)
 endif
-Makefile.ghc: $(HS_MAKEFILE_GHC_SRCS) Makefile hs-pkg-versions \
+Makefile.ghc: $(HS_MAKEFILE_GHC_SRCS) Makefile $(HASKELL_PACKAGE_VERSIONS_FILE) \
               | $(built_base_sources) $(HS_BUILT_SRCS)
 	$(GHC) -M -dep-makefile $@ $(DEP_SUFFIXES) $(HFLAGS) $(HFLAGS_DYNAMIC) \
 		-itest/hs \
-	  $(shell cat hs-pkg-versions) \
-		$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) $(HS_MAKEFILE_GHC_SRCS)
+		$(HEXTRA_COMBINED) $(HS_MAKEFILE_GHC_SRCS)
 # Since ghc -M does not generate dependency line for object files, dependencies
 # from a target executable seed object (e.g. src/hluxid.o) to objects which
 # finally will be linked to the target object (e.g. src/Ganeti/Daemon.o) are
@@ -1322,33 +1329,60 @@
 
 @include_makefile_ghc@
 
+# Contains the package-id flags for the current build: "-package-id" followed
+# by the name and hash of the package, one for each dependency.
+# Obtained from the setup-config using the Cabal API
+# (CabalDependenciesMacros.hs) after `cabal configure`.
+# This file is created along with HASKELL_PACKAGE_VERSIONS_FILE; if you want
+# to depend on it in a rule, depend on HASKELL_PACKAGE_VERSIONS_FILE instead.
+HASKELL_PACKAGE_IDS_FILE = ganeti.depsflags
+
+# Defines the MIN_VERSION_* macros for all Haskell packages used in this
+# compilation.
+# The versions are determined using `cabal configure`, which takes them from
+# the ghc-pkg database.
+# At the moment, we don't support cabal sandboxes, so we use cabal configure
+# with the --user flag.
+# Note: `cabal configure` and CabalDependenciesMacros.hs perform no
+# downloading (only `cabal install` can do that).
+HASKELL_PACKAGE_VERSIONS_FILE = cabal_macros.h
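+
+# For illustration only (the actual contents depend on the locally installed
+# package versions), the two generated files look roughly like:
+#   ganeti.depsflags: -package-id base-4.5.1.0-<hash> -package-id json-0.7-<hash> ...
+#   cabal_macros.h:   #define MIN_VERSION_json(major1,major2,minor) ...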
+
+$(HASKELL_PACKAGE_VERSIONS_FILE): Makefile ganeti.cabal \
+                                  cabal/CabalDependenciesMacros.hs
+	touch empty-cabal-config
+	$(CABAL) --config-file=empty-cabal-config configure --user \
+	  -f`test $(HTEST) == yes && echo "htest" || echo "-htest"` \
+	  -f`test $(ENABLE_MOND) == True && echo "mond" || echo "-mond"` \
+	  -f`test $(ENABLE_METADATA) == True && echo "metad" || echo "-metad"`
+	runhaskell $(abs_top_srcdir)/cabal/CabalDependenciesMacros.hs \
+	  ganeti.cabal \
+	  $(HASKELL_PACKAGE_IDS_FILE) \
+	  $(HASKELL_PACKAGE_VERSIONS_FILE)
+
 # Like the %.o rule, but allows access to the test/hs directory.
 # This uses HFLAGS instead of HTEST_FLAGS because it's only for generating
 # object files (.o for GHC <= 7.6, .o/.so for newer GHCs) that are loaded
 # in GHCI when evaluating TH. The actual test-with-coverage .hpc_o files
 # are created in the `%.$(HTEST_SUFFIX)_o` rule.
-test/hs/%.o: hs-pkg-versions
-	@echo '[GHC|test]: $@ <- $^'
-	@$(GHC) -c $(HFLAGS) $(HFLAGS_DYNAMIC) -itest/hs \
-	  $(shell cat hs-pkg-versions) \
-		$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) $(@:%.o=%.hs)
+test/hs/%.o: $(HASKELL_PACKAGE_VERSIONS_FILE)
+	@echo '[GHC|test]: $@ <- test/hs/$^'
+	@$(GHC) -c $(HFLAGS) -itest/hs $(HFLAGS_DYNAMIC) \
+		$(HEXTRA_COMBINED) $(@:%.o=%.hs)
 
-%.o: hs-pkg-versions
+%.o: $(HASKELL_PACKAGE_VERSIONS_FILE)
 	@echo '[GHC]: $@ <- $^'
 	@$(GHC) -c $(HFLAGS) $(HFLAGS_DYNAMIC) \
-	  $(shell cat hs-pkg-versions) \
-		$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) $(@:%.o=%.hs)
+		$(HEXTRA_COMBINED) $(@:%.o=%.hs)
 
 # For TH+profiling we need to compile twice: Once without profiling,
 # and then once with profiling. See
 # http://www.haskell.org/ghc/docs/7.0.4/html/users_guide/template-haskell.html#id636646
 if HPROFILE
-%.$(HPROF_SUFFIX)_o: %.o hs-pkg-versions
+%.$(HPROF_SUFFIX)_o: %.o
 	@echo '[GHC|prof]: $@ <- $^'
 	@$(GHC) -c $(HFLAGS) \
-	  $(shell cat hs-pkg-versions) \
 	  $(HPROFFLAGS) \
-		$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) \
+		$(HEXTRA_COMBINED) \
 		$(@:%.$(HPROF_SUFFIX)_o=%.hs)
 endif
 
@@ -1356,11 +1390,10 @@
 # file for GHC > 7.6 ghci dynamic loading for TH, and creating the .o file
 # will create the .so file since we use -dynamic-too (using the `test/hs/%.o`
 # rule).
-%.$(HTEST_SUFFIX)_o: %.o hs-pkg-versions
+%.$(HTEST_SUFFIX)_o: %.o
 	@echo '[GHC|test]: $@ <- $^'
 	@$(GHC) -c $(HTEST_FLAGS) \
-	  $(shell cat hs-pkg-versions) \
-		$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) $(@:%.$(HTEST_SUFFIX)_o=%.hs)
+		$(HEXTRA_COMBINED) $(@:%.$(HTEST_SUFFIX)_o=%.hs)
 
 %.hi: %.o ;
 %.$(HTEST_SUFFIX)_hi: %.$(HTEST_SUFFIX)_o ;
@@ -1370,19 +1403,17 @@
 $(HS_SRC_PROGS): %: %.$(HPROF_SUFFIX)_o | stamp-directories
 	@echo '[GHC-link]: $@'
 	$(GHC) $(HFLAGS) $(HPROFFLAGS) \
-		$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) --make $(@:%=%.hs)
+		$(HEXTRA_COMBINED) --make $(@:%=%.hs)
 else
-$(HS_SRC_PROGS): %: %.o hs-pkg-versions | stamp-directories
-endif
+$(HS_SRC_PROGS): %: %.o | stamp-directories
 	@echo '[GHC-link]: $@'
 	$(GHC) $(HFLAGS) $(HFLAGS_DYNAMIC) \
-	  $(shell cat hs-pkg-versions) \
-		$(HPROFFLAGS) \
-		$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) --make $(@:%=%.hs)
+		$(HEXTRA_COMBINED) --make $(@:%=%.hs)
+endif
 	@rm -f $(notdir $@).tix
 	@touch "$@"
 
-$(HS_TEST_PROGS): %: %.$(HTEST_SUFFIX)_o hs-pkg-versions \
+$(HS_TEST_PROGS): %: %.$(HTEST_SUFFIX)_o \
 	                   | stamp-directories $(built_python_sources)
 	@if [ "$(HS_NODEV)" ]; then \
 	  echo "Error: cannot run unittests without the development" \
@@ -1391,8 +1422,7 @@
 	fi
 	@echo '[GHC-link|test]: $@'
 	$(GHC) $(HTEST_FLAGS) \
-	  $(shell cat hs-pkg-versions) \
-		$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) --make $(@:%=%.hs)
+		$(HEXTRA_COMBINED) --make $(@:%=%.hs)
 	@rm -f $(notdir $@).tix
 	@touch "$@"
 
@@ -1523,6 +1553,7 @@
 	autotools/sphinx-wrapper \
 	autotools/testrunner \
 	autotools/wrong-hardcoded-paths \
+	cabal/cabal-from-modules.py \
 	$(RUN_IN_TEMPDIR) \
 	daemons/daemon-util.in \
 	daemons/ganeti-cleaner.in \
@@ -1553,9 +1584,13 @@
 	doc/users/groupmemberships.in \
 	doc/users/groups.in \
 	doc/users/users.in \
+	ganeti.cabal \
+	cabal/ganeti.template.cabal \
+	cabal/CabalDependenciesMacros.hs \
 	$(dist_TESTS) \
 	$(TEST_FILES) \
 	$(python_test_support) \
+	$(python_test_utils) \
 	man/footer.rst \
 	$(manrst) \
 	$(maninput) \
@@ -1629,6 +1664,7 @@
 	test/data/htools/hail-alloc-invalid-network.json \
 	test/data/htools/hail-alloc-invalid-twodisks.json \
 	test/data/htools/hail-alloc-restricted-network.json \
+	test/data/htools/hail-alloc-nlocation.json \
 	test/data/htools/hail-alloc-plain-tags.json \
 	test/data/htools/hail-alloc-spindles.json \
 	test/data/htools/hail-alloc-twodisks.json \
@@ -1641,6 +1677,9 @@
 	test/data/htools/hbal-dyn.data \
 	test/data/htools/hbal-evac.data \
 	test/data/htools/hbal-excl-tags.data \
+	test/data/htools/hbal-forth.data \
+	test/data/htools/hbal-location-1.data \
+	test/data/htools/hbal-location-2.data \
 	test/data/htools/hbal-migration-1.data \
 	test/data/htools/hbal-migration-2.data \
 	test/data/htools/hbal-migration-3.data \
@@ -1673,6 +1712,7 @@
 	test/data/htools/hsqueeze-overutilized.data \
 	test/data/htools/hsqueeze-underutilized.data \
 	test/data/htools/unique-reboot-order.data \
+	test/data/mond-data.txt \
 	test/hs/shelltests/htools-balancing.test \
 	test/hs/shelltests/htools-basic.test \
 	test/hs/shelltests/htools-dynutil.test \
@@ -1720,6 +1760,7 @@
 	test/data/cluster_config_2.10.json \
 	test/data/cluster_config_2.11.json \
 	test/data/cluster_config_2.12.json \
+	test/data/cluster_config_2.13.json \
 	test/data/instance-minor-pairing.txt \
 	test/data/instance-disks.txt \
 	test/data/ip-addr-show-dummy0.txt \
@@ -1899,13 +1940,13 @@
 python_test_support = \
 	test/py/__init__.py \
 	test/py/lockperf.py \
-	test/py/testutils.py \
 	test/py/testutils_ssh.py \
 	test/py/mocks.py \
+	test/py/testutils/__init__.py \
+	test/py/testutils/config_mock.py \
 	test/py/cmdlib/__init__.py \
 	test/py/cmdlib/testsupport/__init__.py \
 	test/py/cmdlib/testsupport/cmdlib_testcase.py \
-	test/py/cmdlib/testsupport/config_mock.py \
 	test/py/cmdlib/testsupport/iallocator_mock.py \
 	test/py/cmdlib/testsupport/livelock_mock.py \
 	test/py/cmdlib/testsupport/netutils_mock.py \
@@ -1968,6 +2009,8 @@
 	$(pkgpython_PYTHON) \
 	$(client_PYTHON) \
 	$(cmdlib_PYTHON) \
+	$(cmdlib_cluster_PYTHON) \
+	$(config_PYTHON) \
 	$(hypervisor_PYTHON) \
 	$(hypervisor_hv_kvm_PYTHON) \
 	$(jqueue_PYTHON) \
@@ -1989,6 +2032,7 @@
 if PY_UNIT
 all_python_code += $(python_tests)
 all_python_code += $(python_test_support)
+all_python_code += $(python_test_utils)
 endif
 
 srclink_files = \
@@ -2043,6 +2087,7 @@
 	$(gnt_python_sbin_SCRIPTS) \
 	qa \
-	$(python_test_support)
+	$(python_test_support) \
+	$(python_test_utils)
 
 test/py/daemon-util_unittest.bash: daemons/daemon-util
 
@@ -2329,7 +2374,9 @@
 	src/hs2py --opcodes >> $@
 	cat $(abs_top_srcdir)/lib/opcodes.py.in_after >> $@
 
-lib/_generated_rpc.py: lib/rpc_defs.py $(BUILD_RPC)
+# Generating the RPC wrappers depends on many things, so make sure
+# it's built at the end of the built sources
+lib/_generated_rpc.py: lib/rpc_defs.py $(BUILD_RPC) | $(built_base_sources) $(built_python_base_sources)
 	PYTHONPATH=. $(RUN_IN_TEMPDIR) $(CURDIR)/$(BUILD_RPC) lib/rpc_defs.py > $@
 
 lib/rpc/stub/wconfd.py: Makefile src/hs2py | stamp-directories
@@ -2619,7 +2666,7 @@
 	@test -n "$(PYLINT)" || { echo 'pylint' not found during configure; exit 1; }
 	cd $(top_srcdir) && \
 		PYTHONPATH=.:./test/py $(PYLINT) $(LINT_OPTS_ALL) \
-		--rcfile=pylintrc-test  $(python_test_support)
+		--rcfile=pylintrc-test  $(python_test_support) $(python_test_utils)
 
 .PHONY: pep8
 pep8: $(GENERATED_FILES)
@@ -2640,7 +2687,9 @@
 	  --ignore "Use &&" \
 	  --ignore "Use void" \
 	  --ignore "Reduce duplication" \
+	  --ignore "Use import/export shortcut" \
 	  --hint src/lint-hints \
+	  --cpp-file=$(HASKELL_PACKAGE_VERSIONS_FILE) \
 	  $(filter-out $(HLINT_EXCLUDES),$(HS_LIBTEST_SRCS) $(HS_PROG_SRCS))
 	@if [ ! -f doc/hs-lint.html ]; then \
 	  echo "All good" > doc/hs-lint.html; \
@@ -2776,12 +2825,7 @@
 	set -e ; \
 	export LC_ALL=en_US.UTF-8; \
 	OPTGHC="--optghc=-isrc --optghc=-itest/hs"; \
-	if [ "$(HS_PARALLEL3)" ]; \
-	then OPTGHC="$$OPTGHC --optghc=$(HS_PARALLEL3)"; \
-	fi; \
-	if [ "$(HS_REGEX_PCRE)" ]; \
-	then OPTGHC="$$OPTGHC --optghc=$(HS_REGEX_PCRE)"; \
-	fi; \
+	OPTGHC="$$OPTGHC --optghc=-optP-include --optghc=-optP$(HASKELL_PACKAGE_VERSIONS_FILE)"; \
 	for file in $(HS_LIBTESTBUILT_SRCS); do \
 	  f_nosrc=$${file##src/}; \
 	  f_notst=$${f_nosrc##test/hs/}; \
@@ -2796,15 +2840,13 @@
 	  $(HS_LIBTESTBUILT_SRCS)
 
 .PHONY: TAGS
-TAGS: $(GENERATED_FILES) hs-pkg-versions
+TAGS: $(GENERATED_FILES)
 	rm -f TAGS
 	$(GHC) -e ":etags TAGS_hs" -v0 \
 	  $(filter-out -O -Werror,$(HFLAGS)) \
-	  $(shell cat hs-pkg-versions) \
 		-osuf tags.o \
 		-hisuf tags.hi \
     -lcurl \
-	  $(HS_PARALLEL3) $(HS_REGEX_PCRE) \
 	  $(HS_LIBTEST_SRCS)
 	find . -path './lib/*.py' -o -path './scripts/gnt-*' -o \
 	  -path './daemons/ganeti-*' -o -path './tools/*' -o \
@@ -2884,6 +2926,39 @@
 .PHONY: man
 man: $(man_MANS) $(manhtml)
 
+CABAL_EXECUTABLES = $(HS_DEFAULT_PROGS)
+CABAL_EXECUTABLES_HS = $(patsubst %,%.hs,$(CABAL_EXECUTABLES))
+CABAL_EXECUTABLES_APPS_STAMPS = $(patsubst src/%,apps/%.hs.stamp,$(patsubst test/hs/%,apps/%.hs.stamp,$(CABAL_EXECUTABLES)))
+
+# Executable symlinks
+apps/%.hs.stamp: Makefile
+	mkdir -p apps
+	rm -f $(basename $@)
+	ln -s ../$(filter %/$(basename $(notdir $@)),$(CABAL_EXECUTABLES_HS)) $(basename $@)
+	touch $@
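+
+# For example, for src/rpc-test (one of the CABAL_EXECUTABLES) this rule
+# creates the symlink apps/rpc-test.hs -> ../src/rpc-test.hs and the stamp
+# file apps/rpc-test.hs.stamp.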
+
+# Builds the cabal file
+ganeti.cabal: cabal/ganeti.template.cabal Makefile cabal/cabal-from-modules.py $(CABAL_EXECUTABLES_APPS_STAMPS)
+	@echo $(subst /,.,$(patsubst %.hs,%,$(patsubst test/hs/%,%,$(patsubst src/%,%,$(HS_SRCS))))) \
+	  | python $(abs_top_srcdir)/cabal/cabal-from-modules.py $(abs_top_srcdir)/cabal/ganeti.template.cabal > $@
+
+	for p in $(CABAL_EXECUTABLES); do \
+	  echo                                   >> $@; \
+	  echo "executable `basename $$p`"       >> $@; \
+	  echo "  hs-source-dirs: apps"          >> $@; \
+	  echo "  main-is: `basename $$p`.hs"    >> $@; \
+	  echo "  default-language: Haskell2010" >> $@; \
+	  echo "  build-depends:"                >> $@; \
+	  echo "      base"                      >> $@; \
+	  echo "    , ganeti"                    >> $@; \
+	  if [ $$p == test/hs/htest ]; then \
+	    echo "    , hslogger"                  >> $@; \
+	    echo "    , test-framework"            >> $@; \
+	  elif [ $$p == src/rpc-test ]; then \
+	    echo "    , json"                      >> $@; \
+	  fi \
+	done
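+
+# For a program like src/rpc-test, the loop above appends a stanza of the form:
+#
+#   executable rpc-test
+#     hs-source-dirs: apps
+#     main-is: rpc-test.hs
+#     default-language: Haskell2010
+#     build-depends:
+#         base
+#       , ganeti
+#       , json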
+
 # Target that builds all binaries (including those that are not
 # rebuilt except when running the tests)
 .PHONY: really-all
diff --git a/NEWS b/NEWS
index 36d9eee..f39f8c0 100644
--- a/NEWS
+++ b/NEWS
@@ -2,6 +2,235 @@
 ====
 
 
+Version 2.14.2
+--------------
+
+*(Released Tue, 15 Dec 2015)*
+
+Important changes and security notes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Security release.
+
+CVE-2015-7944
+
+Ganeti provides a RESTful control interface called the RAPI. Its HTTPS
+implementation is vulnerable to DoS attacks via client-initiated SSL
+parameter renegotiation. While the interface is not meant to be exposed
+publicly, due to the fact that it binds to all interfaces, we believe
+some users might be exposing it unintentionally and are vulnerable. A
+DoS attack can consume resources meant for Ganeti daemons and instances
+running on the master node, making both perform badly.
+
+Fixes are not feasible due to the OpenSSL Python library not exposing
+functionality needed to disable client-side renegotiation. Instead, we
+offer instructions on how to control RAPI's exposure, along with info
+on how RAPI can be setup alongside an HTTPS proxy in case users still
+want or need to expose the RAPI interface. The instructions are
+outlined in Ganeti's security document: doc/html/security.html
+
+CVE-2015-7945
+
+Ganeti leaks the DRBD secret through the RAPI interface. Examining job
+results after an instance information job reveals the secret. With the
+DRBD secret, access to the local cluster network, and ARP poisoning,
+an attacker can impersonate a Ganeti node and clone the disks of a
+DRBD-based instance. While an attacker with access to the cluster
+network is already capable of accessing any data written as DRBD
+traffic is unencrypted, having the secret expedites the process and
+allows access to the entire disk.
+
+Fixes contained in this release prevent the secret from being exposed
+via the RAPI. The DRBD secret can be changed by converting an instance
+to plain and back to DRBD, generating a new secret, but redundancy will
+be lost until the process completes.
+Since attackers with node access are capable of accessing some and
+potentially all data even without the secret, we do not recommend that
+the secret be changed for existing instances.
+
+Minor changes
+~~~~~~~~~~~~~
+
+- Allow disk attachment to diskless instances
+- Calculate correct affected nodes set in InstanceChangeGroup
+  (Issue 1144)
+- Do not retry all requests after connection timeouts to prevent
+  repeated job submission
+- Fix reason trails of expanding opcodes
+- Make lockConfig call retryable
+- Extend timeout for gnt-cluster renew-crypto
+- Return the correct error code in the post-upgrade script
+- Make OpenSSL refrain from DH altogether
+- Fix faulty iallocator type check
+- Improve cfgupgrade output in case of errors
+- Fix upgrades of instances with missing creation time
+- Make htools tolerate missing "dtotal" and "dfree" on luxi
+- Fix default for --default-iallocator-params
+- Renew-crypto: stop daemons on master node first
+- Don't warn about broken SSH setup of offline nodes (Issue 1131)
+- At IAlloc backend guess state from admin state
+- Set node tags in iallocator htools backend
+- Only search for Python-2 interpreters
+- Handle Xen 4.3 states better
+- Improve xl socat migrations
+- replace-disks: fix --ignore-ipolicy
+- Fix disabling of user shutdown reporting
+- Allow userspace-only disk templates
+- Fix instance failover in case of DTS_EXT_MIRROR
+- Fix operations on empty nodes by accepting allocation of 0 jobs
+- Fix instance multi allocation for non-DRBD disks
+- Redistribute master key on downgrade
+- Allow more failover options when using the --no-disk-moves flag
+
+
+Version 2.14.1
+--------------
+
+*(Released Fri, 10 Jul 2015)*
+
+Incompatible/important changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- The SSH security changes reduced the number of nodes which can SSH into
+  other nodes. Unfortunately enough, the Ganeti implementation of migration
+  for the xl stack of Xen required SSH to be able to migrate the instance,
+  leading to a situation where full movement of an instance around the cluster
+  was not possible. This version fixes the issue by using socat to transfer
+  instance data. While socat is less secure than SSH, it is about as secure as
+  xm migrations, and occurs over the secondary network if present. As a
+  consequence of this change, Xen instance migrations using xl cannot occur
+  between nodes running 2.14.0 and 2.14.1.
+- This release contains a fix for the problem that different encodings in
+  SSL certificates can break RPC communication (issue 1094). The fix makes
+  it necessary to rerun 'gnt-cluster renew-crypto --new-node-certificates'
+  after the cluster is fully upgraded to 2.14.1
+
+Other Changes
+~~~~~~~~~~~~~
+
+- The ``htools`` now also work properly on shared-storage clusters.
+- Instance moves now also work properly for the plain disk template.
+- Filter-evaluation for run-time data filter was fixed (issue 1100).
+- Various improvements to the documentation have been added.
+
+
+Version 2.14.0
+--------------
+
+*(Released Tue, 2 Jun 2015)*
+
+New features
+~~~~~~~~~~~~
+
+- The build system now enforces external Haskell dependencies to lie in
+  a supported range as declared by our new ganeti.cabal file.
+- Basic support for instance reservations has been added. Instance addition
+  supports a --forthcoming option telling Ganeti to only reserve the resources
+  but not create the actual instance. The instance can later be created by
+  passing the --commit option to the instance addition command.
+- Node tags starting with ``htools:nlocation:`` now have a special meaning to
+  htools(1). They control between which nodes migration is possible, e.g.,
+  during hypervisor upgrades. See hbal(1) for details.
+- The node-allocation lock has been removed for good, thus speeding up parallel
+  instance allocation and creation.
+- The external storage interface has been extended by optional ``open``
+  and ``close`` scripts.
+
+New dependencies
+~~~~~~~~~~~~~~~~
+
+- Building the Haskell part of Ganeti now requires Cabal and cabal-install.
+
+Known issues
+~~~~~~~~~~~~
+
+- Under certain conditions an instance doesn't get unpaused after live
+  migration (issue #1050)
+
+Since 2.14.0 rc1
+~~~~~~~~~~~~~~~~
+
+- The call to the IAllocator in 'gnt-node evacuate' has been fixed.
+- In opportunistic locking, only ask for those node resource locks where
+  the node lock is held.
+- Lock requests are repeatable now; this avoids failure of a job in a
+  race condition with a signal sent to the job.
+- Various improvements to the QA.
+
+
+Version 2.14.0 rc2
+------------------
+
+*(Released Tue, 19 May 2015)*
+
+This was the second release candidate in the 2.14 series. All important
+changes are listed in the 2.14.0 entry.
+
+Since 2.14.0 rc1
+~~~~~~~~~~~~~~~~
+
+- private parameters are now properly exported to instance create scripts
+- unnecessary config unlocks and upgrades have been removed, improving
+  performance, in particular of cluster verification
+- some rarely occurring file-descriptor leaks have been fixed
+- The checks for orphan and lost volumes have been fixed to also work
+  correctly when multiple volume groups are used.
+
+
+Version 2.14.0 rc1
+------------------
+
+*(Released Wed, 29 Apr 2015)*
+
+This was the first release candidate in the 2.14 series. All important
+changes are listed in the latest 2.14 entry.
+
+Since 2.14.0 beta2
+~~~~~~~~~~~~~~~~~~
+
+The following issue has been fixed:
+
+- A race condition where a badly timed kill of WConfD could lead to
+  an incorrect configuration.
+
+Fixes inherited from the 2.12 branch:
+
+- Upgrade from old versions (2.5 and 2.6) was failing (issues 1070, 1019).
+- gnt-network info outputs wrong external reservations (issue 1068)
+- Refuse to demote master from master capability (issue 1023)
+
+Fixes inherited from the 2.13 branch:
+
+- bugs related to ssh-key handling of master candidate (issues 1045, 1046, 1047)
+
+
+Version 2.14.0 beta2
+--------------------
+
+*(Released Thu, 26 Mar 2015)*
+
+This was the second beta release in the 2.14 series. All important changes
+are listed in the latest 2.14 entry.
+
+Since 2.14.0 beta1
+~~~~~~~~~~~~~~~~~~
+
+The following issues have been fixed:
+
+- Issue 1018: Cluster init (and possibly other jobs) occasionally fail to start
+
+The extension of the external storage interface was not present in 2.14.0 beta1.
+
+
+Version 2.14.0 beta1
+--------------------
+
+*(Released Fri, 13 Feb 2015)*
+
+This was the first beta release of the 2.14 series. All important changes
+are listed in the latest 2.14 entry.
+
+
 Version 2.13.3
 --------------
 
diff --git a/README b/README
index e2898ad..345ef2d 100644
--- a/README
+++ b/README
@@ -1,4 +1,4 @@
-Ganeti 2.13
+Ganeti 2.14
 ===========
 
 For installation instructions, read the INSTALL and the doc/install.rst
diff --git a/cabal/CabalDependenciesMacros.hs b/cabal/CabalDependenciesMacros.hs
new file mode 100644
index 0000000..e07def7
--- /dev/null
+++ b/cabal/CabalDependenciesMacros.hs
@@ -0,0 +1,38 @@
+module Main where
+
+import Control.Applicative
+import qualified Data.Set as Set
+import qualified Distribution.Simple.Build.Macros as Macros
+import Distribution.Simple.Configure (maybeGetPersistBuildConfig)
+import Distribution.Simple.LocalBuildInfo (externalPackageDeps)
+import Distribution.PackageDescription (packageDescription)
+import Distribution.PackageDescription.Parse (readPackageDescription)
+import Distribution.Text (display)
+import Distribution.Verbosity (normal)
+import System.Environment (getArgs)
+
+
+main :: IO ()
+main = do
+  -- Get paths from program arguments.
+  (cabalPath, depsPath, macrosPath) <- do
+    args <- getArgs
+    case args of
+      [c, d, m] -> return (c, d, m)
+      _         -> error "Expected 3 arguments: cabalPath depsPath macrosPath"
+
+  -- Read the cabal file.
+  pkgDesc <- packageDescription <$> readPackageDescription normal cabalPath
+
+  -- Read the setup-config.
+  m'conf <- maybeGetPersistBuildConfig "dist"
+  case m'conf of
+    Nothing -> error "could not read dist/setup-config"
+    Just conf -> do
+
+      -- Write package dependencies.
+      let deps = map (display . fst) $ externalPackageDeps conf
+      writeFile depsPath (unwords $ map ("-package-id " ++) deps)
+
+      -- Write package MIN_VERSION_* macros.
+      writeFile macrosPath $ Macros.generate pkgDesc conf
diff --git a/cabal/cabal-from-modules.py b/cabal/cabal-from-modules.py
new file mode 100644
index 0000000..719291d
--- /dev/null
+++ b/cabal/cabal-from-modules.py
@@ -0,0 +1,8 @@
+import sys
+
+cabal_in = sys.argv[1]
+
+modules = '\n    '.join(sorted(sys.stdin.read().split()))
+template = open(cabal_in).read()
+contents = template.replace('-- AUTOGENERATED_MODULES_HERE', modules)
+sys.stdout.write(contents)
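+
+# Illustrative usage only (module names are examples from this tree); this is
+# how the Makefile's ganeti.cabal rule invokes the script:
+#   echo "Ganeti.Objects Ganeti.Objects.Disk" \
+#     | python cabal/cabal-from-modules.py cabal/ganeti.template.cabal > ganeti.cabal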
diff --git a/cabal/ganeti.template.cabal b/cabal/ganeti.template.cabal
new file mode 100644
index 0000000..3813087
--- /dev/null
+++ b/cabal/ganeti.template.cabal
@@ -0,0 +1,107 @@
+name:                ganeti
+version:             2.14
+homepage:            http://www.ganeti.org
+license:             BSD2
+license-file:        COPYING
+author:              Google Inc.
+maintainer:          ganeti-devel@googlegroups.com
+copyright:           2006-2015 Google Inc.
+category:            System
+build-type:          Simple
+extra-source-files:  README
+cabal-version:       >=1.10
+synopsis:            Cluster-based virtualization management software
+description:
+  Cluster-based virtualization management software
+  .
+  See <http://www.ganeti.org>
+
+
+flag mond
+  description: enable the ganeti monitoring daemon
+  default:     True
+
+flag metad
+  description: enable the ganeti metadata daemon
+  default:     True
+
+flag htest
+  description: enable tests
+  default:     True
+
+
+library
+  exposed-modules:
+    -- AUTOGENERATED_MODULES_HERE
+  -- other-modules:
+  other-extensions:
+      TemplateHaskell
+  build-depends:
+      base                          >= 4.5.0.0
+    , array                         >= 0.4.0.0
+    , bytestring                    >= 0.9.2.1
+    , containers                    >= 0.4.2.1
+    , deepseq                       >= 1.3.0.0
+    , directory                     >= 1.1.0.2
+    , filepath                      >= 1.3.0.0
+    , mtl                           >= 2.1.1
+    , old-time                      >= 1.1.0.0
+    , pretty                        >= 1.1.1.0
+    , process                       >= 1.1.0.1
+    , random                        >= 1.0.1.1
+    , template-haskell              >= 2.7.0.0
+    , text                          >= 0.11.1.13
+    , transformers                  >= 0.3.0.0
+    , unix                          >= 2.5.1.0
+
+    , attoparsec                    >= 0.10.1.1   && < 0.13
+    , base64-bytestring             >= 1.0.0.1    && < 1.1
+    , Crypto                        >= 4.2.4      && < 4.3
+    , curl                          >= 1.3.7      && < 1.4
+    , hinotify                      >= 0.3.2      && < 0.4
+    , hslogger                      >= 1.1.4      && < 1.3
+    , json                          >= 0.5        && < 0.9
+    , lens                          >= 3.10       && < 4.8
+    , lifted-base                   >= 0.2.0.3    && < 0.3
+    , monad-control                 >= 0.3.1.3    && < 1.1
+    , MonadCatchIO-transformers     >= 0.3.0.0    && < 0.4
+    , network                       >= 2.3.0.13   && < 2.7
+    , parallel                      >= 3.2.0.2    && < 3.3
+    , regex-pcre                    >= 0.94.2     && < 0.95
+    , temporary                     >= 1.1.2.3    && < 1.3
+    , transformers-base             >= 0.4.1      && < 0.5
+    , utf8-string                   >= 0.3.7      && < 0.4
+    , zlib                          >= 0.5.3.3    && < 0.6
+
+    -- Executables:
+    -- , happy
+    -- , hscolour
+    -- , shelltestrunner
+
+  if flag(htest)
+    build-depends:
+        HUnit                         >= 1.2.4.2    && < 1.3
+      , QuickCheck                    >= 2.4.2      && < 2.8
+      , test-framework                >= 0.6        && < 0.9
+      , test-framework-hunit          >= 0.2.7      && < 0.4
+      , test-framework-quickcheck2    >= 0.2.12.1   && < 0.4
+
+  if flag(mond)
+    build-depends:
+        PSQueue                       >= 1.1        && < 1.2
+      , snap-core                     >= 0.8.1      && < 0.10
+      , snap-server                   >= 0.8.1      && < 0.10
+
+  if flag(metad)
+    build-depends:
+        snap-core                     >= 0.8.1      && < 0.10
+      , snap-server                   >= 0.8.1      && < 0.10
+
+  hs-source-dirs:
+    src, test/hs
+  build-tools:
+    hsc2hs
+  default-language:
+    Haskell2010
+  ghc-options:
+    -Wall
diff --git a/configure.ac b/configure.ac
index 5017e95..76d2c54 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,7 +1,7 @@
 # Configure script for Ganeti
 m4_define([gnt_version_major], [2])
-m4_define([gnt_version_minor], [13])
-m4_define([gnt_version_revision], [3])
+m4_define([gnt_version_minor], [14])
+m4_define([gnt_version_revision], [2])
 m4_define([gnt_version_suffix], [])
 m4_define([gnt_version_full],
           m4_format([%d.%d.%d%s],
@@ -654,14 +654,15 @@
   AC_MSG_FAILURE([ghc-pkg not found, compilation will not be possible])
 fi
 
-# check for modules, first custom/special checks
-AC_MSG_NOTICE([checking for required haskell modules])
-HS_PARALLEL3=
-AC_GHC_PKG_CHECK([parallel-3.*], [HS_PARALLEL3=-DPARALLEL3],
-                 [AC_GHC_PKG_REQUIRE(parallel)], t)
-AC_SUBST(HS_PARALLEL3)
+# Check for cabal
+AC_ARG_VAR(CABAL, [cabal path])
+AC_PATH_PROG(CABAL, [cabal], [])
+if test -z "$CABAL"; then
+  AC_MSG_FAILURE([cabal not found, compilation will not be possible])
+fi
 
-# and now standard modules
+# check for standard modules
+AC_GHC_PKG_REQUIRE(Cabal)
 AC_GHC_PKG_REQUIRE(curl)
 AC_GHC_PKG_REQUIRE(json)
 AC_GHC_PKG_REQUIRE(network)
@@ -741,7 +742,6 @@
                                  $METAD_PKG]))
   fi
 fi
-AC_SUBST(HS_REGEX_PCRE)
 if test "$has_metad" = True; then
   AC_MSG_NOTICE([Enabling metadata usage])
 fi
diff --git a/devel/build_chroot b/devel/build_chroot
index c78fbcb..f1560c1 100755
--- a/devel/build_chroot
+++ b/devel/build_chroot
@@ -138,7 +138,7 @@
 
 APT_INSTALL="apt-get install -y --no-install-recommends"
 
-if [ DIST_RELEASE = squeeze ]
+if [ $DIST_RELEASE = squeeze ]
 then
   echo "deb http://backports.debian.org/debian-backports" \
        "$DIST_RELEASE-backports main contrib non-free" \
@@ -173,7 +173,7 @@
 function download {
   local FNAME="$1"
   local URL="$2"
-  in_chroot -- wget --output-document="$FNAME" "$URL"
+  in_chroot -- wget --no-check-certificate --output-document="$FNAME" "$URL"
   verify_sha1 "$FNAME" "$( lookup_sha1 "$URL" )"
 }
 
@@ -229,7 +229,7 @@
         libcurl4-gnutls-dev \
         libpcre3-dev \
         happy \
-        hlint hscolour pandoc \
+        hscolour pandoc \
         graphviz qemu-utils \
         python-docutils \
         python-simplejson \
@@ -290,7 +290,7 @@
         vector-0.10.9.1 \
         zlib-0.5.4.1 \
         \
-        hlint-1.8.57 \
+        'hlint>=1.9.12' \
         HUnit-1.2.5.2 \
         QuickCheck-2.6 \
         test-framework-0.8.0.3 \
@@ -323,7 +323,7 @@
       libghc-hslogger-dev libghc-crypto-dev \
       libghc-regex-pcre-dev libghc-attoparsec-dev \
       libghc-vector-dev libghc-temporary-dev \
-      libghc-snap-server-dev libpcre3 libpcre3-dev hscolour hlint pandoc \
+      libghc-snap-server-dev libpcre3 libpcre3-dev happy hscolour pandoc \
       libghc-zlib-dev libghc-psqueue-dev \
       cabal-install \
       python-setuptools python-sphinx python-epydoc graphviz python-pyparsing \
@@ -359,7 +359,8 @@
        cabal install --global \
         'base64-bytestring>=1' \
         lens-3.10.2 \
-        'lifted-base>=0.1.2'
+        'lifted-base>=0.1.2' \
+        'hlint>=1.9.12'
 ;;
 
   testing)
@@ -373,14 +374,22 @@
       libghc-hslogger-dev libghc-crypto-dev \
       libghc-regex-pcre-dev libghc-attoparsec-dev \
       libghc-vector-dev libghc-temporary-dev \
-      libghc-snap-server-dev libpcre3 libpcre3-dev hscolour hlint pandoc \
+      libghc-snap-server-dev libpcre3 libpcre3-dev happy hscolour pandoc \
       libghc-zlib-dev libghc-psqueue-dev \
       libghc-base64-bytestring-dev libghc-lens-dev libghc-lifted-base-dev \
+      libghc-cabal-dev \
       cabal-install \
       python-setuptools python-sphinx python-epydoc graphviz python-pyparsing \
       python-simplejson python-pycurl python-pyinotify python-paramiko \
       python-bitarray python-ipaddr python-yaml qemu-utils python-coverage pep8 \
       shelltestrunner python-dev pylint openssh-client vim git git-email
+
+    in_chroot -- \
+      cabal update
+
+    in_chroot -- \
+      cabal install --global \
+       'hlint>=1.9.12'
 ;;
 
   precise)
@@ -396,35 +405,43 @@
     echo "Installing packages"
     in_chroot -- \
       $APT_INSTALL \
-      autoconf automake ghc ghc-haddock libghc-network-dev \
-      libghc-test-framework{,-hunit,-quickcheck2}-dev \
-      libghc-json-dev libghc-curl-dev libghc-hinotify-dev \
+      autoconf automake ghc ghc-haddock \
+      libghc-curl-dev libghc-hinotify-dev \
       libghc-parallel-dev libghc-utf8-string-dev \
-      libghc-hslogger-dev libghc-crypto-dev \
-      libghc-regex-pcre-dev libghc-attoparsec-dev \
+      libghc-crypto-dev \
+      libghc-attoparsec-dev \
       libghc-vector-dev libghc-temporary-dev libghc-psqueue-dev \
-      libghc-snap-server-dev libpcre3 libpcre3-dev hscolour hlint pandoc \
+      libghc-cabal-dev \
+      cabal-install \
+      libpcre3 libpcre3-dev happy hscolour pandoc \
       python-setuptools python-sphinx python-epydoc graphviz python-pyparsing \
       python-simplejson python-pyinotify python-pycurl python-paramiko \
       python-bitarray python-ipaddr python-yaml qemu-utils python-coverage pep8 \
       python-dev pylint openssh-client vim git git-email \
       build-essential
 
-    echo "Installing cabal packages"
-    in_chroot -- \
-      $APT_INSTALL cabal-install
-
     in_chroot -- \
       cabal update
 
+     # Precise has network-2.4.0.0, which breaks, see
+     #   https://github.com/haskell/network/issues/60
      in_chroot -- \
        cabal install --global \
         'base64-bytestring>=1' \
+        hslogger-1.2.3 \
+        'hlint>=1.9.12' \
+        json-0.7 \
         lens-3.10.2 \
-        'lifted-base>=0.1.2'
-
-    in_chroot -- \
-      cabal install --global shelltestrunner
+        'lifted-base>=0.1.2' \
+        'network>=2.4.0.1' \
+        'regex-pcre>=0.94.4' \
+        parsec-3.1.3 \
+        shelltestrunner \
+        'snap-server>=0.8.1' \
+        test-framework-0.8.0.3 \
+        test-framework-hunit-0.3.0.1 \
+        test-framework-quickcheck2-0.3.0.2 \
+        'transformers>=0.3.0.0'
     ;;
 
   *)
@@ -437,8 +454,10 @@
       libghc-hslogger-dev libghc-crypto-dev \
       libghc-regex-pcre-dev libghc-attoparsec-dev \
       libghc-vector-dev libghc-temporary-dev libghc-psqueue-dev \
-      libghc-snap-server-dev libpcre3 libpcre3-dev hscolour hlint pandoc \
-      libghc-lifted-base-dev \
+      libghc-snap-server-dev libpcre3 libpcre3-dev happy hscolour pandoc \
+      libghc-lens-dev libghc-lifted-base-dev \
+      libghc-cabal-dev \
+      cabal-install \
       libghc-base64-bytestring-dev \
       python-setuptools python-sphinx python-epydoc graphviz python-pyparsing \
       python-simplejson python-pyinotify python-pycurl python-paramiko \
@@ -446,6 +465,12 @@
       shelltestrunner python-dev pylint openssh-client vim git git-email \
       build-essential
 
+    in_chroot -- \
+      cabal update
+
+     in_chroot -- \
+       cabal install --global \
+        'hlint>=1.9.12'
 ;;
 esac
 
diff --git a/doc/design-2.14.rst b/doc/design-2.14.rst
new file mode 100644
index 0000000..074e65f
--- /dev/null
+++ b/doc/design-2.14.rst
@@ -0,0 +1,9 @@
+==================
+Ganeti 2.14 design
+==================
+
+The following designs have been partially implemented in Ganeti 2.14.
+
+- :doc:`design-location`
+- :doc:`design-reservations`
+- :doc:`design-configlock`
diff --git a/doc/design-configlock.rst b/doc/design-configlock.rst
index e19f0b9..9e650c7 100644
--- a/doc/design-configlock.rst
+++ b/doc/design-configlock.rst
@@ -73,25 +73,23 @@
 In a second step, more specialised read functions will be added to ``WConfD``.
 This will reduce the traffic for reads.
 
+Cached Reads
+------------
+
+As jobs synchronize with each other by means of regular locks, the parts
+of the configuration relevant for a job can only change while a job waits
+for new locks. So, if a job has a copy of the configuration and has not asked
+for locks afterwards, all read-only access can be done from that copy. While
+this will not affect the ``ConfigLock``, it saves traffic.
 
 Set-and-release action
 ----------------------
 
 As a typical pattern is to change the configuration and afterwards release
-the ``ConfigLock``. To avoid unncecessary delay in this operation (the next
-modification of the configuration can already happen while the last change
-is written out), WConfD will offer a combined command that will
-
-- set the configuration to the specified value,
-
-- release the config lock,
-
-- and only then wait for the configuration write to finish; it will not
-  wait for confirmation of the lock-release write.
-
-If jobs use this combined command instead of the sequential set followed
-by release, new configuration changes can come in during writeout of the
-current change; in particular, a writeout can contain more than one change.
+the ``ConfigLock``, WConfD will offer a combined call to avoid unnecessary RPC
+call overhead. To make that call retryable, it will do nothing if the
+``ConfigLock`` is not held by the caller; in the return value, it will indicate
+whether the config lock was held when the call was made.
 
 Short-lived ``ConfigLock``
 --------------------------
@@ -116,3 +114,43 @@
 ``WConfD`` gets restarted after the lock acquisition, if that happend
 in the name of the job, it would own a lock without knowing about it,
 and hence that lock would never get released.
+
+
+Approaches considered, but not working
+======================================
+
+Set-and-release action with asynchronous writes
+-----------------------------------------------
+
+Approach
+~~~~~~~~
+
+A typical pattern is to change the configuration and afterwards release
+the ``ConfigLock``. To avoid unnecessary delay in this operation (the next
+modification of the configuration can already happen while the last change
+is written out), WConfD will offer a combined command that will
+
+- set the configuration to the specified value,
+
+- release the config lock,
+
+- and only then wait for the configuration write to finish; it will not
+  wait for confirmation of the lock-release write.
+
+If jobs use this combined command instead of the sequential set followed
+by release, new configuration changes can come in during writeout of the
+current change; in particular, a writeout can contain more than one change.
+
+Problem
+~~~~~~~
+
+This approach works fine, as long as either ``WConfD`` can always do an ordered
+shutdown or the calling process dies as well. If, however, we allow random kill
+signals to be sent to individual daemons (e.g., by an out-of-memory killer),
+the following race occurs. A process can ask for a combined write-and-unlock
+operation; while the configuration is still being written out, the write-out of
+the updated lock status already finishes. Now, if ``WConfD`` gets forcefully
+killed at that very moment, a restarted ``WConfD`` will read the old
+configuration but the new lock status. This will make the calling process
+believe that its call, although it never got an answer, succeeded nevertheless,
+thus resulting in a wrong configuration state.
diff --git a/doc/design-disks.rst b/doc/design-disks.rst
index 18fb75c..74ad409 100644
--- a/doc/design-disks.rst
+++ b/doc/design-disks.rst
@@ -149,10 +149,18 @@
 will be performed using the functions ``AttachInstanceDisk`` and
 ``DetachInstanceDisk``.
 
-Since Ganeti doesn't allow for a `Disk` object to not be attached anywhere (for
-now) we will create two wrapper functions (namely ``AddInstanceDisk`` and
-``RemoveInstanceDisk``) which will add and attach a disk at the same time
-(respectively detach and remove a disk).
+More specifically, the `add` operation will add and attach a disk at the same
+time, using a wrapper that calls the ``AddDisk`` and ``AttachInstanceDisk``
+functions. In the same vein, the `remove` operation will detach and remove a
+disk using a wrapper that calls the ``DetachInstanceDisk`` and
+``RemoveInstanceDisk`` functions. The `attach` and `detach` operations are
+simpler, in the sense that they only call the ``AttachInstanceDisk`` and
+``DetachInstanceDisk`` functions respectively.
+
+It is important to note that the `detach` operation introduces the notion of
+disks that are not attached to any instance. For this reason, the configuration
+checks for detached disks will be removed, as the detached disks can be handled
+by the code.
 
 In addition since Ganeti doesn't allow for a `Disk` object to be attached to
 more than one `Instance` at once, when attaching a disk to an instance we have
@@ -169,6 +177,32 @@
 replace the ``disks`` slot with ``disks_info``.
 
 
+Supporting the old interface
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The current interface is designed with a uniform disk type in mind and
+this interface should still be supported to not break tools and
+workflows downstream.
+
+The behaviour is fully compatible for instances with constantly
+attached, uniform disks.
+
+Whenever an operation acts on an instance, it will only consider the
+attached disks. If the operation is specific to a disk type, it will
+throw an error if any disks of an unsupported type are attached.
+
+When setting the disk template of an instance, we convert all currently
+attached disks to that template. This means that all disk types
+currently attached must be convertible to the new template.
+
+Since the disk template as a configuration value is going away, it needs
+to be replaced for queries. If the instance has no disks, the
+disk_template will be 'diskless', if it has disks of a single type, its
+disk_template will be that type, and if it has disks of multiple types,
+the new disk template 'mixed' will be returned.
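+
+As an illustration only (a hypothetical helper, not the actual Ganeti code;
+it assumes each disk object exposes its template as ``dev_type``), the
+derivation of the queried disk template could look like:
+
+.. code-block:: python
+
+  def queried_disk_template(disks):
+    """Derive the legacy disk_template value from the attached disks."""
+    dev_types = set(d.dev_type for d in disks)
+    if not dev_types:
+      return "diskless"
+    if len(dev_types) == 1:
+      return dev_types.pop()
+    return "mixed"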
+
+
 Eliminating the disk template from the instance
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -196,11 +230,40 @@
    required. This is incompatible as well and will need to be listed in
    the NEWS file.
 
+Attach/Detach disks from cli
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The `attach`/`detach` options should be available through the command
+``gnt-instance modify``. Like the `add`/`remove` options, the `attach`/`detach`
+options can be invoked using the legacy syntax or the new syntax that supports
+indexes. For the attach option, we can refer to the disk using either its
+`name` or `uuid`. The detach option on the other hand has the same syntax as
+the remove option, and we can refer to a disk by its `name`, `uuid` or `index`
+in the instance.
+
+The attach/detach syntax can be seen below:
+
+* **Legacy syntax**
+
+  .. code-block:: bash
+
+    gnt-instance modify --disk attach,name=*NAME* *INSTANCE*
+    gnt-instance modify --disk attach,uuid=*UUID* *INSTANCE*
+    gnt-instance modify --disk detach *INSTANCE*
+
+* **New syntax**
+
+  .. code-block:: bash
+
+    gnt-instance modify --disk *N*:attach,name=*NAME* *INSTANCE*
+    gnt-instance modify --disk *N*:attach,uuid=*UUID* *INSTANCE*
+    gnt-instance modify --disk *N*:detach *INSTANCE*
+    gnt-instance modify --disk *NAME*:detach *INSTANCE*
+    gnt-instance modify --disk *UUID*:detach *INSTANCE*
+
 
 .. TODO: Locks for Disk objects
 
-.. TODO: Attach/Detach disks
-
 .. TODO: LUs for disks
 
 
diff --git a/doc/design-draft.rst b/doc/design-draft.rst
index bc6735c..7aab851 100644
--- a/doc/design-draft.rst
+++ b/doc/design-draft.rst
@@ -2,7 +2,7 @@
 Design document drafts
 ======================
 
-.. Last updated for Ganeti 2.13
+.. Last updated for Ganeti 2.14
 
 .. toctree::
    :maxdepth: 2
diff --git a/doc/design-file-based-disks-ownership.rst b/doc/design-file-based-disks-ownership.rst
new file mode 100644
index 0000000..fc89168
--- /dev/null
+++ b/doc/design-file-based-disks-ownership.rst
@@ -0,0 +1,67 @@
+=================================
+Ganeti file-based disks ownership
+=================================
+
+.. contents:: :depth: 2
+
+This design document explains the issue that emerges from applying the
+`detach` operation to file-based disks and provides a simple solution to it.
+Note that this design document applies only to disks of template `file` and
+`sharedfile`, but not `gluster`. However, for brevity these templates
+will go under the umbrella term `file-based`.
+
+Current state and shortcomings
+==============================
+
+When creating a file-based disk, Ganeti stores it inside a specific directory,
+called `file_storage_dir`. Inside this directory, there is a folder for each
+file-based instance and inside each folder are the files for the instance's
+disks (e.g. ``<file_storage_dir>/<instance_name>/<disk_name>``). This way of
+storing disks seems simple enough, but the
+`detach` operation does not work well with it. The reason is that if a disk is
+detached from an instance and attached to another one, the file will remain in
+the folder of the original instance.
+
+This means that if we try to destroy an instance with detached disks, Ganeti
+will correctly complain that the instance folder still has disk data. In more
+high-level terms, we need to find a way to resolve the issue of disk ownership
+at the filesystem level for file-based instances.
+
+Proposed changes
+================
+
+The change we propose is simple. Once a disk is detached from an instance, it
+will be moved out of the instance's folder. The new location will be the
+`file_storage_dir`, i.e. the disk will reside on the same level as the instance
+folders. In order to maintain a consistent configuration, the logical_id of the
+disk will be updated to point to the new path.
+
+Similarly, on the `attach` operation, the file name and logical id will change
+and the disk will be moved under the new instance's directory.
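+
+As a minimal sketch (hypothetical helper names, not the actual Ganeti code),
+the path changes amount to:
+
+.. code-block:: python
+
+  import os
+
+  def detached_path(file_storage_dir, disk_name):
+    # <file_storage_dir>/<instance_name>/<disk_name> -> <file_storage_dir>/<disk_name>
+    return os.path.join(file_storage_dir, disk_name)
+
+  def attached_path(file_storage_dir, instance_name, disk_name):
+    # <file_storage_dir>/<disk_name> -> <file_storage_dir>/<instance_name>/<disk_name>
+    return os.path.join(file_storage_dir, instance_name, disk_name)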
+
+Implementation details
+======================
+
+Detach operation
+~~~~~~~~~~~~~~~~
+
+Before detaching a disk from an instance, we do the following:
+
+1. Transform the current path to the new one.
+
+   <file_storage_dir>/<instance_name>/<disk_name> --> <file_storage_dir>/<disk_name>
+
+2. Use the rpc call ``call_blockdev_rename`` to move the disk to the new path.
+3. Store the new ``logical_id`` to the configuration.
+
+Attach operation
+~~~~~~~~~~~~~~~~
+
+Before attaching a disk to an instance, we do the following:
+
+1. Create the new path for the file disk. In order to construct it properly,
+   use the ``GenerateDiskTemplate`` function to create a dummy disk template
+   and get its ``logical_id``. The new ``logical_id`` contains the new path for
+   the file disk.
+2. Use the rpc call ``call_blockdev_rename`` to move the disk to the new path.
+3. Store the new ``logical_id`` to the configuration.
diff --git a/doc/design-location.rst b/doc/design-location.rst
index 5d4989a..9d0f7aa 100644
--- a/doc/design-location.rst
+++ b/doc/design-location.rst
@@ -54,13 +54,16 @@
 
 The weights for these components might have to be tuned as experience with these
 setups grows, but as a starting point, both components will have a weight of
-0.5 each. In this way, any common-failure violations are less important than
-any hard constraints missed (instances on offline nodes, N+1 redundancy,
-exclusion tags) so that the hard constraints will be restored first when
-balancing a cluster. Nevertheless, with weight 0.5 the new common-failure
-components will still be significantly more important than all the balancedness
-components (cpu, disk, memory), as the latter are standard deviations of
-fractions.
+1.0 each. In this way, any common-failure violations are less important than
+any hard constraints missed (like instances on offline nodes) so that
+the hard constraints will be restored first when balancing a cluster.
+Nevertheless, with weight 1.0 the new common-failure components will
+still be significantly more important than all the balancedness components
+(cpu, disk, memory), as the latter are standard deviations of fractions.
+It will also dominate the disk load component, which, when only taking
+static information into account, essentially amounts to counting disks. In
+this way, Ganeti will be willing to sacrifice equal numbers of disks on every
+node in order to fulfill location requirements.
 
 Apart from changing the balancedness metric, common-failure tags will
 not have any other effect. In particular, as opposed to exclusion tags,
diff --git a/doc/design-shared-storage.rst b/doc/design-shared-storage.rst
index 793c522..0390264 100644
--- a/doc/design-shared-storage.rst
+++ b/doc/design-shared-storage.rst
@@ -185,7 +185,9 @@
 - Detach a disk from a given node
 - SetInfo to a disk (add metadata)
 - Verify its supported parameters
-- Snapshot a disk (currently used during gnt-backup export)
+- Snapshot a disk (optional)
+- Open a disk (optional)
+- Close a disk (optional)
 
 The proposed ExtStorage interface borrows heavily from the OS
 interface and follows a one-script-per-function approach. An ExtStorage
@@ -199,16 +201,19 @@
 - ``setinfo``
 - ``verify``
 - ``snapshot`` (optional)
+- ``open`` (optional)
+- ``close`` (optional)
 
 All scripts will be called with no arguments and get their input via
 environment variables. A common set of variables will be exported for
-all commands, and some of them might have extra ones.
+all commands, and some commands might have extra variables.
 
 ``VOL_NAME``
   The name of the volume. This is unique for Ganeti and it
   uses it to refer to a specific volume inside the external storage.
 ``VOL_SIZE``
   The volume's size in mebibytes.
+  Available only to the `create` and `grow` scripts.
 ``VOL_NEW_SIZE``
   Available only to the `grow` script. It declares the
   new size of the volume after grow (in mebibytes).
@@ -221,11 +226,14 @@
 ``VOL_CNAME``
   The human readable name of the disk (if any).
 ``VOL_SNAPSHOT_NAME``
-  The name of the volume's snapshot to be taken.
+  The name of the volume's snapshot.
   Available only to the `snapshot` script.
 ``VOL_SNAPSHOT_SIZE``
-  The size of the volume's snapshot to be taken.
+  The size of the volume's snapshot.
   Available only to the `snapshot` script.
+``VOL_OPEN_EXCLUSIVE``
+  Whether the volume will be accessed exclusively or not.
+  Available only to the `open` script.
 
 All scripts except `attach` should return 0 on success and non-zero on
 error, accompanied by an appropriate error message on stderr. The
@@ -233,9 +241,14 @@
 the block device's full path, after it has been successfully attached to
 the host node. On error it should return non-zero.
 
-To keep backwards compatibility we let the ``snapshot`` script be
-optional. If present then the provider will support instance backup
-export as well.
+The ``snapshot``, ``open`` and ``close`` scripts were introduced after
+the first implementation of the ExtStorage Interface. To keep backwards
+compatibility with the first implementation, we make these scripts
+optional.
+
+The ``snapshot`` script, if present, will be used for instance backup
+export. The ``open`` script makes the device ready for I/O. The ``close``
+script disables I/O on the device.
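+
+As a purely illustrative sketch (the storage handling below is hypothetical,
+not part of any real provider), an `open` script could consume the
+environment variables described above as follows::
+
+  #!/usr/bin/env python
+  # Hypothetical `open` script of an ExtStorage provider.
+  import os
+  import sys
+
+  vol_name = os.environ["VOL_NAME"]
+  # The exact boolean encoding of VOL_OPEN_EXCLUSIVE is an assumption here.
+  exclusive = os.environ.get("VOL_OPEN_EXCLUSIVE") in ("True", "true", "1")
+
+  # ... provider-specific work to make the volume ready for I/O,
+  # possibly taking `exclusive` into account ...
+  success = True  # placeholder for the real outcome
+
+  if not success:
+    sys.stderr.write("cannot open volume %s\n" % vol_name)
+    sys.exit(1)
+  sys.exit(0)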
 
 Implementation
 --------------
@@ -243,7 +256,8 @@
 To support the ExtStorage interface, we will introduce a new disk
 template called `ext`. This template will implement the existing Ganeti
 disk interface in `lib/bdev.py` (create, remove, attach, assemble,
-shutdown, grow, setinfo), and will simultaneously pass control to the
+shutdown, grow, setinfo, open, close),
+and will simultaneously pass control to the
 external scripts to actually handle the above actions. The `ext` disk
 template will act as a translation layer between the current Ganeti disk
 interface and the ExtStorage providers.
@@ -315,7 +329,9 @@
 be case insensitive. If the 'attach' script doesn't return any extra
 lines, we assume that the ExtStorage provider doesn't support userspace
 access (this way we maintain backward compatibility with the existing
-'attach' scripts).
+'attach' scripts). If the provider supports *only* userspace
+access, and thus a local block device is not available, the first
+line should be empty.
 
 The 'GetUserspaceAccessUri' method of the 'ExtStorageDevice' class will
 parse the output of the 'attach' script and if the provider supports
diff --git a/doc/hooks.rst b/doc/hooks.rst
index 45d375a..f7a0a25 100644
--- a/doc/hooks.rst
+++ b/doc/hooks.rst
@@ -1,7 +1,7 @@
 Ganeti customisation using hooks
 ================================
 
-Documents Ganeti version 2.13
+Documents Ganeti version 2.14
 
 .. contents::
 
diff --git a/doc/iallocator.rst b/doc/iallocator.rst
index d5896ea..c5c360f 100644
--- a/doc/iallocator.rst
+++ b/doc/iallocator.rst
@@ -1,7 +1,7 @@
 Ganeti automatic instance allocation
 ====================================
 
-Documents Ganeti version 2.13
+Documents Ganeti version 2.14
 
 .. contents::
 
diff --git a/doc/index.rst b/doc/index.rst
index a95025d..28b37a0 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -79,6 +79,7 @@
    design-2.11.rst
    design-2.12.rst
    design-2.13.rst
+   design-2.14.rst
 
 Draft designs
 -------------
@@ -104,6 +105,7 @@
    design-disk-conversion.rst
    design-disks.rst
    design-file-based-storage.rst
+   design-file-based-disks-ownership.rst
    design-hroller.rst
    design-hsqueeze.rst
    design-hotplug.rst
diff --git a/doc/security.rst b/doc/security.rst
index 6973822..b19a9bc 100644
--- a/doc/security.rst
+++ b/doc/security.rst
@@ -1,7 +1,7 @@
 Security in Ganeti
 ==================
 
-Documents Ganeti version 2.13
+Documents Ganeti version 2.14
 
 Ganeti was developed to run on internal, trusted systems. As such, the
 security model is all-or-nothing.
diff --git a/doc/virtual-cluster.rst b/doc/virtual-cluster.rst
index 9fd37d9..7fdfc45 100644
--- a/doc/virtual-cluster.rst
+++ b/doc/virtual-cluster.rst
@@ -1,7 +1,7 @@
 Virtual cluster support
 =======================
 
-Documents Ganeti version 2.13
+Documents Ganeti version 2.14
 
 .. contents::
 
diff --git a/lib/backend.py b/lib/backend.py
index a10ddb1..92392ef 100644
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -2547,6 +2547,10 @@
   @return: absolute path to the disk's symlink
 
   """
+  # In case we have only a userspace access URI, device_path is None
+  if not device_path:
+    return None
+
   link_name = _GetBlockDevSymlinkPath(instance_name, idx)
   try:
     os.symlink(device_path, link_name)
@@ -2840,21 +2844,10 @@
   @param target: target host (usually ip), on this node
 
   """
-  # TODO: why is this required only for DTS_EXT_MIRROR?
-  if instance.disk_template in constants.DTS_EXT_MIRROR:
-    # Create the symlinks, as the disks are not active
-    # in any way
-    try:
-      _GatherAndLinkBlockDevs(instance)
-    except errors.BlockDeviceError, err:
-      _Fail("Block device error: %s", err, exc=True)
-
   hyper = hypervisor.GetHypervisor(instance.hypervisor)
   try:
     hyper.AcceptInstance(instance, info, target)
   except errors.HypervisorError, err:
-    if instance.disk_template in constants.DTS_EXT_MIRROR:
-      _RemoveBlockDevLinks(instance.name, instance.disks_info)
     _Fail("Failed to accept instance: %s", err, exc=True)
 
 
@@ -4064,9 +4057,13 @@
   # Disks
   for idx, disk in enumerate(instance.disks_info):
     real_disk = _OpenRealBD(disk)
-    result["DISK_%d_PATH" % idx] = real_disk.dev_path
+    uri = _CalculateDeviceURI(instance, disk, real_disk)
     result["DISK_%d_ACCESS" % idx] = disk.mode
     result["DISK_%d_UUID" % idx] = disk.uuid
+    if real_disk.dev_path:
+      result["DISK_%d_PATH" % idx] = real_disk.dev_path
+    if uri:
+      result["DISK_%d_URI" % idx] = uri
     if disk.name:
       result["DISK_%d_NAME" % idx] = disk.name
     if constants.HV_DISK_TYPE in instance.hvparams:
@@ -4207,15 +4204,14 @@
     else:
       _Fail("Cannot find block device %s", disk)
 
-  if disk.dev_type == constants.DT_DRBD8:
-    if not disk.children:
-      _Fail("DRBD device '%s' without backing storage cannot be snapshotted",
-            disk.unique_id)
-    return BlockdevSnapshot(disk.children[0], snap_name, snap_size)
-  elif disk.dev_type == constants.DT_PLAIN:
-    return _DiskSnapshot(disk, snap_name, snap_size)
-  elif disk.dev_type == constants.DT_EXT:
-    return _DiskSnapshot(disk, snap_name, snap_size)
+  if disk.SupportsSnapshots():
+    if disk.dev_type == constants.DT_DRBD8:
+      if not disk.children:
+        _Fail("DRBD device '%s' without backing storage cannot be snapshotted",
+              disk.unique_id)
+      return BlockdevSnapshot(disk.children[0], snap_name, snap_size)
+    else:
+      return _DiskSnapshot(disk, snap_name, snap_size)
   else:
     _Fail("Cannot snapshot block device '%s' of type '%s'",
           disk.logical_id, disk.dev_type)
@@ -4262,6 +4258,7 @@
   """
   destdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name + ".new")
   finaldestdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name)
+  disk_template = utils.GetDiskTemplate(snap_disks)
 
   config = objects.SerializableConfigParser()
 
@@ -4283,7 +4280,7 @@
              instance.beparams[constants.BE_MAXMEM])
   config.set(constants.INISECT_INS, "vcpus", "%d" %
              instance.beparams[constants.BE_VCPUS])
-  config.set(constants.INISECT_INS, "disk_template", instance.disk_template)
+  config.set(constants.INISECT_INS, "disk_template", disk_template)
   config.set(constants.INISECT_INS, "hypervisor", instance.hypervisor)
   config.set(constants.INISECT_INS, "tags", " ".join(instance.GetTags()))
 
@@ -4310,7 +4307,7 @@
       config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count,
                  ("%s" % disk.iv_name))
       config.set(constants.INISECT_INS, "disk%d_dump" % disk_count,
-                 ("%s" % disk.logical_id[1]))
+                 ("%s" % disk.uuid))
       config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
                  ("%d" % disk.size))
       config.set(constants.INISECT_INS, "disk%d_name" % disk_count,
@@ -4625,12 +4622,35 @@
     except errors.BlockDeviceError, err:
       msg.append(str(err))
   if msg:
-    _Fail("Can't make devices secondary: %s", ",".join(msg))
+    _Fail("Can't close devices: %s", ",".join(msg))
   else:
     if instance_name:
       _RemoveBlockDevLinks(instance_name, disks)
 
 
+def BlockdevOpen(instance_name, disks, exclusive):
+  """Opens the given block devices.
+
+  """
+  bdevs = []
+  for cf in disks:
+    rd = _RecursiveFindBD(cf)
+    if rd is None:
+      _Fail("Can't find device %s", cf)
+    bdevs.append(rd)
+
+  msg = []
+  for idx, rd in enumerate(bdevs):
+    try:
+      rd.Open(exclusive=exclusive)
+      _SymlinkBlockDev(instance_name, rd.dev_path, idx)
+    except errors.BlockDeviceError, err:
+      msg.append(str(err))
+
+  if msg:
+    _Fail("Can't open devices: %s", ",".join(msg))
+
+
 def ValidateHVParams(hvname, hvparams):
   """Validates the given hypervisor parameters.
 
@@ -4935,22 +4955,13 @@
 
   elif ieio == constants.IEIO_RAW_DISK:
     (disk, ) = ieargs
-
     real_disk = _OpenRealBD(disk)
 
     if mode == constants.IEM_IMPORT:
-      # we use nocreat to fail if the device is not already there or we pass a
-      # wrong path; we use notrunc to no attempt truncate on an LV device
-      suffix = utils.BuildShellCmd("| dd of=%s conv=nocreat,notrunc bs=%s",
-                                   real_disk.dev_path,
-                                   str(constants.DD_BLOCK_SIZE)) # 1 MB
+      suffix = "| %s" % utils.ShellQuoteArgs(real_disk.Import())
 
     elif mode == constants.IEM_EXPORT:
-      # the block size on the read dd is 1MiB to match our units
-      prefix = utils.BuildShellCmd("dd if=%s bs=%s count=%s |",
-                                   real_disk.dev_path,
-                                   str(constants.DD_BLOCK_SIZE), # 1 MB
-                                   str(disk.size))
+      prefix = "%s |" % utils.ShellQuoteArgs(real_disk.Export())
       exp_size = disk.size
 
   elif ieio == constants.IEIO_SCRIPT:
@@ -4962,13 +4973,28 @@
     env = OSEnvironment(instance, inst_os)
 
     if mode == constants.IEM_IMPORT:
-      env["IMPORT_DEVICE"] = env["DISK_%d_PATH" % disk_index]
+      disk_path_var = "DISK_%d_PATH" % disk_index
+      if disk_path_var in env:
+        env["IMPORT_DEVICE"] = env[disk_path_var]
+        env["IMPORT_DISK_PATH"] = env[disk_path_var]
+
+      disk_uri_var = "DISK_%d_URI" % disk_index
+      if disk_uri_var in env:
+        env["IMPORT_DISK_URI"] = env[disk_uri_var]
+
       env["IMPORT_INDEX"] = str(disk_index)
       script = inst_os.import_script
 
     elif mode == constants.IEM_EXPORT:
-      real_disk = _OpenRealBD(disk)
-      env["EXPORT_DEVICE"] = real_disk.dev_path
+      disk_path_var = "DISK_%d_PATH" % disk_index
+      if disk_path_var in env:
+        env["EXPORT_DEVICE"] = env[disk_path_var]
+        env["EXPORT_DISK_PATH"] = env[disk_path_var]
+
+      disk_uri_var = "DISK_%d_URI" % disk_index
+      if disk_uri_var in env:
+        env["EXPORT_DISK_URI"] = env[disk_uri_var]
+
       env["EXPORT_INDEX"] = str(disk_index)
       script = inst_os.export_script
 
@@ -5019,6 +5045,11 @@
   @param ieioargs: Input/output arguments
 
   """
+
+  # Use Import/Export over socat.
+  #
+  #   Export() gives a command that produces a flat stream.
+  #   Import() gives a command that reads a flat stream to a disk template.
   if mode == constants.IEM_IMPORT:
     prefix = "import"
 
@@ -5230,18 +5261,12 @@
             err, exc=True)
 
 
-def DrbdAttachNet(disks, instance_name, multimaster):
+def DrbdAttachNet(disks, multimaster):
   """Attaches the network on a list of drbd devices.
 
   """
   bdevs = _FindDisks(disks)
 
-  if multimaster:
-    for idx, rd in enumerate(bdevs):
-      try:
-        _SymlinkBlockDev(instance_name, rd.dev_path, idx)
-      except EnvironmentError, err:
-        _Fail("Can't create symlink: %s", err)
   # reconnect disks, switch to new master configuration and if
   # needed primary mode
   for rd in bdevs:
@@ -5295,14 +5320,6 @@
   except utils.RetryTimeout:
     _Fail("Timeout in disk reconnecting")
 
-  if multimaster:
-    # change to primary mode
-    for rd in bdevs:
-      try:
-        rd.Open()
-      except errors.BlockDeviceError, err:
-        _Fail("Can't change to primary mode: %s", err)
-
 
 def DrbdWaitSync(disks):
   """Wait until DRBDs have synchronized.
diff --git a/lib/bootstrap.py b/lib/bootstrap.py
index 66fddb9..7b6fbfe 100644
--- a/lib/bootstrap.py
+++ b/lib/bootstrap.py
@@ -523,7 +523,7 @@
       for entry in os.listdir(ddir):
         if not os.path.isdir(os.path.join(ddir, entry)):
           raise errors.OpPrereqError(
-            "%s contains non-directory enries like %s. Remove left-overs of an"
+            "%s contains non-directory entries like %s. Remove left-overs of an"
             " old cluster before initialising a new one" % (ddir, entry),
             errors.ECODE_STATE)
 
diff --git a/lib/cli.py b/lib/cli.py
index 9c29764..2001ed9 100644
--- a/lib/cli.py
+++ b/lib/cli.py
@@ -715,6 +715,10 @@
   @type report_cbs: Instance of L{JobPollReportCbBase}
   @param report_cbs: Reporting callbacks
 
+  @return: the opresult of the job
+  @raise errors.JobLost: If job can't be found
+  @raise errors.OpExecError: If job didn't succeed
+
   """
   prev_job_info = None
   prev_logmsg_serial = None
@@ -1288,6 +1292,13 @@
 
   """
   instance = args[0]
+  forthcoming = opts.ensure_value("forthcoming", False)
+  commit = opts.ensure_value("commit", False)
+
+  if forthcoming and commit:
+    raise errors.OpPrereqError("Creating an instance only forthcoming and"
+                               " commiting it are mutally exclusive",
+                               errors.ECODE_INVAL)
 
   (pnode, snode) = SplitNodeOption(opts.node)
 
@@ -1393,6 +1404,9 @@
     else:
       instance_communication = opts.instance_communication
   elif mode == constants.INSTANCE_IMPORT:
+    if forthcoming:
+      raise errors.OpPrereqError("forthcoming instances can only be created,"
+                                 " not imported")
     start = False
     os_type = None
     force_variant = False
@@ -1406,6 +1420,8 @@
     raise errors.ProgrammerError("Invalid creation mode %s" % mode)
 
   op = opcodes.OpInstanceCreate(
+    forthcoming=forthcoming,
+    commit=commit,
     instance_name=instance,
     disks=disks,
     disk_template=opts.disk_template,
diff --git a/lib/cli_opts.py b/lib/cli_opts.py
index 79de008..ae58ede 100644
--- a/lib/cli_opts.py
+++ b/lib/cli_opts.py
@@ -60,6 +60,7 @@
   "CLEANUP_OPT",
   "cli_option",
   "CLUSTER_DOMAIN_SECRET_OPT",
+  "COMMIT_OPT",
   "COMMON_CREATE_OPTS",
   "COMMON_OPTS",
   "COMPRESS_OPT",
@@ -94,6 +95,7 @@
   "FORCE_FILTER_OPT",
   "FORCE_OPT",
   "FORCE_VARIANT_OPT",
+  "FORTHCOMING_OPT",
   "GATEWAY6_OPT",
   "GATEWAY_OPT",
   "GLOBAL_FILEDIR_OPT",
@@ -118,6 +120,7 @@
   "IGNORE_OFFLINE_OPT",
   "IGNORE_REMOVE_FAILURES_OPT",
   "IGNORE_SECONDARIES_OPT",
+  "IGNORE_SOFT_ERRORS_OPT",
   "IGNORE_SIZE_OPT",
   "INCLUDEDEFAULTS_OPT",
   "INSTALL_IMAGE_OPT",
@@ -131,6 +134,7 @@
   "IPOLICY_STD_SPECS_OPT",
   "IPOLICY_STD_SPECS_STR",
   "IPOLICY_VCPU_RATIO",
+  "LONG_SLEEP_OPT",
   "MAC_PREFIX_OPT",
   "MAINTAIN_NODE_HEALTH_OPT",
   "MASTER_NETDEV_OPT",
@@ -583,6 +587,12 @@
                                   help=("Ignore offline nodes and do as much"
                                         " as possible"))
 
+IGNORE_SOFT_ERRORS_OPT = cli_option("--ignore-soft-errors",
+                                    dest="ignore_soft_errors",
+                                    action="store_true", default=False,
+                                    help=("Tell htools to ignore any soft"
+                                          " errors like N+1 violations"))
+
 TAG_ADD_OPT = cli_option("--tags", dest="tags",
                          default=None, help="Comma-separated list of instance"
                                             " tags")
@@ -895,6 +905,16 @@
                          action="store_false",
                          help="Don't start the instance after creation")
 
+FORTHCOMING_OPT = cli_option("--forthcoming", dest="forthcoming",
+                             action="store_true", default=False,
+                             help="Only reserve resources, but do not"
+                                  " create the instance yet")
+
+COMMIT_OPT = cli_option("--commit", dest="commit",
+                        action="store_true", default=False,
+                        help="The instance is already reserved and should"
+                             " be committed now")
+
 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                          action="store_true", default=False,
                          help="Show command instead of executing it")
@@ -1570,6 +1590,9 @@
     help="Verify that Ganeti did not clutter"
     " up the 'authorized_keys' file", action="store_true")
 
+LONG_SLEEP_OPT = cli_option(
+    "--long-sleep", default=False, dest="long_sleep",
+    help="Allow long shutdowns when backing up instances", action="store_true")
 
 #: Options provided by all commands
 COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
diff --git a/lib/client/gnt_backup.py b/lib/client/gnt_backup.py
index 578edf9..89b0e93 100644
--- a/lib/client/gnt_backup.py
+++ b/lib/client/gnt_backup.py
@@ -109,7 +109,8 @@
     ignore_remove_failures=ignore_remove_failures,
     zero_free_space=opts.zero_free_space,
     zeroing_timeout_fixed=opts.zeroing_timeout_fixed,
-    zeroing_timeout_per_mib=opts.zeroing_timeout_per_mib
+    zeroing_timeout_per_mib=opts.zeroing_timeout_per_mib,
+    long_sleep=opts.long_sleep
   )
 
   SubmitOrSend(op, opts)
@@ -169,7 +170,7 @@
     [FORCE_OPT, SINGLE_NODE_OPT, TRANSPORT_COMPRESSION_OPT, NOSHUTDOWN_OPT,
      SHUTDOWN_TIMEOUT_OPT, REMOVE_INSTANCE_OPT, IGNORE_REMOVE_FAILURES_OPT,
      DRY_RUN_OPT, PRIORITY_OPT, ZERO_FREE_SPACE_OPT, ZEROING_TIMEOUT_FIXED_OPT,
-     ZEROING_TIMEOUT_PER_MIB_OPT] + SUBMIT_OPTS,
+     ZEROING_TIMEOUT_PER_MIB_OPT, LONG_SLEEP_OPT] + SUBMIT_OPTS,
     "-n <target_node> [opts...] <name>",
     "Exports an instance to an image"),
   "import": (
diff --git a/lib/client/gnt_cluster.py b/lib/client/gnt_cluster.py
index 954ab4b..5c2c576 100644
--- a/lib/client/gnt_cluster.py
+++ b/lib/client/gnt_cluster.py
@@ -2189,42 +2189,6 @@
   """
   ToStdoutAndLoginfo("Performing version-specific downgrade tasks.")
 
-  # Determine if this cluster is set up with SSH handling
-  # (aka not using --no-ssh-init), check if the public
-  # keyfile exists.
-  update_keys = os.path.exists(pathutils.SSH_PUB_KEYS)
-
-  if not update_keys:
-    return True
-
-  ToStdout("Replace nodes' SSH keys with the master's keys.")
-  (_, root_keyfiles) = \
-    ssh.GetAllUserFiles(constants.SSH_LOGIN_USER, mkdir=False, dircheck=False)
-
-  dsa_root_keyfiles = dict((kind, value) for (kind, value)
-                           in root_keyfiles.items()
-                           if kind == constants.SSHK_DSA)
-  master_private_keyfile, master_public_keyfile = \
-      dsa_root_keyfiles[constants.SSHK_DSA]
-
-  nodes = ssconf.SimpleStore().GetOnlineNodeList()
-  master_node = ssconf.SimpleStore().GetMasterNode()
-  cluster_name = ssconf.SimpleStore().GetClusterName()
-
-  # If master node is in 'nodes', remove it
-  if master_node in nodes:
-    nodes.remove(master_node)
-
-  srun = ssh.SshRunner(cluster_name=cluster_name)
-  for name in nodes:
-    for key_file in [master_private_keyfile, master_public_keyfile]:
-      command = utils.text.ShellQuoteArgs([
-          "scp", key_file, "%s:%s" % (name, key_file)])
-      result = srun.Run(master_node, constants.SSH_LOGIN_USER, command)
-      if result.exit_code != 0:
-        ToStderr("Overiding SSH key '%s' of node '%s' failed. You might"
-                 " want to clean up manually." % (key_file, name))
-
   return True
 
 
diff --git a/lib/client/gnt_instance.py b/lib/client/gnt_instance.py
index 1beb3f3..52da28e 100644
--- a/lib/client/gnt_instance.py
+++ b/lib/client/gnt_instance.py
@@ -1257,37 +1257,52 @@
       # Add item as last item (legacy interface)
       action = constants.DDM_ADD
       identifier = -1
+    elif identifier == constants.DDM_ATTACH:
+      # Attach item as last item (legacy interface)
+      action = constants.DDM_ATTACH
+      identifier = -1
     elif identifier == constants.DDM_REMOVE:
       # Remove last item (legacy interface)
       action = constants.DDM_REMOVE
       identifier = -1
+    elif identifier == constants.DDM_DETACH:
+      # Detach last item (legacy interface)
+      action = constants.DDM_DETACH
+      identifier = -1
     else:
-      # Modifications and adding/removing at arbitrary indices
+      # Modifications and adding/attaching/removing/detaching at arbitrary
+      # indices
       add = params.pop(constants.DDM_ADD, _MISSING)
+      attach = params.pop(constants.DDM_ATTACH, _MISSING)
       remove = params.pop(constants.DDM_REMOVE, _MISSING)
+      detach = params.pop(constants.DDM_DETACH, _MISSING)
       modify = params.pop(constants.DDM_MODIFY, _MISSING)
 
-      if modify is _MISSING:
-        if not (add is _MISSING or remove is _MISSING):
-          raise errors.OpPrereqError("Cannot add and remove at the same time",
-                                     errors.ECODE_INVAL)
-        elif add is not _MISSING:
-          action = constants.DDM_ADD
-        elif remove is not _MISSING:
-          action = constants.DDM_REMOVE
-        else:
-          action = constants.DDM_MODIFY
-
-      elif add is _MISSING and remove is _MISSING:
-        action = constants.DDM_MODIFY
-      else:
-        raise errors.OpPrereqError("Cannot modify and add/remove at the"
-                                   " same time", errors.ECODE_INVAL)
+      # Check if the user has requested more than one operation and raise an
+      # exception. If no operations have been given, default to modify.
+      action = constants.DDM_MODIFY
+      ops = {
+        constants.DDM_ADD: add,
+        constants.DDM_ATTACH: attach,
+        constants.DDM_REMOVE: remove,
+        constants.DDM_DETACH: detach,
+        constants.DDM_MODIFY: modify,
+      }
+      count = 0
+      for op, param in ops.items():
+        if param is not _MISSING:
+          count += 1
+          action = op
+      if count > 1:
+        raise errors.OpPrereqError(
+          "Cannot do more than one of the following operations at the"
+          " same time: %s" % ", ".join(ops.keys()),
+          errors.ECODE_INVAL)
 
       assert not (constants.DDMS_VALUES_WITH_MODIFY & set(params.keys()))
 
-    if action == constants.DDM_REMOVE and params:
-      raise errors.OpPrereqError("Not accepting parameters on removal",
+    if action in (constants.DDM_REMOVE, constants.DDM_DETACH) and params:
+      raise errors.OpPrereqError("Not accepting parameters on removal/detach",
                                  errors.ECODE_INVAL)
 
     result.append((action, identifier, params))
@@ -1541,6 +1556,8 @@
 
 # this is defined separately due to readability only
 add_opts = [
+  FORTHCOMING_OPT,
+  COMMIT_OPT,
   NOSTART_OPT,
   OS_OPT,
   FORCE_VARIANT_OPT,
diff --git a/lib/client/gnt_node.py b/lib/client/gnt_node.py
index 4280eee..87f3d19 100644
--- a/lib/client/gnt_node.py
+++ b/lib/client/gnt_node.py
@@ -444,7 +444,8 @@
   op = opcodes.OpNodeEvacuate(node_name=args[0], mode=mode,
                               remote_node=opts.dst_node,
                               iallocator=opts.iallocator,
-                              early_release=opts.early_release)
+                              early_release=opts.early_release,
+                              ignore_soft_errors=opts.ignore_soft_errors)
   result = SubmitOrSend(op, opts, cl=cl)
 
   # Keep track of submitted jobs
@@ -1136,8 +1137,9 @@
     "Add a node to the cluster"),
   "evacuate": (
     EvacuateNode, ARGS_ONE_NODE,
-    [FORCE_OPT, IALLOCATOR_OPT, NEW_SECONDARY_OPT, EARLY_RELEASE_OPT,
-     PRIORITY_OPT, PRIMARY_ONLY_OPT, SECONDARY_ONLY_OPT] + SUBMIT_OPTS,
+    [FORCE_OPT, IALLOCATOR_OPT, IGNORE_SOFT_ERRORS_OPT, NEW_SECONDARY_OPT,
+     EARLY_RELEASE_OPT, PRIORITY_OPT, PRIMARY_ONLY_OPT, SECONDARY_ONLY_OPT]
+    + SUBMIT_OPTS,
     "[-f] {-I <iallocator> | -n <dst>} [-p | -s] [options...] <node>",
     "Relocate the primary and/or secondary instances from a node"),
   "failover": (
diff --git a/lib/cmdlib/__init__.py b/lib/cmdlib/__init__.py
index 8913305..ee02417 100644
--- a/lib/cmdlib/__init__.py
+++ b/lib/cmdlib/__init__.py
@@ -51,11 +51,12 @@
   LUClusterRename, \
   LUClusterRepairDiskSizes, \
   LUClusterSetParams, \
+  LUClusterRenewCrypto
+from ganeti.cmdlib.cluster.verify import \
   LUClusterVerify, \
   LUClusterVerifyConfig, \
   LUClusterVerifyGroup, \
-  LUClusterVerifyDisks, \
-  LUClusterRenewCrypto
+  LUClusterVerifyDisks
 from ganeti.cmdlib.group import \
   LUGroupAdd, \
   LUGroupAssignNodes, \
@@ -76,13 +77,13 @@
   LUNodeRemove, \
   LURepairNodeStorage
 from ganeti.cmdlib.instance import \
-  LUInstanceCreate, \
   LUInstanceRename, \
   LUInstanceRemove, \
   LUInstanceMove, \
   LUInstanceMultiAlloc, \
-  LUInstanceSetParams, \
   LUInstanceChangeGroup
+from ganeti.cmdlib.instance_create import \
+  LUInstanceCreate
 from ganeti.cmdlib.instance_storage import \
   LUInstanceRecreateDisks, \
   LUInstanceGrowDisk, \
@@ -98,6 +99,8 @@
   LUInstanceReinstall, \
   LUInstanceReboot, \
   LUInstanceConsole
+from ganeti.cmdlib.instance_set_params import \
+  LUInstanceSetParams
 from ganeti.cmdlib.instance_query import \
   LUInstanceQueryData
 from ganeti.cmdlib.backup import \
diff --git a/lib/cmdlib/backup.py b/lib/cmdlib/backup.py
index d305c94..3486291 100644
--- a/lib/cmdlib/backup.py
+++ b/lib/cmdlib/backup.py
@@ -157,11 +157,6 @@
       #  - removing the removal operation altogether
       self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
 
-      # Allocations should be stopped while this LU runs with node locks, but
-      # it doesn't have to be exclusive
-      self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
-      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
-
   def DeclareLocks(self, level):
     """Last minute lock declaration."""
     # All nodes are locked anyway, so nothing to do here.
@@ -283,13 +278,6 @@
       raise errors.ProgrammerError("Unhandled export mode %r" %
                                    self.op.mode)
 
-    # instance disk type verification
-    # TODO: Implement export support for file-based disks
-    for disk in self.cfg.GetInstanceDisks(self.instance.uuid):
-      if disk.dev_type in constants.DTS_FILEBASED:
-        raise errors.OpPrereqError("Export not supported for instances with"
-                                   " file-based disks", errors.ECODE_INVAL)
-
     # Check prerequisites for zeroing
     if self.op.zero_free_space:
       # Check that user shutdown detection has been enabled
@@ -297,18 +285,19 @@
       if self.instance.hypervisor == constants.HT_KVM and \
          not hvparams.get(constants.HV_KVM_USER_SHUTDOWN, False):
         raise errors.OpPrereqError("Instance shutdown detection must be "
-                                   "enabled for zeroing to work")
+                                   "enabled for zeroing to work",
+                                   errors.ECODE_INVAL)
 
       # Check that the instance is set to boot from the disk
       if constants.HV_BOOT_ORDER in hvparams and \
          hvparams[constants.HV_BOOT_ORDER] != constants.HT_BO_DISK:
         raise errors.OpPrereqError("Booting from disk must be set for zeroing "
-                                   "to work")
+                                   "to work", errors.ECODE_INVAL)
 
       # Check that the zeroing image is set
       if not self.cfg.GetZeroingImage():
         raise errors.OpPrereqError("A zeroing image must be set for zeroing to"
-                                   " work")
+                                   " work", errors.ECODE_INVAL)
 
       if self.op.zeroing_timeout_fixed is None:
         self.op.zeroing_timeout_fixed = constants.HELPER_VM_STARTUP
@@ -320,7 +309,13 @@
       if (self.op.zeroing_timeout_fixed is not None or
           self.op.zeroing_timeout_per_mib is not None):
         raise errors.OpPrereqError("Zeroing timeout options can only be used"
-                                   " only with the --zero-free-space option")
+                                   " with the --zero-free-space option",
+                                   errors.ECODE_INVAL)
+
+    if self.op.long_sleep and not self.op.shutdown:
+      raise errors.OpPrereqError("The long sleep option only makes sense when"
+                                 " the instance can be shut down.",
+                                 errors.ECODE_INVAL)
 
     self.secondary_nodes = \
       self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
@@ -428,6 +423,34 @@
 
     feedback_fn("Zeroing completed!")
 
+  def StartInstance(self, feedback_fn, src_node_uuid):
+    """Send the node instructions to start the instance.
+
+    @raise errors.OpExecError: If the instance didn't start up.
+
+    """
+    assert self.instance.disks_active
+    feedback_fn("Starting instance %s" % self.instance.name)
+    result = self.rpc.call_instance_start(src_node_uuid,
+                                          (self.instance, None, None),
+                                          False, self.op.reason)
+    msg = result.fail_msg
+    if msg:
+      feedback_fn("Failed to start instance: %s" % msg)
+      ShutdownInstanceDisks(self, self.instance)
+      raise errors.OpExecError("Could not start instance: %s" % msg)
+
+  def TrySnapshot(self):
+    """Returns true if there is a reason to prefer a snapshot."""
+    return (not self.op.remove_instance and
+            self.instance.admin_state == constants.ADMINST_UP)
+
+  def DoReboot(self):
+    """Returns true iff the instance needs to be started after transfer."""
+    return (self.op.shutdown and
+            self.instance.admin_state == constants.ADMINST_UP and
+            not self.op.remove_instance)
+
   def Exec(self, feedback_fn):
     """Export an instance to an image in the cluster.
 
@@ -462,22 +485,25 @@
       helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
                                                      self.instance)
 
-      helper.CreateSnapshots()
-      try:
-        if (self.op.shutdown and
-            self.instance.admin_state == constants.ADMINST_UP and
-            not self.op.remove_instance):
-          assert self.instance.disks_active
-          feedback_fn("Starting instance %s" % self.instance.name)
-          result = self.rpc.call_instance_start(src_node_uuid,
-                                                (self.instance, None, None),
-                                                False, self.op.reason)
-          msg = result.fail_msg
-          if msg:
-            feedback_fn("Failed to start instance: %s" % msg)
-            ShutdownInstanceDisks(self, self.instance)
-            raise errors.OpExecError("Could not start instance: %s" % msg)
+      snapshots_available = False
+      if self.TrySnapshot():
+        snapshots_available = helper.CreateSnapshots()
+        if not snapshots_available:
+          if not self.op.shutdown:
+            raise errors.OpExecError(
+              "Not all disks could be snapshotted, and you requested a live "
+              "export; aborting"
+            )
+          if not self.op.long_sleep:
+            raise errors.OpExecError(
+              "Not all disks could be snapshotted, and you did not allow the "
+              "instance to remain offline for a longer time through the "
+              "--long-sleep option; aborting"
+            )
 
+      try:
+        if self.DoReboot() and snapshots_available:
+          self.StartInstance(feedback_fn, src_node_uuid)
         if self.op.mode == constants.EXPORT_MODE_LOCAL:
           (fin_resu, dresults) = helper.LocalExport(self.dst_node,
                                                     self.op.compress)
@@ -495,6 +521,9 @@
                                                      key_name, dest_ca_pem,
                                                      self.op.compress,
                                                      timeouts)
+
+        if self.DoReboot() and not snapshots_available:
+          self.StartInstance(feedback_fn, src_node_uuid)
       finally:
         helper.Cleanup()
 
@@ -546,15 +575,8 @@
       # we don't need to lock the instance itself, as nothing will happen to it
       # (and we can remove exports also for a removed instance)
       locking.LEVEL_NODE: locking.ALL_SET,
-
-      # Removing backups is quick, so blocking allocations is justified
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
       }
 
-    # Allocations should be stopped while this LU runs with node locks, but it
-    # doesn't have to be exclusive
-    self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
-
   def Exec(self, feedback_fn):
     """Remove any export.
 
diff --git a/lib/cmdlib/base.py b/lib/cmdlib/base.py
index 31f65d2..6307bb1 100644
--- a/lib/cmdlib/base.py
+++ b/lib/cmdlib/base.py
@@ -189,7 +189,6 @@
       locking.LEVEL_CLUSTER: (lambda: [locking.BGL]),
       locking.LEVEL_INSTANCE:
         lambda: self.cfg.GetInstanceNames(self.cfg.GetInstanceList()),
-      locking.LEVEL_NODE_ALLOC: (lambda: [locking.NAL]),
       locking.LEVEL_NODEGROUP: self.cfg.GetNodeGroupList,
       locking.LEVEL_NODE: self.cfg.GetNodeList,
       locking.LEVEL_NODE_RES: self.cfg.GetNodeList,
@@ -440,7 +439,7 @@
     # pylint: disable=W0613,R0201
     return lu_result
 
-  def _ExpandAndLockInstance(self):
+  def _ExpandAndLockInstance(self, allow_forthcoming=False):
     """Helper function to expand and lock an instance.
 
     Many LUs that work on an instance take its name in self.op.instance_name
@@ -449,6 +448,10 @@
     name. It also initializes needed_locks as a dict, if this hasn't been done
     before.
 
+    @param allow_forthcoming: if True, do not insist that the instance be real;
+        the default behaviour is to raise a prerequisite error if the specified
+        instance is forthcoming.
+
     """
     if self.needed_locks is None:
       self.needed_locks = {}
@@ -459,6 +462,10 @@
       ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                 self.op.instance_name)
     self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
+    if not allow_forthcoming:
+      if self.cfg.GetInstanceInfo(self.op.instance_uuid).forthcoming:
+        raise errors.OpPrereqError(
+          "forthcoming instances not supported for this operation")
 
   def _LockInstancesNodes(self, primary_only=False,
                           level=locking.LEVEL_NODE):
@@ -510,6 +517,12 @@
 
     del self.recalculate_locks[level]
 
+  def AssertReleasedLocks(self, level):
+    """Raise AssertionError if the LU holds some locks of the given level.
+
+    """
+    assert not self.owned_locks(level)
+
 
 class NoHooksLU(LogicalUnit): # pylint: disable=W0223
   """Simple LU which runs no hooks.
diff --git a/lib/cmdlib/cluster.py b/lib/cmdlib/cluster.py
deleted file mode 100644
index cc2fa01..0000000
--- a/lib/cmdlib/cluster.py
+++ /dev/null
@@ -1,3937 +0,0 @@
-#
-#
-
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# 1. Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
-# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-"""Logical units dealing with the cluster."""
-
-import copy
-import itertools
-import logging
-import operator
-import os
-import re
-import time
-
-from ganeti import compat
-from ganeti import constants
-from ganeti import errors
-from ganeti import hypervisor
-from ganeti import locking
-from ganeti import masterd
-from ganeti import netutils
-from ganeti import objects
-from ganeti import opcodes
-from ganeti import pathutils
-from ganeti import query
-import ganeti.rpc.node as rpc
-from ganeti import runtime
-from ganeti import ssh
-from ganeti import uidpool
-from ganeti import utils
-from ganeti import vcluster
-
-from ganeti.cmdlib.base import NoHooksLU, QueryBase, LogicalUnit, \
-  ResultWithJobs
-from ganeti.cmdlib.common import ShareAll, RunPostHook, \
-  ComputeAncillaryFiles, RedistributeAncillaryFiles, UploadHelper, \
-  GetWantedInstances, MergeAndVerifyHvState, MergeAndVerifyDiskState, \
-  GetUpdatedIPolicy, ComputeNewInstanceViolations, GetUpdatedParams, \
-  CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
-  ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob, \
-  CheckIpolicyVsDiskTemplates, CheckDiskAccessModeValidity, \
-  CheckDiskAccessModeConsistency, GetClientCertDigest, \
-  AddInstanceCommunicationNetworkOp, ConnectInstanceCommunicationNetworkOp, \
-  CheckImageValidity, CheckDiskAccessModeConsistency, EnsureKvmdOnNodes, \
-  WarnAboutFailedSshUpdates
-
-import ganeti.masterd.instance
-
-
-class LUClusterRenewCrypto(NoHooksLU):
-  """Renew the cluster's crypto tokens.
-
-  """
-
-  _MAX_NUM_RETRIES = 3
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self.needed_locks = {
-      locking.LEVEL_NODE: locking.ALL_SET,
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
-    }
-    self.share_locks = ShareAll()
-    self.share_locks[locking.LEVEL_NODE] = 0
-    self.share_locks[locking.LEVEL_NODE_ALLOC] = 0
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks whether the cluster is empty.
-
-    Any errors are signaled by raising errors.OpPrereqError.
-
-    """
-    self._ssh_renewal_suppressed = \
-      not self.cfg.GetClusterInfo().modify_ssh_setup and self.op.ssh_keys
-
-  def _RenewNodeSslCertificates(self, feedback_fn):
-    """Renews the nodes' SSL certificates.
-
-    Note that most of this operation is done in gnt_cluster.py, this LU only
-    takes care of the renewal of the client SSL certificates.
-
-    """
-    master_uuid = self.cfg.GetMasterNode()
-    cluster = self.cfg.GetClusterInfo()
-
-    logging.debug("Renewing the master's SSL node certificate."
-                  " Master's UUID: %s.", master_uuid)
-
-    # mapping node UUIDs to client certificate digests
-    digest_map = {}
-    master_digest = utils.GetCertificateDigest(
-        cert_filename=pathutils.NODED_CLIENT_CERT_FILE)
-    digest_map[master_uuid] = master_digest
-    logging.debug("Adding the master's SSL node certificate digest to the"
-                  " configuration. Master's UUID: %s, Digest: %s",
-                  master_uuid, master_digest)
-
-    node_errors = {}
-    nodes = self.cfg.GetAllNodesInfo()
-    logging.debug("Renewing non-master nodes' node certificates.")
-    for (node_uuid, node_info) in nodes.items():
-      if node_info.offline:
-        feedback_fn("* Skipping offline node %s" % node_info.name)
-        logging.debug("Skipping offline node %s (UUID: %s).",
-                      node_info.name, node_uuid)
-        continue
-      if node_uuid != master_uuid:
-        logging.debug("Adding certificate digest of node '%s'.", node_uuid)
-        last_exception = None
-        for i in range(self._MAX_NUM_RETRIES):
-          try:
-            if node_info.master_candidate:
-              node_digest = GetClientCertDigest(self, node_uuid)
-              digest_map[node_uuid] = node_digest
-              logging.debug("Added the node's certificate to candidate"
-                            " certificate list. Current list: %s.",
-                            str(cluster.candidate_certs))
-            break
-          except errors.OpExecError as e:
-            last_exception = e
-            logging.error("Could not fetch a non-master node's SSL node"
-                          " certificate at attempt no. %s. The node's UUID"
-                          " is %s, and the error was: %s.",
-                          str(i), node_uuid, e)
-        else:
-          if last_exception:
-            node_errors[node_uuid] = last_exception
-
-    if node_errors:
-      msg = ("Some nodes' SSL client certificates could not be fetched."
-             " Please make sure those nodes are reachable and rerun"
-             " the operation. The affected nodes and their errors are:\n")
-      for uuid, e in node_errors.items():
-        msg += "Node %s: %s\n" % (uuid, e)
-      feedback_fn(msg)
-
-    self.cfg.SetCandidateCerts(digest_map)
-
-  def _RenewSshKeys(self, feedback_fn):
-    """Renew all nodes' SSH keys.
-
-    """
-    master_uuid = self.cfg.GetMasterNode()
-
-    nodes = self.cfg.GetAllNodesInfo()
-    nodes_uuid_names = [(node_uuid, node_info.name) for (node_uuid, node_info)
-                        in nodes.items() if not node_info.offline]
-    node_names = [name for (_, name) in nodes_uuid_names]
-    node_uuids = [uuid for (uuid, _) in nodes_uuid_names]
-    potential_master_candidates = self.cfg.GetPotentialMasterCandidates()
-    master_candidate_uuids = self.cfg.GetMasterCandidateUuids()
-
-    result = self.rpc.call_node_ssh_keys_renew(
-      [master_uuid],
-      node_uuids, node_names,
-      master_candidate_uuids,
-      potential_master_candidates)
-
-    # Check if there were serious errors (for example master key files not
-    # writable).
-    result[master_uuid].Raise("Could not renew the SSH keys of all nodes")
-
-    # Process any non-disruptive errors (a few nodes unreachable etc.)
-    WarnAboutFailedSshUpdates(result, master_uuid, feedback_fn)
-
-  def Exec(self, feedback_fn):
-    if self.op.node_certificates:
-      feedback_fn("Renewing Node SSL certificates")
-      self._RenewNodeSslCertificates(feedback_fn)
-    if self.op.ssh_keys and not self._ssh_renewal_suppressed:
-      feedback_fn("Renewing SSH keys")
-      self._RenewSshKeys(feedback_fn)
-    elif self._ssh_renewal_suppressed:
-      feedback_fn("Cannot renew SSH keys if the cluster is configured to not"
-                  " modify the SSH setup.")
-
-
-class LUClusterActivateMasterIp(NoHooksLU):
-  """Activate the master IP on the master node.
-
-  """
-  def Exec(self, feedback_fn):
-    """Activate the master IP.
-
-    """
-    master_params = self.cfg.GetMasterNetworkParameters()
-    ems = self.cfg.GetUseExternalMipScript()
-    result = self.rpc.call_node_activate_master_ip(master_params.uuid,
-                                                   master_params, ems)
-    result.Raise("Could not activate the master IP")
-
-
-class LUClusterDeactivateMasterIp(NoHooksLU):
-  """Deactivate the master IP on the master node.
-
-  """
-  def Exec(self, feedback_fn):
-    """Deactivate the master IP.
-
-    """
-    master_params = self.cfg.GetMasterNetworkParameters()
-    ems = self.cfg.GetUseExternalMipScript()
-    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
-                                                     master_params, ems)
-    result.Raise("Could not deactivate the master IP")
-
-
-class LUClusterConfigQuery(NoHooksLU):
-  """Return configuration values.
-
-  """
-  REQ_BGL = False
-
-  def CheckArguments(self):
-    self.cq = ClusterQuery(None, self.op.output_fields, False)
-
-  def ExpandNames(self):
-    self.cq.ExpandNames(self)
-
-  def DeclareLocks(self, level):
-    self.cq.DeclareLocks(self, level)
-
-  def Exec(self, feedback_fn):
-    result = self.cq.OldStyleQuery(self)
-
-    assert len(result) == 1
-
-    return result[0]
-
-
-class LUClusterDestroy(LogicalUnit):
-  """Logical unit for destroying the cluster.
-
-  """
-  HPATH = "cluster-destroy"
-  HTYPE = constants.HTYPE_CLUSTER
-
-  # Read by the job queue to detect when the cluster is gone and job files will
-  # never be available.
-  # FIXME: This variable should be removed together with the Python job queue.
-  clusterHasBeenDestroyed = False
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    """
-    return {
-      "OP_TARGET": self.cfg.GetClusterName(),
-      }
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    return ([], [])
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks whether the cluster is empty.
-
-    Any errors are signaled by raising errors.OpPrereqError.
-
-    """
-    master = self.cfg.GetMasterNode()
-
-    nodelist = self.cfg.GetNodeList()
-    if len(nodelist) != 1 or nodelist[0] != master:
-      raise errors.OpPrereqError("There are still %d node(s) in"
-                                 " this cluster." % (len(nodelist) - 1),
-                                 errors.ECODE_INVAL)
-    instancelist = self.cfg.GetInstanceList()
-    if instancelist:
-      raise errors.OpPrereqError("There are still %d instance(s) in"
-                                 " this cluster." % len(instancelist),
-                                 errors.ECODE_INVAL)
-
-  def Exec(self, feedback_fn):
-    """Destroys the cluster.
-
-    """
-    master_params = self.cfg.GetMasterNetworkParameters()
-
-    # Run post hooks on master node before it's removed
-    RunPostHook(self, self.cfg.GetNodeName(master_params.uuid))
-
-    ems = self.cfg.GetUseExternalMipScript()
-    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
-                                                     master_params, ems)
-    result.Warn("Error disabling the master IP address", self.LogWarning)
-
-    self.wconfd.Client().PrepareClusterDestruction(self.wconfdcontext)
-
-    # signal to the job queue that the cluster is gone
-    LUClusterDestroy.clusterHasBeenDestroyed = True
-
-    return master_params.uuid
-
-
-class LUClusterPostInit(LogicalUnit):
-  """Logical unit for running hooks after cluster initialization.
-
-  """
-  HPATH = "cluster-init"
-  HTYPE = constants.HTYPE_CLUSTER
-
-  def CheckArguments(self):
-    self.master_uuid = self.cfg.GetMasterNode()
-    self.master_ndparams = self.cfg.GetNdParams(self.cfg.GetMasterNodeInfo())
-
-    # TODO: When Issue 584 is solved, and None is properly parsed when used
-    # as a default value, ndparams.get(.., None) can be changed to
-    # ndparams[..] to access the values directly
-
-    # OpenvSwitch: Warn user if link is missing
-    if (self.master_ndparams[constants.ND_OVS] and not
-        self.master_ndparams.get(constants.ND_OVS_LINK, None)):
-      self.LogInfo("No physical interface for OpenvSwitch was given."
-                   " OpenvSwitch will not have an outside connection. This"
-                   " might not be what you want.")
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    """
-    return {
-      "OP_TARGET": self.cfg.GetClusterName(),
-      }
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    return ([], [self.cfg.GetMasterNode()])
-
-  def Exec(self, feedback_fn):
-    """Create and configure Open vSwitch
-
-    """
-    if self.master_ndparams[constants.ND_OVS]:
-      result = self.rpc.call_node_configure_ovs(
-                 self.master_uuid,
-                 self.master_ndparams[constants.ND_OVS_NAME],
-                 self.master_ndparams.get(constants.ND_OVS_LINK, None))
-      result.Raise("Could not successully configure Open vSwitch")
-
-    return True
-
-
-class ClusterQuery(QueryBase):
-  FIELDS = query.CLUSTER_FIELDS
-
-  #: Do not sort (there is only one item)
-  SORT_FIELD = None
-
-  def ExpandNames(self, lu):
-    lu.needed_locks = {}
-
-    # The following variables interact with _QueryBase._GetNames
-    self.wanted = locking.ALL_SET
-    self.do_locking = self.use_locking
-
-    if self.do_locking:
-      raise errors.OpPrereqError("Can not use locking for cluster queries",
-                                 errors.ECODE_INVAL)
-
-  def DeclareLocks(self, lu, level):
-    pass
-
-  def _GetQueryData(self, lu):
-    """Computes the list of nodes and their attributes.
-
-    """
-    if query.CQ_CONFIG in self.requested_data:
-      cluster = lu.cfg.GetClusterInfo()
-      nodes = lu.cfg.GetAllNodesInfo()
-    else:
-      cluster = NotImplemented
-      nodes = NotImplemented
-
-    if query.CQ_QUEUE_DRAINED in self.requested_data:
-      drain_flag = os.path.exists(pathutils.JOB_QUEUE_DRAIN_FILE)
-    else:
-      drain_flag = NotImplemented
-
-    if query.CQ_WATCHER_PAUSE in self.requested_data:
-      master_node_uuid = lu.cfg.GetMasterNode()
-
-      result = lu.rpc.call_get_watcher_pause(master_node_uuid)
-      result.Raise("Can't retrieve watcher pause from master node '%s'" %
-                   lu.cfg.GetMasterNodeName())
-
-      watcher_pause = result.payload
-    else:
-      watcher_pause = NotImplemented
-
-    return query.ClusterQueryData(cluster, nodes, drain_flag, watcher_pause)
-
-
-class LUClusterQuery(NoHooksLU):
-  """Query cluster configuration.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self.needed_locks = {}
-
-  def Exec(self, feedback_fn):
-    """Return cluster config.
-
-    """
-    cluster = self.cfg.GetClusterInfo()
-    os_hvp = {}
-
-    # Filter just for enabled hypervisors
-    for os_name, hv_dict in cluster.os_hvp.items():
-      os_hvp[os_name] = {}
-      for hv_name, hv_params in hv_dict.items():
-        if hv_name in cluster.enabled_hypervisors:
-          os_hvp[os_name][hv_name] = hv_params
-
-    # Convert ip_family to ip_version
-    primary_ip_version = constants.IP4_VERSION
-    if cluster.primary_ip_family == netutils.IP6Address.family:
-      primary_ip_version = constants.IP6_VERSION
-
-    result = {
-      "software_version": constants.RELEASE_VERSION,
-      "protocol_version": constants.PROTOCOL_VERSION,
-      "config_version": constants.CONFIG_VERSION,
-      "os_api_version": max(constants.OS_API_VERSIONS),
-      "export_version": constants.EXPORT_VERSION,
-      "vcs_version": constants.VCS_VERSION,
-      "architecture": runtime.GetArchInfo(),
-      "name": cluster.cluster_name,
-      "master": self.cfg.GetMasterNodeName(),
-      "default_hypervisor": cluster.primary_hypervisor,
-      "enabled_hypervisors": cluster.enabled_hypervisors,
-      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
-                        for hypervisor_name in cluster.enabled_hypervisors]),
-      "os_hvp": os_hvp,
-      "beparams": cluster.beparams,
-      "osparams": cluster.osparams,
-      "ipolicy": cluster.ipolicy,
-      "nicparams": cluster.nicparams,
-      "ndparams": cluster.ndparams,
-      "diskparams": cluster.diskparams,
-      "candidate_pool_size": cluster.candidate_pool_size,
-      "max_running_jobs": cluster.max_running_jobs,
-      "max_tracked_jobs": cluster.max_tracked_jobs,
-      "mac_prefix": cluster.mac_prefix,
-      "master_netdev": cluster.master_netdev,
-      "master_netmask": cluster.master_netmask,
-      "use_external_mip_script": cluster.use_external_mip_script,
-      "volume_group_name": cluster.volume_group_name,
-      "drbd_usermode_helper": cluster.drbd_usermode_helper,
-      "file_storage_dir": cluster.file_storage_dir,
-      "shared_file_storage_dir": cluster.shared_file_storage_dir,
-      "maintain_node_health": cluster.maintain_node_health,
-      "ctime": cluster.ctime,
-      "mtime": cluster.mtime,
-      "uuid": cluster.uuid,
-      "tags": list(cluster.GetTags()),
-      "uid_pool": cluster.uid_pool,
-      "default_iallocator": cluster.default_iallocator,
-      "default_iallocator_params": cluster.default_iallocator_params,
-      "reserved_lvs": cluster.reserved_lvs,
-      "primary_ip_version": primary_ip_version,
-      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
-      "hidden_os": cluster.hidden_os,
-      "blacklisted_os": cluster.blacklisted_os,
-      "enabled_disk_templates": cluster.enabled_disk_templates,
-      "install_image": cluster.install_image,
-      "instance_communication_network": cluster.instance_communication_network,
-      "compression_tools": cluster.compression_tools,
-      "enabled_user_shutdown": cluster.enabled_user_shutdown,
-      }
-
-    return result
-
-
-class LUClusterRedistConf(NoHooksLU):
-  """Force the redistribution of cluster configuration.
-
-  This is a very simple LU.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self.needed_locks = {
-      locking.LEVEL_NODE: locking.ALL_SET,
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
-    }
-    self.share_locks = ShareAll()
-
-  def Exec(self, feedback_fn):
-    """Redistribute the configuration.
-
-    """
-    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
-    RedistributeAncillaryFiles(self)
-
-
-class LUClusterRename(LogicalUnit):
-  """Rename the cluster.
-
-  """
-  HPATH = "cluster-rename"
-  HTYPE = constants.HTYPE_CLUSTER
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    """
-    return {
-      "OP_TARGET": self.cfg.GetClusterName(),
-      "NEW_NAME": self.op.name,
-      }
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())
-
-  def CheckPrereq(self):
-    """Verify that the passed name is a valid one.
-
-    """
-    hostname = netutils.GetHostname(name=self.op.name,
-                                    family=self.cfg.GetPrimaryIPFamily())
-
-    new_name = hostname.name
-    self.ip = new_ip = hostname.ip
-    old_name = self.cfg.GetClusterName()
-    old_ip = self.cfg.GetMasterIP()
-    if new_name == old_name and new_ip == old_ip:
-      raise errors.OpPrereqError("Neither the name nor the IP address of the"
-                                 " cluster has changed",
-                                 errors.ECODE_INVAL)
-    if new_ip != old_ip:
-      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
-        raise errors.OpPrereqError("The given cluster IP address (%s) is"
-                                   " reachable on the network" %
-                                   new_ip, errors.ECODE_NOTUNIQUE)
-
-    self.op.name = new_name
-
-  def Exec(self, feedback_fn):
-    """Rename the cluster.
-
-    """
-    clustername = self.op.name
-    new_ip = self.ip
-
-    # shutdown the master IP
-    master_params = self.cfg.GetMasterNetworkParameters()
-    ems = self.cfg.GetUseExternalMipScript()
-    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
-                                                     master_params, ems)
-    result.Raise("Could not disable the master role")
-
-    try:
-      cluster = self.cfg.GetClusterInfo()
-      cluster.cluster_name = clustername
-      cluster.master_ip = new_ip
-      self.cfg.Update(cluster, feedback_fn)
-
-      # update the known hosts file
-      ssh.WriteKnownHostsFile(self.cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
-      node_list = self.cfg.GetOnlineNodeList()
-      try:
-        node_list.remove(master_params.uuid)
-      except ValueError:
-        pass
-      UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
-    finally:
-      master_params.ip = new_ip
-      result = self.rpc.call_node_activate_master_ip(master_params.uuid,
-                                                     master_params, ems)
-      result.Warn("Could not re-enable the master role on the master,"
-                  " please restart manually", self.LogWarning)
-
-    return clustername
-
-
-class LUClusterRepairDiskSizes(NoHooksLU):
-  """Verifies the cluster disks sizes.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    if self.op.instances:
-      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
-      # Not getting the node allocation lock as only a specific set of
-      # instances (and their nodes) is going to be acquired
-      self.needed_locks = {
-        locking.LEVEL_NODE_RES: [],
-        locking.LEVEL_INSTANCE: self.wanted_names,
-        }
-      self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
-    else:
-      self.wanted_names = None
-      self.needed_locks = {
-        locking.LEVEL_NODE_RES: locking.ALL_SET,
-        locking.LEVEL_INSTANCE: locking.ALL_SET,
-
-        # This opcode acquires the node locks for all instances
-        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
-        }
-
-    self.share_locks = {
-      locking.LEVEL_NODE_RES: 1,
-      locking.LEVEL_INSTANCE: 0,
-      locking.LEVEL_NODE_ALLOC: 1,
-      }
-
-  def DeclareLocks(self, level):
-    if level == locking.LEVEL_NODE_RES and self.wanted_names is not None:
-      self._LockInstancesNodes(primary_only=True, level=level)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This only checks the optional instance list against the existing names.
-
-    """
-    if self.wanted_names is None:
-      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
-
-    self.wanted_instances = \
-        map(compat.snd, self.cfg.GetMultiInstanceInfoByName(self.wanted_names))
-
-  def _EnsureChildSizes(self, disk):
-    """Ensure children of the disk have the needed disk size.
-
-    This is valid mainly for DRBD8 and fixes an issue where the
-    children have a smaller disk size.
-
-    @param disk: an L{ganeti.objects.Disk} object
-
-    """
-    if disk.dev_type == constants.DT_DRBD8:
-      assert disk.children, "Empty children for DRBD8?"
-      fchild = disk.children[0]
-      mismatch = fchild.size < disk.size
-      if mismatch:
-        self.LogInfo("Child disk has size %d, parent %d, fixing",
-                     fchild.size, disk.size)
-        fchild.size = disk.size
-
-      # and we recurse on this child only, not on the metadev
-      return self._EnsureChildSizes(fchild) or mismatch
-    else:
-      return False
-
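# A minimal, self-contained sketch (not taken from the Ganeti source) of the
# child-size correction that _EnsureChildSizes performs for DRBD8 disks; the
# FakeDisk class and ensure_child_sizes function are hypothetical names used
# only for illustration.
class FakeDisk(object):
  def __init__(self, dev_type, size, children=None):
    self.dev_type = dev_type        # e.g. "drbd8" or "plain"
    self.size = size                # size in MiB
    self.children = children or []  # [data_child, meta_child] for DRBD8

def ensure_child_sizes(disk):
  """Grow a too-small DRBD8 data child; return True if anything changed."""
  if disk.dev_type != "drbd8":
    return False
  data_child = disk.children[0]     # only the data child is checked
  mismatch = data_child.size < disk.size
  if mismatch:
    data_child.size = disk.size     # grow the child to match the parent
  return ensure_child_sizes(data_child) or mismatch

drbd = FakeDisk("drbd8", 1024, [FakeDisk("plain", 512), FakeDisk("plain", 128)])
assert ensure_child_sizes(drbd) and drbd.children[0].size == 1024
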
-  def Exec(self, feedback_fn):
-    """Verify the size of cluster disks.
-
-    """
-    # TODO: check child disks too
-    # TODO: check differences in size between primary/secondary nodes
-    per_node_disks = {}
-    for instance in self.wanted_instances:
-      pnode = instance.primary_node
-      if pnode not in per_node_disks:
-        per_node_disks[pnode] = []
-      for idx, disk in enumerate(self.cfg.GetInstanceDisks(instance.uuid)):
-        per_node_disks[pnode].append((instance, idx, disk))
-
-    assert not (frozenset(per_node_disks.keys()) -
-                frozenset(self.owned_locks(locking.LEVEL_NODE_RES))), \
-      "Not owning correct locks"
-    assert not self.owned_locks(locking.LEVEL_NODE)
-
-    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
-                                               per_node_disks.keys())
-
-    changed = []
-    for node_uuid, dskl in per_node_disks.items():
-      if not dskl:
-        # no disks on the node
-        continue
-
-      newl = [([v[2].Copy()], v[0]) for v in dskl]
-      node_name = self.cfg.GetNodeName(node_uuid)
-      result = self.rpc.call_blockdev_getdimensions(node_uuid, newl)
-      if result.fail_msg:
-        self.LogWarning("Failure in blockdev_getdimensions call to node"
-                        " %s, ignoring", node_name)
-        continue
-      if len(result.payload) != len(dskl):
-        logging.warning("Invalid result from node %s: len(dskl)=%d,"
-                        " result.payload=%s", node_name, len(dskl),
-                        result.payload)
-        self.LogWarning("Invalid result from node %s, ignoring node results",
-                        node_name)
-        continue
-      for ((instance, idx, disk), dimensions) in zip(dskl, result.payload):
-        if dimensions is None:
-          self.LogWarning("Disk %d of instance %s did not return size"
-                          " information, ignoring", idx, instance.name)
-          continue
-        if not isinstance(dimensions, (tuple, list)):
-          self.LogWarning("Disk %d of instance %s did not return valid"
-                          " dimension information, ignoring", idx,
-                          instance.name)
-          continue
-        (size, spindles) = dimensions
-        if not isinstance(size, (int, long)):
-          self.LogWarning("Disk %d of instance %s did not return valid"
-                          " size information, ignoring", idx, instance.name)
-          continue
-        size = size >> 20
-        if size != disk.size:
-          self.LogInfo("Disk %d of instance %s has mismatched size,"
-                       " correcting: recorded %d, actual %d", idx,
-                       instance.name, disk.size, size)
-          disk.size = size
-          self.cfg.Update(disk, feedback_fn)
-          changed.append((instance.name, idx, "size", size))
-        if es_flags[node_uuid]:
-          if spindles is None:
-            self.LogWarning("Disk %d of instance %s did not return valid"
-                            " spindles information, ignoring", idx,
-                            instance.name)
-          elif disk.spindles is None or disk.spindles != spindles:
-            self.LogInfo("Disk %d of instance %s has mismatched spindles,"
-                         " correcting: recorded %s, actual %s",
-                         idx, instance.name, disk.spindles, spindles)
-            disk.spindles = spindles
-            self.cfg.Update(disk, feedback_fn)
-            changed.append((instance.name, idx, "spindles", disk.spindles))
-        if self._EnsureChildSizes(disk):
-          self.cfg.Update(disk, feedback_fn)
-          changed.append((instance.name, idx, "size", disk.size))
-    return changed
-
-
-def _ValidateNetmask(cfg, netmask):
-  """Checks if a netmask is valid.
-
-  @type cfg: L{config.ConfigWriter}
-  @param cfg: cluster configuration
-  @type netmask: int
-  @param netmask: netmask to be verified
-  @raise errors.OpPrereqError: if the validation fails
-
-  """
-  ip_family = cfg.GetPrimaryIPFamily()
-  try:
-    ipcls = netutils.IPAddress.GetClassFromIpFamily(ip_family)
-  except errors.ProgrammerError:
-    raise errors.OpPrereqError("Invalid primary ip family: %s." %
-                               ip_family, errors.ECODE_INVAL)
-  if not ipcls.ValidateNetmask(netmask):
-    raise errors.OpPrereqError("CIDR netmask (%s) not valid" %
-                               (netmask), errors.ECODE_INVAL)
-
-
-def CheckFileBasedStoragePathVsEnabledDiskTemplates(
-    logging_warn_fn, file_storage_dir, enabled_disk_templates,
-    file_disk_template):
-  """Checks whether the given file-based storage directory is acceptable.
-
-  Note: This function is public, because it is also used in bootstrap.py.
-
-  @type logging_warn_fn: function
-  @param logging_warn_fn: function which accepts a string and logs it
-  @type file_storage_dir: string
-  @param file_storage_dir: the directory to be used for file-based instances
-  @type enabled_disk_templates: list of string
-  @param enabled_disk_templates: the list of enabled disk templates
-  @type file_disk_template: string
-  @param file_disk_template: the file-based disk template for which the
-      path should be checked
-
-  """
-  assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
-            constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
-         ))
-
-  file_storage_enabled = file_disk_template in enabled_disk_templates
-  if file_storage_dir is not None:
-    if file_storage_dir == "":
-      if file_storage_enabled:
-        raise errors.OpPrereqError(
-            "Unsetting the '%s' storage directory while having '%s' storage"
-            " enabled is not permitted." %
-            (file_disk_template, file_disk_template),
-            errors.ECODE_INVAL)
-    else:
-      if not file_storage_enabled:
-        logging_warn_fn(
-            "Specified a %s storage directory, although %s storage is not"
-            " enabled." % (file_disk_template, file_disk_template))
-  else:
-    raise errors.ProgrammerError("Received %s storage dir with value"
-                                 " 'None'." % file_disk_template)
-
-
-def CheckFileStoragePathVsEnabledDiskTemplates(
-    logging_warn_fn, file_storage_dir, enabled_disk_templates):
-  """Checks whether the given file storage directory is acceptable.
-
-  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
-
-  """
-  CheckFileBasedStoragePathVsEnabledDiskTemplates(
-      logging_warn_fn, file_storage_dir, enabled_disk_templates,
-      constants.DT_FILE)
-
-
-def CheckSharedFileStoragePathVsEnabledDiskTemplates(
-    logging_warn_fn, file_storage_dir, enabled_disk_templates):
-  """Checks whether the given shared file storage directory is acceptable.
-
-  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
-
-  """
-  CheckFileBasedStoragePathVsEnabledDiskTemplates(
-      logging_warn_fn, file_storage_dir, enabled_disk_templates,
-      constants.DT_SHARED_FILE)
-
-
-def CheckGlusterStoragePathVsEnabledDiskTemplates(
-    logging_warn_fn, file_storage_dir, enabled_disk_templates):
-  """Checks whether the given gluster storage directory is acceptable.
-
-  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
-
-  """
-  CheckFileBasedStoragePathVsEnabledDiskTemplates(
-      logging_warn_fn, file_storage_dir, enabled_disk_templates,
-      constants.DT_GLUSTER)
-
-
-def CheckCompressionTools(tools):
-  """Check whether the provided compression tools look like executables.
-
-  @type tools: list of string
-  @param tools: The tools provided as opcode input
-
-  """
-  regex = re.compile('^[-_a-zA-Z0-9]+$')
-  illegal_tools = [t for t in tools if not regex.match(t)]
-
-  if illegal_tools:
-    raise errors.OpPrereqError(
-      "The tools '%s' contain illegal characters: only alphanumeric values,"
-      " dashes, and underscores are allowed" % ", ".join(illegal_tools),
-      errors.ECODE_INVAL
-    )
-
-  if constants.IEC_GZIP not in tools:
-    raise errors.OpPrereqError("For compatibility reasons, the %s utility must"
-                               " be present among the compression tools" %
-                               constants.IEC_GZIP, errors.ECODE_INVAL)
-
-  if constants.IEC_NONE in tools:
-    raise errors.OpPrereqError("%s is a reserved value used for no compression,"
-                               " and cannot be used as the name of a tool" %
-                               constants.IEC_NONE, errors.ECODE_INVAL)
-
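# A short illustration (not part of the Ganeti source) of the naming rule that
# CheckCompressionTools enforces above: tool names may only contain
# alphanumerics, dashes and underscores, "gzip" must always be listed, and
# "none" is reserved for "no compression".
import re

_TOOL_RE = re.compile(r"^[-_a-zA-Z0-9]+$")

assert _TOOL_RE.match("gzip")                   # accepted
assert _TOOL_RE.match("lzop")                   # accepted
assert not _TOOL_RE.match("gzip --rsyncable")   # rejected: contains a space
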
-
-class LUClusterSetParams(LogicalUnit):
-  """Change the parameters of the cluster.
-
-  """
-  HPATH = "cluster-modify"
-  HTYPE = constants.HTYPE_CLUSTER
-  REQ_BGL = False
-
-  def CheckArguments(self):
-    """Check parameters
-
-    """
-    if self.op.uid_pool:
-      uidpool.CheckUidPool(self.op.uid_pool)
-
-    if self.op.add_uids:
-      uidpool.CheckUidPool(self.op.add_uids)
-
-    if self.op.remove_uids:
-      uidpool.CheckUidPool(self.op.remove_uids)
-
-    if self.op.mac_prefix:
-      self.op.mac_prefix = \
-          utils.NormalizeAndValidateThreeOctetMacPrefix(self.op.mac_prefix)
-
-    if self.op.master_netmask is not None:
-      _ValidateNetmask(self.cfg, self.op.master_netmask)
-
-    if self.op.diskparams:
-      for dt_params in self.op.diskparams.values():
-        utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
-      try:
-        utils.VerifyDictOptions(self.op.diskparams, constants.DISK_DT_DEFAULTS)
-        CheckDiskAccessModeValidity(self.op.diskparams)
-      except errors.OpPrereqError, err:
-        raise errors.OpPrereqError("While verifying diskparams options: %s" %
-                                   err, errors.ECODE_INVAL)
-
-    if self.op.install_image is not None:
-      CheckImageValidity(self.op.install_image,
-                         "Install image must be an absolute path or a URL")
-
-  def ExpandNames(self):
-    # FIXME: in the future maybe other cluster params won't require checking on
-    # all nodes to be modified.
-    # FIXME: This opcode changes cluster-wide settings. Is acquiring all
-    # resource locks the right thing, shouldn't it be the BGL instead?
-    self.needed_locks = {
-      locking.LEVEL_NODE: locking.ALL_SET,
-      locking.LEVEL_INSTANCE: locking.ALL_SET,
-      locking.LEVEL_NODEGROUP: locking.ALL_SET,
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
-    }
-    self.share_locks = ShareAll()
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    """
-    return {
-      "OP_TARGET": self.cfg.GetClusterName(),
-      "NEW_VG_NAME": self.op.vg_name,
-      }
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    mn = self.cfg.GetMasterNode()
-    return ([mn], [mn])
-
-  def _CheckVgName(self, node_uuids, enabled_disk_templates,
-                   new_enabled_disk_templates):
-    """Check the consistency of the vg name on all nodes and in case it gets
-       unset whether there are instances still using it.
-
-    """
-    lvm_is_enabled = utils.IsLvmEnabled(enabled_disk_templates)
-    lvm_gets_enabled = utils.LvmGetsEnabled(enabled_disk_templates,
-                                            new_enabled_disk_templates)
-    current_vg_name = self.cfg.GetVGName()
-
-    if self.op.vg_name == '':
-      if lvm_is_enabled:
-        raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
-                                   " disk templates are or get enabled.",
-                                   errors.ECODE_INVAL)
-
-    if self.op.vg_name is None:
-      if current_vg_name is None and lvm_is_enabled:
-        raise errors.OpPrereqError("Please specify a volume group when"
-                                   " enabling lvm-based disk-templates.",
-                                   errors.ECODE_INVAL)
-
-    if self.op.vg_name is not None and not self.op.vg_name:
-      if self.cfg.HasAnyDiskOfType(constants.DT_PLAIN):
-        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
-                                   " instances exist", errors.ECODE_INVAL)
-
-    if (self.op.vg_name is not None and lvm_is_enabled) or \
-        (self.cfg.GetVGName() is not None and lvm_gets_enabled):
-      self._CheckVgNameOnNodes(node_uuids)
-
-  def _CheckVgNameOnNodes(self, node_uuids):
-    """Check the status of the volume group on each node.
-
-    """
-    vglist = self.rpc.call_vg_list(node_uuids)
-    for node_uuid in node_uuids:
-      msg = vglist[node_uuid].fail_msg
-      if msg:
-        # ignoring down node
-        self.LogWarning("Error while gathering data on node %s"
-                        " (ignoring node): %s",
-                        self.cfg.GetNodeName(node_uuid), msg)
-        continue
-      vgstatus = utils.CheckVolumeGroupSize(vglist[node_uuid].payload,
-                                            self.op.vg_name,
-                                            constants.MIN_VG_SIZE)
-      if vgstatus:
-        raise errors.OpPrereqError("Error on node '%s': %s" %
-                                   (self.cfg.GetNodeName(node_uuid), vgstatus),
-                                   errors.ECODE_ENVIRON)
-
-  @staticmethod
-  def _GetDiskTemplateSetsInner(op_enabled_disk_templates,
-                                old_enabled_disk_templates):
-    """Computes three sets of disk templates.
-
-    @see: C{_GetDiskTemplateSets} for more details.
-
-    """
-    enabled_disk_templates = None
-    new_enabled_disk_templates = []
-    disabled_disk_templates = []
-    if op_enabled_disk_templates:
-      enabled_disk_templates = op_enabled_disk_templates
-      new_enabled_disk_templates = \
-        list(set(enabled_disk_templates)
-             - set(old_enabled_disk_templates))
-      disabled_disk_templates = \
-        list(set(old_enabled_disk_templates)
-             - set(enabled_disk_templates))
-    else:
-      enabled_disk_templates = old_enabled_disk_templates
-    return (enabled_disk_templates, new_enabled_disk_templates,
-            disabled_disk_templates)
-
-  def _GetDiskTemplateSets(self, cluster):
-    """Computes three sets of disk templates.
-
-    The three sets are:
-      - disk templates that will be enabled after this operation (no matter if
-        they were enabled before or not)
-      - disk templates that get enabled by this operation (thus haven't been
-        enabled before)
-      - disk templates that get disabled by this operation
-
-    """
-    return self._GetDiskTemplateSetsInner(self.op.enabled_disk_templates,
-                                          cluster.enabled_disk_templates)
-
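# A worked example (not part of the Ganeti source) of the set arithmetic done
# by _GetDiskTemplateSetsInner, for a hypothetical request that replaces the
# enabled disk templates ["drbd", "plain"] with ["plain", "file"].
old_enabled = ["drbd", "plain"]
op_enabled = ["plain", "file"]

enabled = op_enabled                                       # after the change
newly_enabled = list(set(op_enabled) - set(old_enabled))   # ["file"]
disabled = list(set(old_enabled) - set(op_enabled))        # ["drbd"]

assert newly_enabled == ["file"]
assert disabled == ["drbd"]
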
-  def _CheckIpolicy(self, cluster, enabled_disk_templates):
-    """Checks the ipolicy.
-
-    @type cluster: C{objects.Cluster}
-    @param cluster: the cluster's configuration
-    @type enabled_disk_templates: list of string
-    @param enabled_disk_templates: list of (possibly newly) enabled disk
-      templates
-
-    """
-    # FIXME: write unit tests for this
-    if self.op.ipolicy:
-      self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
-                                           group_policy=False)
-
-      CheckIpolicyVsDiskTemplates(self.new_ipolicy,
-                                  enabled_disk_templates)
-
-      all_instances = self.cfg.GetAllInstancesInfo().values()
-      violations = set()
-      for group in self.cfg.GetAllNodeGroupsInfo().values():
-        instances = frozenset(
-          [inst for inst in all_instances
-           if compat.any(nuuid in group.members
-           for nuuid in self.cfg.GetInstanceNodes(inst.uuid))])
-        new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
-        ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
-        new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
-                                           self.cfg)
-        if new:
-          violations.update(new)
-
-      if violations:
-        self.LogWarning("After the ipolicy change the following instances"
-                        " violate them: %s",
-                        utils.CommaJoin(utils.NiceSort(violations)))
-    else:
-      CheckIpolicyVsDiskTemplates(cluster.ipolicy,
-                                  enabled_disk_templates)
-
-  def _CheckDrbdHelperOnNodes(self, drbd_helper, node_uuids):
-    """Checks whether the set DRBD helper actually exists on the nodes.
-
-    @type drbd_helper: string
-    @param drbd_helper: path of the drbd usermode helper binary
-    @type node_uuids: list of strings
-    @param node_uuids: list of node UUIDs to check for the helper
-
-    """
-    # checks given drbd helper on all nodes
-    helpers = self.rpc.call_drbd_helper(node_uuids)
-    for (_, ninfo) in self.cfg.GetMultiNodeInfo(node_uuids):
-      if ninfo.offline:
-        self.LogInfo("Not checking drbd helper on offline node %s",
-                     ninfo.name)
-        continue
-      msg = helpers[ninfo.uuid].fail_msg
-      if msg:
-        raise errors.OpPrereqError("Error checking drbd helper on node"
-                                   " '%s': %s" % (ninfo.name, msg),
-                                   errors.ECODE_ENVIRON)
-      node_helper = helpers[ninfo.uuid].payload
-      if node_helper != drbd_helper:
-        raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
-                                   (ninfo.name, node_helper),
-                                   errors.ECODE_ENVIRON)
-
-  def _CheckDrbdHelper(self, node_uuids, drbd_enabled, drbd_gets_enabled):
-    """Check the DRBD usermode helper.
-
-    @type node_uuids: list of strings
-    @param node_uuids: a list of nodes' UUIDs
-    @type drbd_enabled: boolean
-    @param drbd_enabled: whether DRBD will be enabled after this operation
-      (no matter if it was disabled before or not)
-    @type drbd_gets_enabled: boolean
-    @param drbd_gets_enabled: true if DRBD was disabled before this
-      operation, but will be enabled afterwards
-
-    """
-    if self.op.drbd_helper == '':
-      if drbd_enabled:
-        raise errors.OpPrereqError("Cannot disable drbd helper while"
-                                   " DRBD is enabled.", errors.ECODE_STATE)
-      if self.cfg.HasAnyDiskOfType(constants.DT_DRBD8):
-        raise errors.OpPrereqError("Cannot disable drbd helper while"
-                                   " drbd-based instances exist",
-                                   errors.ECODE_INVAL)
-
-    else:
-      if self.op.drbd_helper is not None and drbd_enabled:
-        self._CheckDrbdHelperOnNodes(self.op.drbd_helper, node_uuids)
-      else:
-        if drbd_gets_enabled:
-          current_drbd_helper = self.cfg.GetClusterInfo().drbd_usermode_helper
-          if current_drbd_helper is not None:
-            self._CheckDrbdHelperOnNodes(current_drbd_helper, node_uuids)
-          else:
-            raise errors.OpPrereqError("Cannot enable DRBD without a"
-                                       " DRBD usermode helper set.",
-                                       errors.ECODE_STATE)
-
-  def _CheckInstancesOfDisabledDiskTemplates(
-      self, disabled_disk_templates):
-    """Check whether we try to disable a disk template that is in use.
-
-    @type disabled_disk_templates: list of string
-    @param disabled_disk_templates: list of disk templates that are going to
-      be disabled by this operation
-
-    """
-    for disk_template in disabled_disk_templates:
-      if self.cfg.HasAnyDiskOfType(disk_template):
-        raise errors.OpPrereqError(
-            "Cannot disable disk template '%s', because there is at least one"
-            " instance using it." % disk_template,
-            errors.ECODE_STATE)
-
-  @staticmethod
-  def _CheckInstanceCommunicationNetwork(network, warning_fn):
-    """Check whether an existing network is configured for instance
-    communication.
-
-    Checks whether an existing network is configured with the
-    parameters that are advisable for instance communication, and
-    otherwise issue security warnings.
-
-    @type network: L{ganeti.objects.Network}
-    @param network: L{ganeti.objects.Network} object whose
-                    configuration is being checked
-    @type warning_fn: function
-    @param warning_fn: function used to print warnings
-    @rtype: None
-    @return: None
-
-    """
-    def _MaybeWarn(err, val, default):
-      if val != default:
-        warning_fn("Supplied instance communication network '%s' %s '%s',"
-                   " this might pose a security risk (default is '%s').",
-                   network.name, err, val, default)
-
-    if network.network is None:
-      raise errors.OpPrereqError("Supplied instance communication network '%s'"
-                                 " must have an IPv4 network address.",
-                                 network.name)
-
-    _MaybeWarn("has an IPv4 gateway", network.gateway, None)
-    _MaybeWarn("has a non-standard IPv4 network address", network.network,
-               constants.INSTANCE_COMMUNICATION_NETWORK4)
-    _MaybeWarn("has an IPv6 gateway", network.gateway6, None)
-    _MaybeWarn("has a non-standard IPv6 network address", network.network6,
-               constants.INSTANCE_COMMUNICATION_NETWORK6)
-    _MaybeWarn("has a non-standard MAC prefix", network.mac_prefix,
-               constants.INSTANCE_COMMUNICATION_MAC_PREFIX)
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks whether the given params don't conflict and
-    if the given volume group is valid.
-
-    """
-    node_uuids = self.owned_locks(locking.LEVEL_NODE)
-    self.cluster = cluster = self.cfg.GetClusterInfo()
-
-    vm_capable_node_uuids = [node.uuid
-                             for node in self.cfg.GetAllNodesInfo().values()
-                             if node.uuid in node_uuids and node.vm_capable]
-
-    (enabled_disk_templates, new_enabled_disk_templates,
-      disabled_disk_templates) = self._GetDiskTemplateSets(cluster)
-    self._CheckInstancesOfDisabledDiskTemplates(disabled_disk_templates)
-
-    self._CheckVgName(vm_capable_node_uuids, enabled_disk_templates,
-                      new_enabled_disk_templates)
-
-    if self.op.file_storage_dir is not None:
-      CheckFileStoragePathVsEnabledDiskTemplates(
-          self.LogWarning, self.op.file_storage_dir, enabled_disk_templates)
-
-    if self.op.shared_file_storage_dir is not None:
-      CheckSharedFileStoragePathVsEnabledDiskTemplates(
-          self.LogWarning, self.op.shared_file_storage_dir,
-          enabled_disk_templates)
-
-    drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
-    drbd_gets_enabled = constants.DT_DRBD8 in new_enabled_disk_templates
-    self._CheckDrbdHelper(vm_capable_node_uuids,
-                          drbd_enabled, drbd_gets_enabled)
-
-    # validate params changes
-    if self.op.beparams:
-      objects.UpgradeBeParams(self.op.beparams)
-      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
-      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
-
-    if self.op.ndparams:
-      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
-      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
-
-      # TODO: we need a more general way to handle resetting
-      # cluster-level parameters to default values
-      if self.new_ndparams["oob_program"] == "":
-        self.new_ndparams["oob_program"] = \
-            constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
-
-    if self.op.hv_state:
-      new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
-                                           self.cluster.hv_state_static)
-      self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
-                               for hv, values in new_hv_state.items())
-
-    if self.op.disk_state:
-      new_disk_state = MergeAndVerifyDiskState(self.op.disk_state,
-                                               self.cluster.disk_state_static)
-      self.new_disk_state = \
-        dict((storage, dict((name, cluster.SimpleFillDiskState(values))
-                            for name, values in svalues.items()))
-             for storage, svalues in new_disk_state.items())
-
-    self._CheckIpolicy(cluster, enabled_disk_templates)
-
-    if self.op.nicparams:
-      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
-      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
-      objects.NIC.CheckParameterSyntax(self.new_nicparams)
-      nic_errors = []
-
-      # check all instances for consistency
-      for instance in self.cfg.GetAllInstancesInfo().values():
-        for nic_idx, nic in enumerate(instance.nics):
-          params_copy = copy.deepcopy(nic.nicparams)
-          params_filled = objects.FillDict(self.new_nicparams, params_copy)
-
-          # check parameter syntax
-          try:
-            objects.NIC.CheckParameterSyntax(params_filled)
-          except errors.ConfigurationError, err:
-            nic_errors.append("Instance %s, nic/%d: %s" %
-                              (instance.name, nic_idx, err))
-
-          # if we're moving instances to routed, check that they have an ip
-          target_mode = params_filled[constants.NIC_MODE]
-          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
-            nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
-                              " address" % (instance.name, nic_idx))
-      if nic_errors:
-        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
-                                   "\n".join(nic_errors), errors.ECODE_INVAL)
-
-    # hypervisor list/parameters
-    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
-    if self.op.hvparams:
-      for hv_name, hv_dict in self.op.hvparams.items():
-        if hv_name not in self.new_hvparams:
-          self.new_hvparams[hv_name] = hv_dict
-        else:
-          self.new_hvparams[hv_name].update(hv_dict)
-
-    # disk template parameters
-    self.new_diskparams = objects.FillDict(cluster.diskparams, {})
-    if self.op.diskparams:
-      for dt_name, dt_params in self.op.diskparams.items():
-        if dt_name not in self.new_diskparams:
-          self.new_diskparams[dt_name] = dt_params
-        else:
-          self.new_diskparams[dt_name].update(dt_params)
-      CheckDiskAccessModeConsistency(self.op.diskparams, self.cfg)
-
-    # os hypervisor parameters
-    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
-    if self.op.os_hvp:
-      for os_name, hvs in self.op.os_hvp.items():
-        if os_name not in self.new_os_hvp:
-          self.new_os_hvp[os_name] = hvs
-        else:
-          for hv_name, hv_dict in hvs.items():
-            if hv_dict is None:
-              # Delete if it exists
-              self.new_os_hvp[os_name].pop(hv_name, None)
-            elif hv_name not in self.new_os_hvp[os_name]:
-              self.new_os_hvp[os_name][hv_name] = hv_dict
-            else:
-              self.new_os_hvp[os_name][hv_name].update(hv_dict)
-
-    # os parameters
-    self._BuildOSParams(cluster)
-
-    # changes to the hypervisor list
-    if self.op.enabled_hypervisors is not None:
-      for hv in self.op.enabled_hypervisors:
-        # if the hypervisor doesn't already exist in the cluster
-        # hvparams, we initialize it to empty, and then (in both
-        # cases) we make sure to fill the defaults, as we might not
-        # have a complete defaults list if the hypervisor wasn't
-        # enabled before
-        if hv not in new_hvp:
-          new_hvp[hv] = {}
-        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
-        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
-
-    if self.op.hvparams or self.op.enabled_hypervisors is not None:
-      # either the enabled list has changed, or the parameters have, validate
-      for hv_name, hv_params in self.new_hvparams.items():
-        if ((self.op.hvparams and hv_name in self.op.hvparams) or
-            (self.op.enabled_hypervisors and
-             hv_name in self.op.enabled_hypervisors)):
-          # either this is a new hypervisor, or its parameters have changed
-          hv_class = hypervisor.GetHypervisorClass(hv_name)
-          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
-          hv_class.CheckParameterSyntax(hv_params)
-          CheckHVParams(self, node_uuids, hv_name, hv_params)
-
-    self._CheckDiskTemplateConsistency()
-
-    if self.op.os_hvp:
-      # no need to check any newly-enabled hypervisors, since the
-      # defaults have already been checked in the above code-block
-      for os_name, os_hvp in self.new_os_hvp.items():
-        for hv_name, hv_params in os_hvp.items():
-          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
-          # we need to fill in the new os_hvp on top of the actual hv_p
-          cluster_defaults = self.new_hvparams.get(hv_name, {})
-          new_osp = objects.FillDict(cluster_defaults, hv_params)
-          hv_class = hypervisor.GetHypervisorClass(hv_name)
-          hv_class.CheckParameterSyntax(new_osp)
-          CheckHVParams(self, node_uuids, hv_name, new_osp)
-
-    if self.op.default_iallocator:
-      alloc_script = utils.FindFile(self.op.default_iallocator,
-                                    constants.IALLOCATOR_SEARCH_PATH,
-                                    os.path.isfile)
-      if alloc_script is None:
-        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
-                                   " specified" % self.op.default_iallocator,
-                                   errors.ECODE_INVAL)
-
-    if self.op.instance_communication_network:
-      network_name = self.op.instance_communication_network
-
-      try:
-        network_uuid = self.cfg.LookupNetwork(network_name)
-      except errors.OpPrereqError:
-        network_uuid = None
-
-      if network_uuid is not None:
-        network = self.cfg.GetNetwork(network_uuid)
-        self._CheckInstanceCommunicationNetwork(network, self.LogWarning)
-
-    if self.op.compression_tools:
-      CheckCompressionTools(self.op.compression_tools)
-
-  def _BuildOSParams(self, cluster):
-    "Calculate the new OS parameters for this operation."
-
-    def _GetNewParams(source, new_params):
-      "Wrapper around GetUpdatedParams."
-      if new_params is None:
-        return source
-      result = objects.FillDict(source, {}) # deep copy of source
-      for os_name in new_params:
-        result[os_name] = GetUpdatedParams(result.get(os_name, {}),
-                                           new_params[os_name],
-                                           use_none=True)
-        if not result[os_name]:
-          del result[os_name] # we removed all parameters
-      return result
-
-    self.new_osp = _GetNewParams(cluster.osparams,
-                                 self.op.osparams)
-    self.new_osp_private = _GetNewParams(cluster.osparams_private_cluster,
-                                         self.op.osparams_private_cluster)
-
-    # Remove os validity check
-    changed_oses = (set(self.new_osp.keys()) | set(self.new_osp_private.keys()))
-    for os_name in changed_oses:
-      os_params = cluster.SimpleFillOS(
-        os_name,
-        self.new_osp.get(os_name, {}),
-        os_params_private=self.new_osp_private.get(os_name, {})
-      )
-      # check the parameter validity (remote check)
-      CheckOSParams(self, False, [self.cfg.GetMasterNode()],
-                    os_name, os_params, False)
-
-  def _CheckDiskTemplateConsistency(self):
-    """Check whether the disk templates that are going to be disabled
-       are still in use by some instances.
-
-    """
-    if self.op.enabled_disk_templates:
-      cluster = self.cfg.GetClusterInfo()
-      instances = self.cfg.GetAllInstancesInfo()
-
-      disk_templates_to_remove = set(cluster.enabled_disk_templates) \
-        - set(self.op.enabled_disk_templates)
-      for instance in instances.itervalues():
-        if instance.disk_template in disk_templates_to_remove:
-          raise errors.OpPrereqError("Cannot disable disk template '%s',"
-                                     " because instance '%s' is using it." %
-                                     (instance.disk_template, instance.name))
-
-  def _SetVgName(self, feedback_fn):
-    """Determines and sets the new volume group name.
-
-    """
-    if self.op.vg_name is not None:
-      new_volume = self.op.vg_name
-      if not new_volume:
-        new_volume = None
-      if new_volume != self.cfg.GetVGName():
-        self.cfg.SetVGName(new_volume)
-      else:
-        feedback_fn("Cluster LVM configuration already in desired"
-                    " state, not changing")
-
-  def _SetFileStorageDir(self, feedback_fn):
-    """Set the file storage directory.
-
-    """
-    if self.op.file_storage_dir is not None:
-      if self.cluster.file_storage_dir == self.op.file_storage_dir:
-        feedback_fn("Global file storage dir already set to value '%s'"
-                    % self.cluster.file_storage_dir)
-      else:
-        self.cluster.file_storage_dir = self.op.file_storage_dir
-
-  def _SetSharedFileStorageDir(self, feedback_fn):
-    """Set the shared file storage directory.
-
-    """
-    if self.op.shared_file_storage_dir is not None:
-      if self.cluster.shared_file_storage_dir == \
-          self.op.shared_file_storage_dir:
-        feedback_fn("Global shared file storage dir already set to value '%s'"
-                    % self.cluster.shared_file_storage_dir)
-      else:
-        self.cluster.shared_file_storage_dir = self.op.shared_file_storage_dir
-
-  def _SetDrbdHelper(self, feedback_fn):
-    """Set the DRBD usermode helper.
-
-    """
-    if self.op.drbd_helper is not None:
-      if not constants.DT_DRBD8 in self.cluster.enabled_disk_templates:
-        feedback_fn("Note that you specified a drbd user helper, but did not"
-                    " enable the drbd disk template.")
-      new_helper = self.op.drbd_helper
-      if not new_helper:
-        new_helper = None
-      if new_helper != self.cfg.GetDRBDHelper():
-        self.cfg.SetDRBDHelper(new_helper)
-      else:
-        feedback_fn("Cluster DRBD helper already in desired state,"
-                    " not changing")
-
-  @staticmethod
-  def _EnsureInstanceCommunicationNetwork(cfg, network_name):
-    """Ensure that the instance communication network exists and is
-    connected to all groups.
-
-    The instance communication network given by L{network_name} is
-    created, if necessary, via the opcode 'OpNetworkAdd'.  Also, the
-    instance communication network is connected to all existing node
-    groups, if necessary, via the opcode 'OpNetworkConnect'.
-
-    @type cfg: L{config.ConfigWriter}
-    @param cfg: cluster configuration
-
-    @type network_name: string
-    @param network_name: instance communication network name
-
-    @rtype: L{ganeti.cmdlib.ResultWithJobs} or L{None}
-    @return: L{ganeti.cmdlib.ResultWithJobs} if the instance
-             communication network needs to be created or connected
-             to a group, otherwise L{None}
-
-    """
-    jobs = []
-
-    try:
-      network_uuid = cfg.LookupNetwork(network_name)
-      network_exists = True
-    except errors.OpPrereqError:
-      network_exists = False
-
-    if not network_exists:
-      jobs.append(AddInstanceCommunicationNetworkOp(network_name))
-
-    for group_uuid in cfg.GetNodeGroupList():
-      group = cfg.GetNodeGroup(group_uuid)
-
-      if network_exists:
-        network_connected = network_uuid in group.networks
-      else:
-        # The network was created asynchronously by the previous
-        # opcode and, therefore, we don't have access to its
-        # network_uuid.  As a result, we assume that the network is
-        # not connected to any group yet.
-        network_connected = False
-
-      if not network_connected:
-        op = ConnectInstanceCommunicationNetworkOp(group_uuid, network_name)
-        jobs.append(op)
-
-    if jobs:
-      return ResultWithJobs([jobs])
-    else:
-      return None
-
-  @staticmethod
-  def _ModifyInstanceCommunicationNetwork(cfg, network_name, feedback_fn):
-    """Update the instance communication network stored in the cluster
-    configuration.
-
-    Compares the user-supplied instance communication network against
-    the one stored in the Ganeti cluster configuration.  If there is a
-    change, the instance communication network may be possibly created
-    and connected to all groups (see
-    L{LUClusterSetParams._EnsureInstanceCommunicationNetwork}).
-
-    @type cfg: L{config.ConfigWriter}
-    @param cfg: cluster configuration
-
-    @type network_name: string
-    @param network_name: instance communication network name
-
-    @type feedback_fn: function
-    @param feedback_fn: see L{ganeti.cmdlib.base.LogicalUnit}
-
-    @rtype: L{LUClusterSetParams._EnsureInstanceCommunicationNetwork} or L{None}
-    @return: see L{LUClusterSetParams._EnsureInstanceCommunicationNetwork}
-
-    """
-    config_network_name = cfg.GetInstanceCommunicationNetwork()
-
-    if network_name == config_network_name:
-      feedback_fn("Instance communication network already is '%s', nothing to"
-                  " do." % network_name)
-    else:
-      try:
-        cfg.LookupNetwork(config_network_name)
-        feedback_fn("Previous instance communication network '%s'"
-                    " should be removed manually." % config_network_name)
-      except errors.OpPrereqError:
-        pass
-
-      if network_name:
-        feedback_fn("Changing instance communication network to '%s', only new"
-                    " instances will be affected."
-                    % network_name)
-      else:
-        feedback_fn("Disabling instance communication network, only new"
-                    " instances will be affected.")
-
-      cfg.SetInstanceCommunicationNetwork(network_name)
-
-      if network_name:
-        return LUClusterSetParams._EnsureInstanceCommunicationNetwork(
-          cfg,
-          network_name)
-      else:
-        return None
-
-  def Exec(self, feedback_fn):
-    """Change the parameters of the cluster.
-
-    """
-    # re-read the fresh configuration
-    self.cluster = self.cfg.GetClusterInfo()
-    if self.op.enabled_disk_templates:
-      self.cluster.enabled_disk_templates = \
-        list(self.op.enabled_disk_templates)
-    # save the changes
-    self.cfg.Update(self.cluster, feedback_fn)
-
-    self._SetVgName(feedback_fn)
-
-    self.cluster = self.cfg.GetClusterInfo()
-    self._SetFileStorageDir(feedback_fn)
-    self._SetSharedFileStorageDir(feedback_fn)
-    self.cfg.Update(self.cluster, feedback_fn)
-    self._SetDrbdHelper(feedback_fn)
-
-    # re-read the fresh configuration again
-    self.cluster = self.cfg.GetClusterInfo()
-
-    ensure_kvmd = False
-
-    active = constants.DATA_COLLECTOR_STATE_ACTIVE
-    if self.op.enabled_data_collectors is not None:
-      for name, val in self.op.enabled_data_collectors.items():
-        self.cluster.data_collectors[name][active] = val
-
-    if self.op.data_collector_interval:
-      internal = constants.DATA_COLLECTOR_PARAMETER_INTERVAL
-      for name, val in self.op.data_collector_interval.items():
-        self.cluster.data_collectors[name][internal] = int(val)
-
-    if self.op.hvparams:
-      self.cluster.hvparams = self.new_hvparams
-    if self.op.os_hvp:
-      self.cluster.os_hvp = self.new_os_hvp
-    if self.op.enabled_hypervisors is not None:
-      self.cluster.hvparams = self.new_hvparams
-      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
-      ensure_kvmd = True
-    if self.op.beparams:
-      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
-    if self.op.nicparams:
-      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
-    if self.op.ipolicy:
-      self.cluster.ipolicy = self.new_ipolicy
-    if self.op.osparams:
-      self.cluster.osparams = self.new_osp
-    if self.op.osparams_private_cluster:
-      self.cluster.osparams_private_cluster = self.new_osp_private
-    if self.op.ndparams:
-      self.cluster.ndparams = self.new_ndparams
-    if self.op.diskparams:
-      self.cluster.diskparams = self.new_diskparams
-    if self.op.hv_state:
-      self.cluster.hv_state_static = self.new_hv_state
-    if self.op.disk_state:
-      self.cluster.disk_state_static = self.new_disk_state
-
-    if self.op.candidate_pool_size is not None:
-      self.cluster.candidate_pool_size = self.op.candidate_pool_size
-      # we need to update the pool size here, otherwise the save will fail
-      AdjustCandidatePool(self, [])
-
-    if self.op.max_running_jobs is not None:
-      self.cluster.max_running_jobs = self.op.max_running_jobs
-
-    if self.op.max_tracked_jobs is not None:
-      self.cluster.max_tracked_jobs = self.op.max_tracked_jobs
-
-    if self.op.maintain_node_health is not None:
-      self.cluster.maintain_node_health = self.op.maintain_node_health
-
-    if self.op.modify_etc_hosts is not None:
-      self.cluster.modify_etc_hosts = self.op.modify_etc_hosts
-
-    if self.op.prealloc_wipe_disks is not None:
-      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
-
-    if self.op.add_uids is not None:
-      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
-
-    if self.op.remove_uids is not None:
-      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
-
-    if self.op.uid_pool is not None:
-      self.cluster.uid_pool = self.op.uid_pool
-
-    if self.op.default_iallocator is not None:
-      self.cluster.default_iallocator = self.op.default_iallocator
-
-    if self.op.default_iallocator_params is not None:
-      self.cluster.default_iallocator_params = self.op.default_iallocator_params
-
-    if self.op.reserved_lvs is not None:
-      self.cluster.reserved_lvs = self.op.reserved_lvs
-
-    if self.op.use_external_mip_script is not None:
-      self.cluster.use_external_mip_script = self.op.use_external_mip_script
-
-    if self.op.enabled_user_shutdown is not None and \
-          self.cluster.enabled_user_shutdown != self.op.enabled_user_shutdown:
-      self.cluster.enabled_user_shutdown = self.op.enabled_user_shutdown
-      ensure_kvmd = True
-
-    def helper_os(aname, mods, desc):
-      desc += " OS list"
-      lst = getattr(self.cluster, aname)
-      for key, val in mods:
-        if key == constants.DDM_ADD:
-          if val in lst:
-            feedback_fn("OS %s already in %s, ignoring" % (val, desc))
-          else:
-            lst.append(val)
-        elif key == constants.DDM_REMOVE:
-          if val in lst:
-            lst.remove(val)
-          else:
-            feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
-        else:
-          raise errors.ProgrammerError("Invalid modification '%s'" % key)
-
-    if self.op.hidden_os:
-      helper_os("hidden_os", self.op.hidden_os, "hidden")
-
-    if self.op.blacklisted_os:
-      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
-
-    if self.op.mac_prefix:
-      self.cluster.mac_prefix = self.op.mac_prefix
-
-    if self.op.master_netdev:
-      master_params = self.cfg.GetMasterNetworkParameters()
-      ems = self.cfg.GetUseExternalMipScript()
-      feedback_fn("Shutting down master ip on the current netdev (%s)" %
-                  self.cluster.master_netdev)
-      result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
-                                                       master_params, ems)
-      if not self.op.force:
-        result.Raise("Could not disable the master ip")
-      else:
-        if result.fail_msg:
-          msg = ("Could not disable the master ip (continuing anyway): %s" %
-                 result.fail_msg)
-          feedback_fn(msg)
-      feedback_fn("Changing master_netdev from %s to %s" %
-                  (master_params.netdev, self.op.master_netdev))
-      self.cluster.master_netdev = self.op.master_netdev
-
-    if self.op.master_netmask:
-      master_params = self.cfg.GetMasterNetworkParameters()
-      feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask)
-      result = self.rpc.call_node_change_master_netmask(
-                 master_params.uuid, master_params.netmask,
-                 self.op.master_netmask, master_params.ip,
-                 master_params.netdev)
-      result.Warn("Could not change the master IP netmask", feedback_fn)
-      self.cluster.master_netmask = self.op.master_netmask
-
-    if self.op.install_image:
-      self.cluster.install_image = self.op.install_image
-
-    if self.op.zeroing_image is not None:
-      CheckImageValidity(self.op.zeroing_image,
-                         "Zeroing image must be an absolute path or a URL")
-      self.cluster.zeroing_image = self.op.zeroing_image
-
-    self.cfg.Update(self.cluster, feedback_fn)
-
-    if self.op.master_netdev:
-      master_params = self.cfg.GetMasterNetworkParameters()
-      feedback_fn("Starting the master ip on the new master netdev (%s)" %
-                  self.op.master_netdev)
-      ems = self.cfg.GetUseExternalMipScript()
-      result = self.rpc.call_node_activate_master_ip(master_params.uuid,
-                                                     master_params, ems)
-      result.Warn("Could not re-enable the master ip on the master,"
-                  " please restart manually", self.LogWarning)
-
-    # Even though 'self.op.enabled_user_shutdown' is being tested
-    # above, the RPCs can only be done after 'self.cfg.Update' because
-    # this will update the cluster object and sync 'Ssconf', and kvmd
-    # uses 'Ssconf'.
-    if ensure_kvmd:
-      EnsureKvmdOnNodes(self, feedback_fn)
-
-    if self.op.compression_tools is not None:
-      self.cfg.SetCompressionTools(self.op.compression_tools)
-
-    network_name = self.op.instance_communication_network
-    if network_name is not None:
-      return self._ModifyInstanceCommunicationNetwork(self.cfg,
-                                                      network_name, feedback_fn)
-    else:
-      return None
-
-
-class LUClusterVerify(NoHooksLU):
-  """Submits all jobs necessary to verify the cluster.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self.needed_locks = {}
-
-  def Exec(self, feedback_fn):
-    jobs = []
-
-    if self.op.group_name:
-      groups = [self.op.group_name]
-      depends_fn = lambda: None
-    else:
-      groups = self.cfg.GetNodeGroupList()
-
-      # Verify global configuration
-      jobs.append([
-        opcodes.OpClusterVerifyConfig(ignore_errors=self.op.ignore_errors),
-        ])
-
-      # Always depend on global verification
-      depends_fn = lambda: [(-len(jobs), [])]
-
-    jobs.extend(
-      [opcodes.OpClusterVerifyGroup(group_name=group,
-                                    ignore_errors=self.op.ignore_errors,
-                                    depends=depends_fn(),
-                                    verify_clutter=self.op.verify_clutter)]
-      for group in groups)
-
-    # Fix up all parameters
-    for op in itertools.chain(*jobs): # pylint: disable=W0142
-      op.debug_simulate_errors = self.op.debug_simulate_errors
-      op.verbose = self.op.verbose
-      op.error_codes = self.op.error_codes
-      try:
-        op.skip_checks = self.op.skip_checks
-      except AttributeError:
-        assert not isinstance(op, opcodes.OpClusterVerifyGroup)
-
-    return ResultWithJobs(jobs)
-
-
-class _VerifyErrors(object):
-  """Mix-in for cluster/group verify LUs.
-
-  It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
-  self.op and self._feedback_fn to be available.)
-
-  """
-
-  ETYPE_FIELD = "code"
-  ETYPE_ERROR = constants.CV_ERROR
-  ETYPE_WARNING = constants.CV_WARNING
-
-  def _Error(self, ecode, item, msg, *args, **kwargs):
-    """Format an error message.
-
-    Based on the opcode's error_codes parameter, either format a
-    parseable error code, or a simpler error string.
-
-    This must be called only from Exec and functions called from Exec.
-
-    """
-    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
-    itype, etxt, _ = ecode
-    # If the error code is in the list of ignored errors, demote the error to a
-    # warning
-    if etxt in self.op.ignore_errors:     # pylint: disable=E1101
-      ltype = self.ETYPE_WARNING
-    # first complete the msg
-    if args:
-      msg = msg % args
-    # then format the whole message
-    if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
-      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
-    else:
-      if item:
-        item = " " + item
-      else:
-        item = ""
-      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
-    # and finally report it via the feedback_fn
-    self._feedback_fn("  - %s" % msg) # Mix-in. pylint: disable=E1101
-    # do not mark the operation as failed for WARN cases only
-    if ltype == self.ETYPE_ERROR:
-      self.bad = True
-
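# A small example (not part of the Ganeti source) of the two message layouts
# produced by _Error above, for a hypothetical cluster-level error; the values
# below are made up for illustration.
(ltype, etxt, itype, item, msg) = (
    "ERROR", "ECLUSTERCFG", "cluster", "", "duplicate node UUID")

# machine-parseable form, used when the opcode sets error_codes
parseable = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
assert parseable == "ERROR:ECLUSTERCFG:cluster::duplicate node UUID"

# human-readable default form
readable = "%s: %s%s: %s" % (ltype, itype, (" " + item) if item else "", msg)
assert readable == "ERROR: cluster: duplicate node UUID"
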
-  def _ErrorIf(self, cond, *args, **kwargs):
-    """Log an error message if the passed condition is True.
-
-    """
-    if (bool(cond)
-        or self.op.debug_simulate_errors): # pylint: disable=E1101
-      self._Error(*args, **kwargs)
-
-
-def _GetAllHypervisorParameters(cluster, instances):
-  """Compute the set of all hypervisor parameters.
-
-  @type cluster: L{objects.Cluster}
-  @param cluster: the cluster object
-  @type instances: list of L{objects.Instance}
-  @param instances: additional instances from which to obtain parameters
-  @rtype: list of (origin, hypervisor, parameters)
-  @return: a list with all parameters found, indicating the hypervisor they
-       apply to, and the origin (can be "cluster", "os X", or "instance Y")
-
-  """
-  hvp_data = []
-
-  for hv_name in cluster.enabled_hypervisors:
-    hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
-
-  for os_name, os_hvp in cluster.os_hvp.items():
-    for hv_name, hv_params in os_hvp.items():
-      if hv_params:
-        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
-        hvp_data.append(("os %s" % os_name, hv_name, full_params))
-
-  # TODO: collapse identical parameter values in a single one
-  for instance in instances:
-    if instance.hvparams:
-      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
-                       cluster.FillHV(instance)))
-
-  return hvp_data
-
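# A sketch (not part of the Ganeti source) of the shape of the list returned
# by _GetAllHypervisorParameters: one (origin, hypervisor, parameters) tuple
# per parameter source; the parameter names below are hypothetical.
hvp_data_example = [
  ("cluster", "kvm", {"kernel_path": "/boot/vmlinuz"}),
  ("os debian", "kvm", {"kernel_path": "/boot/vmlinuz",
                        "serial_console": True}),
  ("instance web1", "kvm", {"kernel_path": "/boot/vmlinuz-web1"}),
]

for origin, hv_name, params in hvp_data_example:
  assert isinstance(params, dict)
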
-
-class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
-  """Verifies the cluster config.
-
-  """
-  REQ_BGL = False
-
-  def _VerifyHVP(self, hvp_data):
-    """Verifies locally the syntax of the hypervisor parameters.
-
-    """
-    for item, hv_name, hv_params in hvp_data:
-      msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
-             (hv_name, item))
-      try:
-        hv_class = hypervisor.GetHypervisorClass(hv_name)
-        utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
-        hv_class.CheckParameterSyntax(hv_params)
-      except errors.GenericError, err:
-        self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg % str(err))
-
-  def ExpandNames(self):
-    self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
-    self.share_locks = ShareAll()
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    """
-    # Retrieve all information
-    self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
-    self.all_node_info = self.cfg.GetAllNodesInfo()
-    self.all_inst_info = self.cfg.GetAllInstancesInfo()
-
-  def Exec(self, feedback_fn):
-    """Verify integrity of cluster, performing various test on nodes.
-
-    """
-    self.bad = False
-    self._feedback_fn = feedback_fn
-
-    feedback_fn("* Verifying cluster config")
-
-    for msg in self.cfg.VerifyConfig():
-      self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg)
-
-    feedback_fn("* Verifying cluster certificate files")
-
-    for cert_filename in pathutils.ALL_CERT_FILES:
-      (errcode, msg) = utils.VerifyCertificate(cert_filename)
-      self._ErrorIf(errcode, constants.CV_ECLUSTERCERT, None, msg, code=errcode)
-
-    self._ErrorIf(not utils.CanRead(constants.LUXID_USER,
-                                    pathutils.NODED_CERT_FILE),
-                  constants.CV_ECLUSTERCERT,
-                  None,
-                  pathutils.NODED_CERT_FILE + " must be accessible by the " +
-                    constants.LUXID_USER + " user")
-
-    feedback_fn("* Verifying hypervisor parameters")
-
-    self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(),
-                                                self.all_inst_info.values()))
-
-    feedback_fn("* Verifying all nodes belong to an existing group")
-
-    # We do this verification here because, should this bogus circumstance
-    # occur, it would never be caught by VerifyGroup, which only acts on
-    # nodes/instances reachable from existing node groups.
-
-    dangling_nodes = set(node for node in self.all_node_info.values()
-                         if node.group not in self.all_group_info)
-
-    dangling_instances = {}
-    no_node_instances = []
-
-    for inst in self.all_inst_info.values():
-      if inst.primary_node in [node.uuid for node in dangling_nodes]:
-        dangling_instances.setdefault(inst.primary_node, []).append(inst)
-      elif inst.primary_node not in self.all_node_info:
-        no_node_instances.append(inst)
-
-    pretty_dangling = [
-        "%s (%s)" %
-        (node.name,
-         utils.CommaJoin(inst.name for
-                         inst in dangling_instances.get(node.uuid, [])))
-        for node in dangling_nodes]
-
-    self._ErrorIf(bool(dangling_nodes), constants.CV_ECLUSTERDANGLINGNODES,
-                  None,
-                  "the following nodes (and their instances) belong to a non"
-                  " existing group: %s", utils.CommaJoin(pretty_dangling))
-
-    self._ErrorIf(bool(no_node_instances), constants.CV_ECLUSTERDANGLINGINST,
-                  None,
-                  "the following instances have a non-existing primary-node:"
-                  " %s", utils.CommaJoin(inst.name for
-                                         inst in no_node_instances))
-
-    return not self.bad
-
-
-class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
-  """Verifies the status of a node group.
-
-  """
-  HPATH = "cluster-verify"
-  HTYPE = constants.HTYPE_CLUSTER
-  REQ_BGL = False
-
-  _HOOKS_INDENT_RE = re.compile("^", re.M)
-
-  class NodeImage(object):
-    """A class representing the logical and physical status of a node.
-
-    @type uuid: string
-    @ivar uuid: the node UUID to which this object refers
-    @ivar volumes: a structure as returned from
-        L{ganeti.backend.GetVolumeList} (runtime)
-    @ivar instances: a list of running instances (runtime)
-    @ivar pinst: list of configured primary instances (config)
-    @ivar sinst: list of configured secondary instances (config)
-    @ivar sbp: dictionary of {primary-node: list of instances} for all
-        instances for which this node is secondary (config)
-    @ivar mfree: free memory, as reported by hypervisor (runtime)
-    @ivar dfree: free disk, as reported by the node (runtime)
-    @ivar offline: the offline status (config)
-    @type rpc_fail: boolean
-    @ivar rpc_fail: whether the RPC verify call was successful (overall,
-        not whether the individual keys were correct) (runtime)
-    @type lvm_fail: boolean
-    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
-    @type hyp_fail: boolean
-    @ivar hyp_fail: whether the RPC call didn't return the instance list
-    @type ghost: boolean
-    @ivar ghost: whether this is a known node or not (config)
-    @type os_fail: boolean
-    @ivar os_fail: whether the RPC call didn't return valid OS data
-    @type oslist: list
-    @ivar oslist: list of OSes as diagnosed by DiagnoseOS
-    @type vm_capable: boolean
-    @ivar vm_capable: whether the node can host instances
-    @type pv_min: float
-    @ivar pv_min: size in MiB of the smallest PVs
-    @type pv_max: float
-    @ivar pv_max: size in MiB of the biggest PVs
-
-    """
-    def __init__(self, offline=False, uuid=None, vm_capable=True):
-      self.uuid = uuid
-      self.volumes = {}
-      self.instances = []
-      self.pinst = []
-      self.sinst = []
-      self.sbp = {}
-      self.mfree = 0
-      self.dfree = 0
-      self.offline = offline
-      self.vm_capable = vm_capable
-      self.rpc_fail = False
-      self.lvm_fail = False
-      self.hyp_fail = False
-      self.ghost = False
-      self.os_fail = False
-      self.oslist = {}
-      self.pv_min = None
-      self.pv_max = None
-
-  def ExpandNames(self):
-    # This raises errors.OpPrereqError on its own:
-    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
-
-    # Get instances in node group; this is unsafe and needs verification later
-    inst_uuids = \
-      self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
-
-    self.needed_locks = {
-      locking.LEVEL_INSTANCE: self.cfg.GetInstanceNames(inst_uuids),
-      locking.LEVEL_NODEGROUP: [self.group_uuid],
-      locking.LEVEL_NODE: [],
-
-      # This opcode is run by watcher every five minutes and acquires all nodes
-      # for a group. It doesn't run for a long time, so it's better to acquire
-      # the node allocation lock as well.
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
-      }
-
-    self.share_locks = ShareAll()
-
-  def DeclareLocks(self, level):
-    if level == locking.LEVEL_NODE:
-      # Get members of node group; this is unsafe and needs verification later
-      nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
-
-      # In Exec(), we warn about mirrored instances that have primary and
-      # secondary living in separate node groups. To fully verify that
-      # volumes for these instances are healthy, we will need to do an
-      # extra call to their secondaries. We ensure here those nodes will
-      # be locked.
-      for inst_name in self.owned_locks(locking.LEVEL_INSTANCE):
-        # Important: access only the instances whose lock is owned
-        instance = self.cfg.GetInstanceInfoByName(inst_name)
-        if instance.disk_template in constants.DTS_INT_MIRROR:
-          nodes.update(self.cfg.GetInstanceSecondaryNodes(instance.uuid))
-
-      self.needed_locks[locking.LEVEL_NODE] = nodes
-
-  def CheckPrereq(self):
-    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
-    self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
-
-    group_node_uuids = set(self.group_info.members)
-    group_inst_uuids = \
-      self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
-
-    unlocked_node_uuids = \
-        group_node_uuids.difference(self.owned_locks(locking.LEVEL_NODE))
-
-    unlocked_inst_uuids = \
-        group_inst_uuids.difference(
-          [self.cfg.GetInstanceInfoByName(name).uuid
-           for name in self.owned_locks(locking.LEVEL_INSTANCE)])
-
-    if unlocked_node_uuids:
-      raise errors.OpPrereqError(
-        "Missing lock for nodes: %s" %
-        utils.CommaJoin(self.cfg.GetNodeNames(unlocked_node_uuids)),
-        errors.ECODE_STATE)
-
-    if unlocked_inst_uuids:
-      raise errors.OpPrereqError(
-        "Missing lock for instances: %s" %
-        utils.CommaJoin(self.cfg.GetInstanceNames(unlocked_inst_uuids)),
-        errors.ECODE_STATE)
-
-    self.all_node_info = self.cfg.GetAllNodesInfo()
-    self.all_inst_info = self.cfg.GetAllInstancesInfo()
-
-    self.my_node_uuids = group_node_uuids
-    self.my_node_info = dict((node_uuid, self.all_node_info[node_uuid])
-                             for node_uuid in group_node_uuids)
-
-    self.my_inst_uuids = group_inst_uuids
-    self.my_inst_info = dict((inst_uuid, self.all_inst_info[inst_uuid])
-                             for inst_uuid in group_inst_uuids)
-
-    # We detect here the nodes that will need the extra RPC calls for verifying
-    # split LV volumes; they should be locked.
-    extra_lv_nodes = set()
-
-    for inst in self.my_inst_info.values():
-      if inst.disk_template in constants.DTS_INT_MIRROR:
-        inst_nodes = self.cfg.GetInstanceNodes(inst.uuid)
-        for nuuid in inst_nodes:
-          if self.all_node_info[nuuid].group != self.group_uuid:
-            extra_lv_nodes.add(nuuid)
-
-    unlocked_lv_nodes = \
-        extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
-
-    if unlocked_lv_nodes:
-      raise errors.OpPrereqError("Missing node locks for LV check: %s" %
-                                 utils.CommaJoin(unlocked_lv_nodes),
-                                 errors.ECODE_STATE)
-    self.extra_lv_nodes = list(extra_lv_nodes)
-
-  def _VerifyNode(self, ninfo, nresult):
-    """Perform some basic validation on data returned from a node.
-
-      - check the result data structure is well formed and has all the
-        mandatory fields
-      - check ganeti version
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nresult: the results from the node
-    @rtype: boolean
-    @return: whether overall this call was successful (and we can expect
-         reasonable values in the response)
-
-    """
-    # main result, nresult should be a non-empty dict
-    test = not nresult or not isinstance(nresult, dict)
-    self._ErrorIf(test, constants.CV_ENODERPC, ninfo.name,
-                  "unable to verify node: no data returned")
-    if test:
-      return False
-
-    # compares ganeti version
-    local_version = constants.PROTOCOL_VERSION
-    remote_version = nresult.get("version", None)
-    test = not (remote_version and
-                isinstance(remote_version, (list, tuple)) and
-                len(remote_version) == 2)
-    self._ErrorIf(test, constants.CV_ENODERPC, ninfo.name,
-                  "connection to node returned invalid data")
-    if test:
-      return False
-
-    test = local_version != remote_version[0]
-    self._ErrorIf(test, constants.CV_ENODEVERSION, ninfo.name,
-                  "incompatible protocol versions: master %s,"
-                  " node %s", local_version, remote_version[0])
-    if test:
-      return False
-
-    # node seems compatible, we can actually try to look into its results
-
-    # full package version
-    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
-                  constants.CV_ENODEVERSION, ninfo.name,
-                  "software version mismatch: master %s, node %s",
-                  constants.RELEASE_VERSION, remote_version[1],
-                  code=self.ETYPE_WARNING)
-
-    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
-    if ninfo.vm_capable and isinstance(hyp_result, dict):
-      for hv_name, hv_result in hyp_result.iteritems():
-        test = hv_result is not None
-        self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
-                      "hypervisor %s verify failure: '%s'", hv_name, hv_result)
-
-    hvp_result = nresult.get(constants.NV_HVPARAMS, None)
-    if ninfo.vm_capable and isinstance(hvp_result, list):
-      for item, hv_name, hv_result in hvp_result:
-        self._ErrorIf(True, constants.CV_ENODEHV, ninfo.name,
-                      "hypervisor %s parameter verify failure (source %s): %s",
-                      hv_name, item, hv_result)
-
-    test = nresult.get(constants.NV_NODESETUP,
-                       ["Missing NODESETUP results"])
-    self._ErrorIf(test, constants.CV_ENODESETUP, ninfo.name,
-                  "node setup error: %s", "; ".join(test))
-
-    return True
-
-  def _VerifyNodeTime(self, ninfo, nresult,
-                      nvinfo_starttime, nvinfo_endtime):
-    """Check the node time.
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nresult: the remote results for the node
-    @param nvinfo_starttime: the start time of the RPC call
-    @param nvinfo_endtime: the end time of the RPC call
-
-    """
-    ntime = nresult.get(constants.NV_TIME, None)
-    try:
-      ntime_merged = utils.MergeTime(ntime)
-    except (ValueError, TypeError):
-      self._ErrorIf(True, constants.CV_ENODETIME, ninfo.name,
-                    "Node returned invalid time")
-      return
-
-    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
-      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
-    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
-      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
-    else:
-      ntime_diff = None
-
-    self._ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, ninfo.name,
-                  "Node time diverges by at least %s from master node time",
-                  ntime_diff)
-
-  def _UpdateVerifyNodeLVM(self, ninfo, nresult, vg_name, nimg):
-    """Check the node LVM results and update info for cross-node checks.
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nresult: the remote results for the node
-    @param vg_name: the configured VG name
-    @type nimg: L{NodeImage}
-    @param nimg: node image
-
-    """
-    if vg_name is None:
-      return
-
-    # checks vg existence and size > 20G
-    vglist = nresult.get(constants.NV_VGLIST, None)
-    test = not vglist
-    self._ErrorIf(test, constants.CV_ENODELVM, ninfo.name,
-                  "unable to check volume groups")
-    if not test:
-      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
-                                            constants.MIN_VG_SIZE)
-      self._ErrorIf(vgstatus, constants.CV_ENODELVM, ninfo.name, vgstatus)
-
-    # Check PVs
-    (errmsgs, pvminmax) = CheckNodePVs(nresult, self._exclusive_storage)
-    for em in errmsgs:
-      self._Error(constants.CV_ENODELVM, ninfo.name, em)
-    if pvminmax is not None:
-      (nimg.pv_min, nimg.pv_max) = pvminmax
-
-  def _VerifyGroupDRBDVersion(self, node_verify_infos):
-    """Check cross-node DRBD version consistency.
-
-    @type node_verify_infos: dict
-    @param node_verify_infos: infos about nodes as returned from the
-      node_verify call.
-
-    """
-    node_versions = {}
-    for node_uuid, ndata in node_verify_infos.items():
-      nresult = ndata.payload
-      if nresult:
-        version = nresult.get(constants.NV_DRBDVERSION, None)
-        if version:
-          node_versions[node_uuid] = version
-
-    if len(set(node_versions.values())) > 1:
-      for node_uuid, version in sorted(node_versions.items()):
-        msg = "DRBD version mismatch: %s" % version
-        self._Error(constants.CV_ENODEDRBDHELPER, node_uuid, msg,
-                    code=self.ETYPE_WARNING)
-
-  def _VerifyGroupLVM(self, node_image, vg_name):
-    """Check cross-node consistency in LVM.
-
-    @type node_image: dict
-    @param node_image: info about nodes, mapping from node UUIDs to
-      L{NodeImage} objects
-    @param vg_name: the configured VG name
-
-    """
-    if vg_name is None:
-      return
-
-    # Only exclusive storage needs this kind of checks
-    if not self._exclusive_storage:
-      return
-
-    # exclusive_storage wants all PVs to have the same size (approximately),
-    # if the smallest and the biggest ones are okay, everything is fine.
-    # pv_min is None iff pv_max is None
-    vals = filter((lambda ni: ni.pv_min is not None), node_image.values())
-    if not vals:
-      return
-    (pvmin, minnode_uuid) = min((ni.pv_min, ni.uuid) for ni in vals)
-    (pvmax, maxnode_uuid) = max((ni.pv_max, ni.uuid) for ni in vals)
-    bad = utils.LvmExclusiveTestBadPvSizes(pvmin, pvmax)
-    self._ErrorIf(bad, constants.CV_EGROUPDIFFERENTPVSIZE, self.group_info.name,
-                  "PV sizes differ too much in the group; smallest (%s MB) is"
-                  " on %s, biggest (%s MB) is on %s",
-                  pvmin, self.cfg.GetNodeName(minnode_uuid),
-                  pvmax, self.cfg.GetNodeName(maxnode_uuid))
-
-  def _VerifyNodeBridges(self, ninfo, nresult, bridges):
-    """Check the node bridges.
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nresult: the remote results for the node
-    @param bridges: the expected list of bridges
-
-    """
-    if not bridges:
-      return
-
-    missing = nresult.get(constants.NV_BRIDGES, None)
-    test = not isinstance(missing, list)
-    self._ErrorIf(test, constants.CV_ENODENET, ninfo.name,
-                  "did not return valid bridge information")
-    if not test:
-      self._ErrorIf(bool(missing), constants.CV_ENODENET, ninfo.name,
-                    "missing bridges: %s" % utils.CommaJoin(sorted(missing)))
-
-  def _VerifyNodeUserScripts(self, ninfo, nresult):
-    """Check the results of user scripts presence and executability on the node
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nresult: the remote results for the node
-
-    """
-    test = constants.NV_USERSCRIPTS not in nresult
-    self._ErrorIf(test, constants.CV_ENODEUSERSCRIPTS, ninfo.name,
-                  "did not return user scripts information")
-
-    broken_scripts = nresult.get(constants.NV_USERSCRIPTS, None)
-    if not test:
-      self._ErrorIf(broken_scripts, constants.CV_ENODEUSERSCRIPTS, ninfo.name,
-                    "user scripts not present or not executable: %s" %
-                    utils.CommaJoin(sorted(broken_scripts)))
-
-  def _VerifyNodeNetwork(self, ninfo, nresult):
-    """Check the node network connectivity results.
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nresult: the remote results for the node
-
-    """
-    test = constants.NV_NODELIST not in nresult
-    self._ErrorIf(test, constants.CV_ENODESSH, ninfo.name,
-                  "node hasn't returned node ssh connectivity data")
-    if not test:
-      if nresult[constants.NV_NODELIST]:
-        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
-          self._ErrorIf(True, constants.CV_ENODESSH, ninfo.name,
-                        "ssh communication with node '%s': %s", a_node, a_msg)
-
-    test = constants.NV_NODENETTEST not in nresult
-    self._ErrorIf(test, constants.CV_ENODENET, ninfo.name,
-                  "node hasn't returned node tcp connectivity data")
-    if not test:
-      if nresult[constants.NV_NODENETTEST]:
-        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
-        for anode in nlist:
-          self._ErrorIf(True, constants.CV_ENODENET, ninfo.name,
-                        "tcp communication with node '%s': %s",
-                        anode, nresult[constants.NV_NODENETTEST][anode])
-
-    test = constants.NV_MASTERIP not in nresult
-    self._ErrorIf(test, constants.CV_ENODENET, ninfo.name,
-                  "node hasn't returned node master IP reachability data")
-    if not test:
-      if not nresult[constants.NV_MASTERIP]:
-        if ninfo.uuid == self.master_node:
-          msg = "the master node cannot reach the master IP (not configured?)"
-        else:
-          msg = "cannot reach the master IP"
-        self._ErrorIf(True, constants.CV_ENODENET, ninfo.name, msg)
-
-  def _VerifyInstance(self, instance, node_image, diskstatus):
-    """Verify an instance.
-
-    This function checks that the required block devices are
-    available on the instance's nodes, and that the nodes are in the
-    correct state.
-
-    """
-    pnode_uuid = instance.primary_node
-    pnode_img = node_image[pnode_uuid]
-    groupinfo = self.cfg.GetAllNodeGroupsInfo()
-
-    node_vol_should = {}
-    self.cfg.GetInstanceLVsByNode(instance.uuid, lvmap=node_vol_should)
-
-    cluster = self.cfg.GetClusterInfo()
-    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
-                                                            self.group_info)
-    err = ComputeIPolicyInstanceViolation(ipolicy, instance, self.cfg)
-    self._ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance.name,
-                  utils.CommaJoin(err), code=self.ETYPE_WARNING)
-
-    for node_uuid in node_vol_should:
-      n_img = node_image[node_uuid]
-      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
-        # ignore missing volumes on offline or broken nodes
-        continue
-      for volume in node_vol_should[node_uuid]:
-        test = volume not in n_img.volumes
-        self._ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance.name,
-                      "volume %s missing on node %s", volume,
-                      self.cfg.GetNodeName(node_uuid))
-
-    if instance.admin_state == constants.ADMINST_UP:
-      test = instance.uuid not in pnode_img.instances and not pnode_img.offline
-      self._ErrorIf(test, constants.CV_EINSTANCEDOWN, instance.name,
-                    "instance not running on its primary node %s",
-                    self.cfg.GetNodeName(pnode_uuid))
-      self._ErrorIf(pnode_img.offline, constants.CV_EINSTANCEBADNODE,
-                    instance.name, "instance is marked as running and lives on"
-                    " offline node %s", self.cfg.GetNodeName(pnode_uuid))
-
-    diskdata = [(nname, success, status, idx)
-                for (nname, disks) in diskstatus.items()
-                for idx, (success, status) in enumerate(disks)]
-
-    for nname, success, bdev_status, idx in diskdata:
-      # the 'ghost node' construction in Exec() ensures that we have a
-      # node here
-      snode = node_image[nname]
-      bad_snode = snode.ghost or snode.offline
-      self._ErrorIf(instance.disks_active and
-                    not success and not bad_snode,
-                    constants.CV_EINSTANCEFAULTYDISK, instance.name,
-                    "couldn't retrieve status for disk/%s on %s: %s",
-                    idx, self.cfg.GetNodeName(nname), bdev_status)
-
-      if instance.disks_active and success and bdev_status.is_degraded:
-        msg = "disk/%s on %s is degraded" % (idx, self.cfg.GetNodeName(nname))
-
-        code = self.ETYPE_ERROR
-        accepted_lds = [constants.LDS_OKAY, constants.LDS_SYNC]
-
-        if bdev_status.ldisk_status in accepted_lds:
-          code = self.ETYPE_WARNING
-
-        msg += "; local disk state is '%s'" % \
-                 constants.LDS_NAMES[bdev_status.ldisk_status]
-
-        self._Error(constants.CV_EINSTANCEFAULTYDISK, instance.name, msg,
-                    code=code)
-
-    self._ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
-                  constants.CV_ENODERPC, self.cfg.GetNodeName(pnode_uuid),
-                  "instance %s, connection to primary node failed",
-                  instance.name)
-
-    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
-    self._ErrorIf(len(secondary_nodes) > 1,
-                  constants.CV_EINSTANCELAYOUT, instance.name,
-                  "instance has multiple secondary nodes: %s",
-                  utils.CommaJoin(secondary_nodes),
-                  code=self.ETYPE_WARNING)
-
-    inst_nodes = self.cfg.GetInstanceNodes(instance.uuid)
-    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes)
-    if any(es_flags.values()):
-      if instance.disk_template not in constants.DTS_EXCL_STORAGE:
-        # Disk template not compatible with exclusive_storage: no instance
-        # node should have the flag set
-        es_nodes = [n
-                    for (n, es) in es_flags.items()
-                    if es]
-        self._Error(constants.CV_EINSTANCEUNSUITABLENODE, instance.name,
-                    "instance has template %s, which is not supported on nodes"
-                    " that have exclusive storage set: %s",
-                    instance.disk_template,
-                    utils.CommaJoin(self.cfg.GetNodeNames(es_nodes)))
-      for (idx, disk) in enumerate(self.cfg.GetInstanceDisks(instance.uuid)):
-        self._ErrorIf(disk.spindles is None,
-                      constants.CV_EINSTANCEMISSINGCFGPARAMETER, instance.name,
-                      "number of spindles not configured for disk %s while"
-                      " exclusive storage is enabled, try running"
-                      " gnt-cluster repair-disk-sizes", idx)
-
-    if instance.disk_template in constants.DTS_INT_MIRROR:
-      instance_nodes = utils.NiceSort(inst_nodes)
-      instance_groups = {}
-
-      for node_uuid in instance_nodes:
-        instance_groups.setdefault(self.all_node_info[node_uuid].group,
-                                   []).append(node_uuid)
-
-      pretty_list = [
-        "%s (group %s)" % (utils.CommaJoin(self.cfg.GetNodeNames(nodes)),
-                           groupinfo[group].name)
-        # Sort so that we always list the primary node first.
-        for group, nodes in sorted(instance_groups.items(),
-                                   key=lambda (_, nodes): pnode_uuid in nodes,
-                                   reverse=True)]
-
-      self._ErrorIf(len(instance_groups) > 1,
-                    constants.CV_EINSTANCESPLITGROUPS,
-                    instance.name, "instance has primary and secondary nodes in"
-                    " different groups: %s", utils.CommaJoin(pretty_list),
-                    code=self.ETYPE_WARNING)
-
-    inst_nodes_offline = []
-    for snode in secondary_nodes:
-      s_img = node_image[snode]
-      self._ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
-                    self.cfg.GetNodeName(snode),
-                    "instance %s, connection to secondary node failed",
-                    instance.name)
-
-      if s_img.offline:
-        inst_nodes_offline.append(snode)
-
-    # warn that the instance lives on offline nodes
-    self._ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE,
-                  instance.name, "instance has offline secondary node(s) %s",
-                  utils.CommaJoin(self.cfg.GetNodeNames(inst_nodes_offline)))
-    # ... or ghost/non-vm_capable nodes
-    for node_uuid in inst_nodes:
-      self._ErrorIf(node_image[node_uuid].ghost, constants.CV_EINSTANCEBADNODE,
-                    instance.name, "instance lives on ghost node %s",
-                    self.cfg.GetNodeName(node_uuid))
-      self._ErrorIf(not node_image[node_uuid].vm_capable,
-                    constants.CV_EINSTANCEBADNODE, instance.name,
-                    "instance lives on non-vm_capable node %s",
-                    self.cfg.GetNodeName(node_uuid))
-
-  def _VerifyOrphanVolumes(self, vg_name, node_vol_should, node_image,
-                           reserved):
-    """Verify if there are any unknown volumes in the cluster.
-
-    The .os, .swap and backup volumes are ignored. All other volumes are
-    reported as unknown.
-
-    @type vg_name: string
-    @param vg_name: the name of the Ganeti-administered volume group
-    @type reserved: L{ganeti.utils.FieldSet}
-    @param reserved: a FieldSet of reserved volume names
-
-    """
-    for node_uuid, n_img in node_image.items():
-      if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
-          self.all_node_info[node_uuid].group != self.group_uuid):
-        # skip non-healthy nodes
-        continue
-      for volume in n_img.volumes:
-        # skip volumes not belonging to the ganeti-administered volume group
-        if volume.split('/')[0] != vg_name:
-          continue
-
-        test = ((node_uuid not in node_vol_should or
-                volume not in node_vol_should[node_uuid]) and
-                not reserved.Matches(volume))
-        self._ErrorIf(test, constants.CV_ENODEORPHANLV,
-                      self.cfg.GetNodeName(node_uuid),
-                      "volume %s is unknown", volume,
-                      code=_VerifyErrors.ETYPE_WARNING)
-
-  def _VerifyNPlusOneMemory(self, node_image, all_insts):
-    """Verify N+1 Memory Resilience.
-
-    Check that if one single node dies we can still start all the
-    instances it was primary for.
-
-    """
-    cluster_info = self.cfg.GetClusterInfo()
-    for node_uuid, n_img in node_image.items():
-      # This code checks that every node which is now listed as
-      # secondary has enough memory to host all the instances for which
-      # it is secondary, should a single other node in the cluster fail.
-      # FIXME: not ready for failover to an arbitrary node
-      # FIXME: does not support file-backed instances
-      # WARNING: we currently take into account down instances as well
-      # as up ones, considering that even if they're down someone
-      # might want to start them even in the event of a node failure.
-      if n_img.offline or \
-         self.all_node_info[node_uuid].group != self.group_uuid:
-        # we're skipping nodes marked offline and nodes in other groups from
-        # the N+1 warning, since most likely we don't have good memory
-        # information from them; we already list instances living on such
-        # nodes, and that's enough warning
-        continue
-      #TODO(dynmem): also consider ballooning out other instances
-      for prinode, inst_uuids in n_img.sbp.items():
-        needed_mem = 0
-        for inst_uuid in inst_uuids:
-          bep = cluster_info.FillBE(all_insts[inst_uuid])
-          if bep[constants.BE_AUTO_BALANCE]:
-            needed_mem += bep[constants.BE_MINMEM]
-        test = n_img.mfree < needed_mem
-        self._ErrorIf(test, constants.CV_ENODEN1,
-                      self.cfg.GetNodeName(node_uuid),
-                      "not enough memory to accomodate instance failovers"
-                      " should node %s fail (%dMiB needed, %dMiB available)",
-                      self.cfg.GetNodeName(prinode), needed_mem, n_img.mfree)
-
-  def _VerifyClientCertificates(self, nodes, all_nvinfo):
-    """Verifies the consistency of the client certificates.
-
-    This includes several aspects:
-      - the individual validation of all nodes' certificates
-      - the consistency of the master candidate certificate map
-      - the consistency of the master candidate certificate map with the
-        certificates that the master candidates are actually using.
-
-    @param nodes: the list of nodes to consider in this verification
-    @param all_nvinfo: the map of results of the verify_node call to
-      all nodes
-
-    """
-    candidate_certs = self.cfg.GetClusterInfo().candidate_certs
-    if candidate_certs is None or len(candidate_certs) == 0:
-      self._ErrorIf(
-        True, constants.CV_ECLUSTERCLIENTCERT, None,
-        "The cluster's list of master candidate certificates is empty."
-        " If you just updated the cluster, please run"
-        " 'gnt-cluster renew-crypto --new-node-certificates'.")
-      return
-
-    self._ErrorIf(
-      len(candidate_certs) != len(set(candidate_certs.values())),
-      constants.CV_ECLUSTERCLIENTCERT, None,
-      "There are at least two master candidates configured to use the same"
-      " certificate.")
-
-    # collect the client certificate
-    for node in nodes:
-      if node.offline:
-        continue
-
-      nresult = all_nvinfo[node.uuid]
-      if nresult.fail_msg or not nresult.payload:
-        continue
-
-      (errcode, msg) = nresult.payload.get(constants.NV_CLIENT_CERT, None)
-
-      self._ErrorIf(
-        errcode is not None, constants.CV_ECLUSTERCLIENTCERT, None,
-        "Client certificate of node '%s' failed validation: %s (code '%s')",
-        node.uuid, msg, errcode)
-
-      if not errcode:
-        digest = msg
-        if node.master_candidate:
-          if node.uuid in candidate_certs:
-            self._ErrorIf(
-              digest != candidate_certs[node.uuid],
-              constants.CV_ECLUSTERCLIENTCERT, None,
-              "Client certificate digest of master candidate '%s' does not"
-              " match its entry in the cluster's map of master candidate"
-              " certificates. Expected: %s Got: %s", node.uuid,
-              digest, candidate_certs[node.uuid])
-          else:
-            self._ErrorIf(
-              True, constants.CV_ECLUSTERCLIENTCERT, None,
-              "The master candidate '%s' does not have an entry in the"
-              " map of candidate certificates.", node.uuid)
-            self._ErrorIf(
-              digest in candidate_certs.values(),
-              constants.CV_ECLUSTERCLIENTCERT, None,
-              "Master candidate '%s' is using a certificate of another node.",
-              node.uuid)
-        else:
-          self._ErrorIf(
-            node.uuid in candidate_certs,
-            constants.CV_ECLUSTERCLIENTCERT, None,
-            "Node '%s' is not a master candidate, but still listed in the"
-            " map of master candidate certificates.", node.uuid)
-          self._ErrorIf(
-            (node.uuid not in candidate_certs) and
-              (digest in candidate_certs.values()),
-            constants.CV_ECLUSTERCLIENTCERT, None,
-            "Node '%s' is not a master candidate and is incorrectly using a"
-            " certificate of another node which is master candidate.",
-            node.uuid)
-
-  def _VerifySshSetup(self, nodes, all_nvinfo):
-    """Evaluates the verification results of the SSH setup and clutter test.
-
-    @param nodes: List of L{objects.Node} objects
-    @param all_nvinfo: RPC results
-
-    """
-    for node in nodes:
-      if not node.offline:
-        nresult = all_nvinfo[node.uuid]
-        if nresult.fail_msg or not nresult.payload:
-          self._ErrorIf(True, constants.CV_ENODESSH, node.name,
-                        "Could not verify the SSH setup of this node.")
-          return
-        for ssh_test in [constants.NV_SSH_SETUP, constants.NV_SSH_CLUTTER]:
-          result = nresult.payload.get(ssh_test, None)
-          error_msg = ""
-          if isinstance(result, list):
-            error_msg = " ".join(result)
-          self._ErrorIf(result,
-                        constants.CV_ENODESSH, None, error_msg)
-
-  def _VerifyFiles(self, nodes, master_node_uuid, all_nvinfo,
-                   (files_all, files_opt, files_mc, files_vm)):
-    """Verifies file checksums collected from all nodes.
-
-    @param nodes: List of L{objects.Node} objects
-    @param master_node_uuid: UUID of master node
-    @param all_nvinfo: RPC results
-
-    """
-    # Define functions determining which nodes to consider for a file
-    files2nodefn = [
-      (files_all, None),
-      (files_mc, lambda node: (node.master_candidate or
-                               node.uuid == master_node_uuid)),
-      (files_vm, lambda node: node.vm_capable),
-      ]
-
-    # Build mapping from filename to list of nodes which should have the file
-    nodefiles = {}
-    for (files, fn) in files2nodefn:
-      if fn is None:
-        filenodes = nodes
-      else:
-        filenodes = filter(fn, nodes)
-      nodefiles.update((filename,
-                        frozenset(map(operator.attrgetter("uuid"), filenodes)))
-                       for filename in files)
-
-    assert set(nodefiles) == (files_all | files_mc | files_vm)
-
-    fileinfo = dict((filename, {}) for filename in nodefiles)
-    ignore_nodes = set()
-
-    for node in nodes:
-      if node.offline:
-        ignore_nodes.add(node.uuid)
-        continue
-
-      nresult = all_nvinfo[node.uuid]
-
-      if nresult.fail_msg or not nresult.payload:
-        node_files = None
-      else:
-        fingerprints = nresult.payload.get(constants.NV_FILELIST, {})
-        node_files = dict((vcluster.LocalizeVirtualPath(key), value)
-                          for (key, value) in fingerprints.items())
-        del fingerprints
-
-      test = not (node_files and isinstance(node_files, dict))
-      self._ErrorIf(test, constants.CV_ENODEFILECHECK, node.name,
-                    "Node did not return file checksum data")
-      if test:
-        ignore_nodes.add(node.uuid)
-        continue
-
-      # Build per-checksum mapping from filename to nodes having it
-      for (filename, checksum) in node_files.items():
-        assert filename in nodefiles
-        fileinfo[filename].setdefault(checksum, set()).add(node.uuid)
-
-    for (filename, checksums) in fileinfo.items():
-      assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
-
-      # Nodes having the file
-      with_file = frozenset(node_uuid
-                            for node_uuids in fileinfo[filename].values()
-                            for node_uuid in node_uuids) - ignore_nodes
-
-      expected_nodes = nodefiles[filename] - ignore_nodes
-
-      # Nodes missing file
-      missing_file = expected_nodes - with_file
-
-      if filename in files_opt:
-        # All or no nodes
-        self._ErrorIf(missing_file and missing_file != expected_nodes,
-                      constants.CV_ECLUSTERFILECHECK, None,
-                      "File %s is optional, but it must exist on all or no"
-                      " nodes (not found on %s)",
-                      filename,
-                      utils.CommaJoin(
-                        utils.NiceSort(
-                          map(self.cfg.GetNodeName, missing_file))))
-      else:
-        self._ErrorIf(missing_file, constants.CV_ECLUSTERFILECHECK, None,
-                      "File %s is missing from node(s) %s", filename,
-                      utils.CommaJoin(
-                        utils.NiceSort(
-                          map(self.cfg.GetNodeName, missing_file))))
-
-        # Warn if a node has a file it shouldn't
-        unexpected = with_file - expected_nodes
-        self._ErrorIf(unexpected,
-                      constants.CV_ECLUSTERFILECHECK, None,
-                      "File %s should not exist on node(s) %s",
-                      filename, utils.CommaJoin(
-                        utils.NiceSort(map(self.cfg.GetNodeName, unexpected))))
-
-      # See if there are multiple versions of the file
-      test = len(checksums) > 1
-      if test:
-        variants = ["variant %s on %s" %
-                    (idx + 1,
-                     utils.CommaJoin(utils.NiceSort(
-                       map(self.cfg.GetNodeName, node_uuids))))
-                    for (idx, (checksum, node_uuids)) in
-                      enumerate(sorted(checksums.items()))]
-      else:
-        variants = []
-
-      self._ErrorIf(test, constants.CV_ECLUSTERFILECHECK, None,
-                    "File %s found with %s different checksums (%s)",
-                    filename, len(checksums), "; ".join(variants))
-
-  def _VerifyNodeDrbdHelper(self, ninfo, nresult, drbd_helper):
-    """Verify the drbd helper.
-
-    """
-    if drbd_helper:
-      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
-      test = (helper_result is None)
-      self._ErrorIf(test, constants.CV_ENODEDRBDHELPER, ninfo.name,
-                    "no drbd usermode helper returned")
-      if helper_result:
-        status, payload = helper_result
-        test = not status
-        self._ErrorIf(test, constants.CV_ENODEDRBDHELPER, ninfo.name,
-                      "drbd usermode helper check unsuccessful: %s", payload)
-        test = status and (payload != drbd_helper)
-        self._ErrorIf(test, constants.CV_ENODEDRBDHELPER, ninfo.name,
-                      "wrong drbd usermode helper: %s", payload)
-
-  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
-                      drbd_map):
-    """Verifies and the node DRBD status.
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nresult: the remote results for the node
-    @param instanceinfo: the dict of instances
-    @param drbd_helper: the configured DRBD usermode helper
-    @param drbd_map: the DRBD map as returned by
-        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
-
-    """
-    self._VerifyNodeDrbdHelper(ninfo, nresult, drbd_helper)
-
-    # compute the DRBD minors
-    node_drbd = {}
-    for minor, inst_uuid in drbd_map[ninfo.uuid].items():
-      test = inst_uuid not in instanceinfo
-      self._ErrorIf(test, constants.CV_ECLUSTERCFG, None,
-                    "ghost instance '%s' in temporary DRBD map", inst_uuid)
-      # ghost instance should not be running, but otherwise we
-      # don't give double warnings (both ghost instance and
-      # unallocated minor in use)
-      if test:
-        node_drbd[minor] = (inst_uuid, False)
-      else:
-        instance = instanceinfo[inst_uuid]
-        node_drbd[minor] = (inst_uuid, instance.disks_active)
-
-    # and now check them
-    used_minors = nresult.get(constants.NV_DRBDLIST, [])
-    test = not isinstance(used_minors, (tuple, list))
-    self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
-                  "cannot parse drbd status file: %s", str(used_minors))
-    if test:
-      # we cannot check drbd status
-      return
-
-    for minor, (inst_uuid, must_exist) in node_drbd.items():
-      test = minor not in used_minors and must_exist
-      self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
-                    "drbd minor %d of instance %s is not active", minor,
-                    self.cfg.GetInstanceName(inst_uuid))
-    for minor in used_minors:
-      test = minor not in node_drbd
-      self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
-                    "unallocated drbd minor %d is in use", minor)
-
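The drbd_map argument consumed above maps each node to its allocated DRBD
minors; a minimal sketch of its shape (UUIDs are made up for illustration):

  # Illustrative only: node UUID -> {minor number: instance UUID}
  drbd_map = {
    "node-uuid-a": {0: "inst-uuid-1", 1: "inst-uuid-2"},
    "node-uuid-b": {0: "inst-uuid-1"},
  }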
-  def _UpdateNodeOS(self, ninfo, nresult, nimg):
-    """Builds the node OS structures.
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nresult: the remote results for the node
-    @param nimg: the node image object
-
-    """
-    remote_os = nresult.get(constants.NV_OSLIST, None)
-    test = (not isinstance(remote_os, list) or
-            not compat.all(isinstance(v, list) and len(v) == 8
-                           for v in remote_os))
-
-    self._ErrorIf(test, constants.CV_ENODEOS, ninfo.name,
-                  "node hasn't returned valid OS data")
-
-    nimg.os_fail = test
-
-    if test:
-      return
-
-    os_dict = {}
-
-    for (name, os_path, status, diagnose,
-         variants, parameters, api_ver,
-         trusted) in nresult[constants.NV_OSLIST]:
-
-      if name not in os_dict:
-        os_dict[name] = []
-
-      # parameters is a list of lists instead of list of tuples due to
-      # JSON lacking a real tuple type, fix it:
-      parameters = [tuple(v) for v in parameters]
-      os_dict[name].append((os_path, status, diagnose,
-                            set(variants), set(parameters), set(api_ver),
-                            trusted))
-
-    nimg.oslist = os_dict
-
-  def _VerifyNodeOS(self, ninfo, nimg, base):
-    """Verifies the node OS list.
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nimg: the node image object
-    @param base: the 'template' node we match against (e.g. from the master)
-
-    """
-    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
-
-    beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
-    for os_name, os_data in nimg.oslist.items():
-      assert os_data, "Empty OS status for OS %s?!" % os_name
-      f_path, f_status, f_diag, f_var, f_param, f_api, f_trusted = os_data[0]
-      self._ErrorIf(not f_status, constants.CV_ENODEOS, ninfo.name,
-                    "Invalid OS %s (located at %s): %s",
-                    os_name, f_path, f_diag)
-      self._ErrorIf(len(os_data) > 1, constants.CV_ENODEOS, ninfo.name,
-                    "OS '%s' has multiple entries"
-                    " (first one shadows the rest): %s",
-                    os_name, utils.CommaJoin([v[0] for v in os_data]))
-      # comparisons with the 'base' image
-      test = os_name not in base.oslist
-      self._ErrorIf(test, constants.CV_ENODEOS, ninfo.name,
-                    "Extra OS %s not present on reference node (%s)",
-                    os_name, self.cfg.GetNodeName(base.uuid))
-      if test:
-        continue
-      assert base.oslist[os_name], "Base node has empty OS status?"
-      _, b_status, _, b_var, b_param, b_api, b_trusted = base.oslist[os_name][0]
-      if not b_status:
-        # base OS is invalid, skipping
-        continue
-      for kind, a, b in [("API version", f_api, b_api),
-                         ("variants list", f_var, b_var),
-                         ("parameters", beautify_params(f_param),
-                          beautify_params(b_param))]:
-        self._ErrorIf(a != b, constants.CV_ENODEOS, ninfo.name,
-                      "OS %s for %s differs from reference node %s:"
-                      " [%s] vs. [%s]", kind, os_name,
-                      self.cfg.GetNodeName(base.uuid),
-                      utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
-      for kind, a, b in [("trusted", f_trusted, b_trusted)]:
-        self._ErrorIf(a != b, constants.CV_ENODEOS, ninfo.name,
-                      "OS %s for %s differs from reference node %s:"
-                      " %s vs. %s", kind, os_name,
-                      self.cfg.GetNodeName(base.uuid), a, b)
-
-    # check any missing OSes
-    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
-    self._ErrorIf(missing, constants.CV_ENODEOS, ninfo.name,
-                  "OSes present on reference node %s"
-                  " but missing on this node: %s",
-                  self.cfg.GetNodeName(base.uuid), utils.CommaJoin(missing))
-
-  def _VerifyAcceptedFileStoragePaths(self, ninfo, nresult, is_master):
-    """Verifies paths in L{pathutils.FILE_STORAGE_PATHS_FILE}.
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nresult: the remote results for the node
-    @type is_master: bool
-    @param is_master: Whether node is the master node
-
-    """
-    cluster = self.cfg.GetClusterInfo()
-    if (is_master and
-        (cluster.IsFileStorageEnabled() or
-         cluster.IsSharedFileStorageEnabled())):
-      try:
-        fspaths = nresult[constants.NV_ACCEPTED_STORAGE_PATHS]
-      except KeyError:
-        # This should never happen
-        self._ErrorIf(True, constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
-                      "Node did not return forbidden file storage paths")
-      else:
-        self._ErrorIf(fspaths, constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
-                      "Found forbidden file storage paths: %s",
-                      utils.CommaJoin(fspaths))
-    else:
-      self._ErrorIf(constants.NV_ACCEPTED_STORAGE_PATHS in nresult,
-                    constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
-                    "Node should not have returned forbidden file storage"
-                    " paths")
-
-  def _VerifyStoragePaths(self, ninfo, nresult, file_disk_template,
-                          verify_key, error_key):
-    """Verifies (file) storage paths.
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nresult: the remote results for the node
-    @type file_disk_template: string
-    @param file_disk_template: file-based disk template, whose directory
-        is supposed to be verified
-    @type verify_key: string
-    @param verify_key: key for the verification map of this file
-        verification step
-    @param error_key: error key to be added to the verification results
-        in case something goes wrong in this verification step
-
-    """
-    assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
-              constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
-           ))
-
-    cluster = self.cfg.GetClusterInfo()
-    if cluster.IsDiskTemplateEnabled(file_disk_template):
-      self._ErrorIf(
-          verify_key in nresult,
-          error_key, ninfo.name,
-          "The configured %s storage path is unusable: %s" %
-          (file_disk_template, nresult.get(verify_key)))
-
-  def _VerifyFileStoragePaths(self, ninfo, nresult):
-    """Verifies (file) storage paths.
-
-    @see: C{_VerifyStoragePaths}
-
-    """
-    self._VerifyStoragePaths(
-        ninfo, nresult, constants.DT_FILE,
-        constants.NV_FILE_STORAGE_PATH,
-        constants.CV_ENODEFILESTORAGEPATHUNUSABLE)
-
-  def _VerifySharedFileStoragePaths(self, ninfo, nresult):
-    """Verifies (file) storage paths.
-
-    @see: C{_VerifyStoragePaths}
-
-    """
-    self._VerifyStoragePaths(
-        ninfo, nresult, constants.DT_SHARED_FILE,
-        constants.NV_SHARED_FILE_STORAGE_PATH,
-        constants.CV_ENODESHAREDFILESTORAGEPATHUNUSABLE)
-
-  def _VerifyGlusterStoragePaths(self, ninfo, nresult):
-    """Verifies (file) storage paths.
-
-    @see: C{_VerifyStoragePaths}
-
-    """
-    self._VerifyStoragePaths(
-        ninfo, nresult, constants.DT_GLUSTER,
-        constants.NV_GLUSTER_STORAGE_PATH,
-        constants.CV_ENODEGLUSTERSTORAGEPATHUNUSABLE)
-
-  def _VerifyOob(self, ninfo, nresult):
-    """Verifies out of band functionality of a node.
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nresult: the remote results for the node
-
-    """
-    # We just have to verify the paths on master and/or master candidates
-    # as the oob helper is invoked on the master
-    if ((ninfo.master_candidate or ninfo.master_capable) and
-        constants.NV_OOB_PATHS in nresult):
-      for path_result in nresult[constants.NV_OOB_PATHS]:
-        self._ErrorIf(path_result, constants.CV_ENODEOOBPATH,
-                      ninfo.name, path_result)
-
-  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
-    """Verifies and updates the node volume data.
-
-    This function will update a L{NodeImage}'s internal structures
-    with data from the remote call.
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nresult: the remote results for the node
-    @param nimg: the node image object
-    @param vg_name: the configured VG name
-
-    """
-    nimg.lvm_fail = True
-    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
-    if vg_name is None:
-      pass
-    elif isinstance(lvdata, basestring):
-      self._ErrorIf(True, constants.CV_ENODELVM, ninfo.name,
-                    "LVM problem on node: %s", utils.SafeEncode(lvdata))
-    elif not isinstance(lvdata, dict):
-      self._ErrorIf(True, constants.CV_ENODELVM, ninfo.name,
-                    "rpc call to node failed (lvlist)")
-    else:
-      nimg.volumes = lvdata
-      nimg.lvm_fail = False
-
-  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
-    """Verifies and updates the node instance list.
-
-    If the listing was successful, then updates this node's instance
-    list. Otherwise, it marks the RPC call as failed for the instance
-    list key.
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nresult: the remote results for the node
-    @param nimg: the node image object
-
-    """
-    idata = nresult.get(constants.NV_INSTANCELIST, None)
-    test = not isinstance(idata, list)
-    self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
-                  "rpc call to node failed (instancelist): %s",
-                  utils.SafeEncode(str(idata)))
-    if test:
-      nimg.hyp_fail = True
-    else:
-      nimg.instances = [uuid for (uuid, _) in
-                        self.cfg.GetMultiInstanceInfoByName(idata)]
-
-  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
-    """Verifies and computes a node information map
-
-    @type ninfo: L{objects.Node}
-    @param ninfo: the node to check
-    @param nresult: the remote results for the node
-    @param nimg: the node image object
-    @param vg_name: the configured VG name
-
-    """
-    # try to read free memory (from the hypervisor)
-    hv_info = nresult.get(constants.NV_HVINFO, None)
-    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
-    self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
-                  "rpc call to node failed (hvinfo)")
-    if not test:
-      try:
-        nimg.mfree = int(hv_info["memory_free"])
-      except (ValueError, TypeError):
-        self._ErrorIf(True, constants.CV_ENODERPC, ninfo.name,
-                      "node returned invalid nodeinfo, check hypervisor")
-
-    # FIXME: devise a free space model for file based instances as well
-    if vg_name is not None:
-      test = (constants.NV_VGLIST not in nresult or
-              vg_name not in nresult[constants.NV_VGLIST])
-      self._ErrorIf(test, constants.CV_ENODELVM, ninfo.name,
-                    "node didn't return data for the volume group '%s'"
-                    " - it is either missing or broken", vg_name)
-      if not test:
-        try:
-          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
-        except (ValueError, TypeError):
-          self._ErrorIf(True, constants.CV_ENODERPC, ninfo.name,
-                        "node returned invalid LVM info, check LVM status")
-
-  def _CollectDiskInfo(self, node_uuids, node_image, instanceinfo):
-    """Gets per-disk status information for all instances.
-
-    @type node_uuids: list of strings
-    @param node_uuids: Node UUIDs
-    @type node_image: dict of (UUID, L{NodeImage})
-    @param node_image: Node images
-    @type instanceinfo: dict of (UUID, L{objects.Instance})
-    @param instanceinfo: Instance objects
-    @rtype: {instance: {node: [(success, payload)]}}
-    @return: a dictionary of per-instance dictionaries with nodes as
-        keys and disk information as values; the disk information is a
-        list of tuples (success, payload)
-
-    """
-    node_disks = {}
-    node_disks_dev_inst_only = {}
-    diskless_instances = set()
-    nodisk_instances = set()
-    diskless = constants.DT_DISKLESS
-
-    for nuuid in node_uuids:
-      node_inst_uuids = list(itertools.chain(node_image[nuuid].pinst,
-                                             node_image[nuuid].sinst))
-      diskless_instances.update(uuid for uuid in node_inst_uuids
-                                if instanceinfo[uuid].disk_template == diskless)
-      disks = [(inst_uuid, disk)
-               for inst_uuid in node_inst_uuids
-               for disk in self.cfg.GetInstanceDisks(inst_uuid)]
-
-      if not disks:
-        nodisk_instances.update(uuid for uuid in node_inst_uuids
-                                if instanceinfo[uuid].disk_template != diskless)
-        # No need to collect data
-        continue
-
-      node_disks[nuuid] = disks
-
-      # _AnnotateDiskParams makes already copies of the disks
-      dev_inst_only = []
-      for (inst_uuid, dev) in disks:
-        (anno_disk,) = AnnotateDiskParams(instanceinfo[inst_uuid], [dev],
-                                          self.cfg)
-        dev_inst_only.append((anno_disk, instanceinfo[inst_uuid]))
-
-      node_disks_dev_inst_only[nuuid] = dev_inst_only
-
-    assert len(node_disks) == len(node_disks_dev_inst_only)
-
-    # Collect data from all nodes with disks
-    result = self.rpc.call_blockdev_getmirrorstatus_multi(
-               node_disks.keys(), node_disks_dev_inst_only)
-
-    assert len(result) == len(node_disks)
-
-    instdisk = {}
-
-    for (nuuid, nres) in result.items():
-      node = self.cfg.GetNodeInfo(nuuid)
-      disks = node_disks[node.uuid]
-
-      if nres.offline:
-        # No data from this node
-        data = len(disks) * [(False, "node offline")]
-      else:
-        msg = nres.fail_msg
-        self._ErrorIf(msg, constants.CV_ENODERPC, node.name,
-                      "while getting disk information: %s", msg)
-        if msg:
-          # No data from this node
-          data = len(disks) * [(False, msg)]
-        else:
-          data = []
-          for idx, i in enumerate(nres.payload):
-            if isinstance(i, (tuple, list)) and len(i) == 2:
-              data.append(i)
-            else:
-              logging.warning("Invalid result from node %s, entry %d: %s",
-                              node.name, idx, i)
-              data.append((False, "Invalid result from the remote node"))
-
-      for ((inst_uuid, _), status) in zip(disks, data):
-        instdisk.setdefault(inst_uuid, {}).setdefault(node.uuid, []) \
-          .append(status)
-
-    # Add empty entries for diskless instances.
-    for inst_uuid in diskless_instances:
-      assert inst_uuid not in instdisk
-      instdisk[inst_uuid] = {}
-    # ...and disk-full instances that happen to have no disks
-    for inst_uuid in nodisk_instances:
-      assert inst_uuid not in instdisk
-      instdisk[inst_uuid] = {}
-
-    assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
-                      len(nuuids) <= len(
-                        self.cfg.GetInstanceNodes(instanceinfo[inst].uuid)) and
-                      compat.all(isinstance(s, (tuple, list)) and
-                                 len(s) == 2 for s in statuses)
-                      for inst, nuuids in instdisk.items()
-                      for nuuid, statuses in nuuids.items())
-    if __debug__:
-      instdisk_keys = set(instdisk)
-      instanceinfo_keys = set(instanceinfo)
-      assert instdisk_keys == instanceinfo_keys, \
-        ("instdisk keys (%s) do not match instanceinfo keys (%s)" %
-         (instdisk_keys, instanceinfo_keys))
-
-    return instdisk
-
-  @staticmethod
-  def _SshNodeSelector(group_uuid, all_nodes):
-    """Create endless iterators for all potential SSH check hosts.
-
-    """
-    nodes = [node for node in all_nodes
-             if (node.group != group_uuid and
-                 not node.offline)]
-    keyfunc = operator.attrgetter("group")
-
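-    # Group the online nodes outside this group by their node group and return
-    # one endlessly cycling iterator of sorted node names per foreign group.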
-    return map(itertools.cycle,
-               [sorted(map(operator.attrgetter("name"), names))
-                for _, names in itertools.groupby(sorted(nodes, key=keyfunc),
-                                                  keyfunc)])
-
-  @classmethod
-  def _SelectSshCheckNodes(cls, group_nodes, group_uuid, all_nodes):
-    """Choose which nodes should talk to which other nodes.
-
-    We will make nodes contact all nodes in their group, and one node from
-    every other group.
-
-    @rtype: tuple of (list of strings, dict of strings to list of strings,
-      list of strings)
-    @return: a tuple containing the list of all online nodes, a dictionary
-      mapping node names to additional nodes of other node groups to which
-      connectivity should be tested, and a list of all online master
-      candidates
-
-    @warning: This algorithm has a known issue if one node group is much
-      smaller than others (e.g. just one node). In such a case all other
-      nodes will talk to the single node.
-
-    """
-    online_nodes = sorted(node.name for node in group_nodes if not node.offline)
-    online_mcs = sorted(node.name for node in group_nodes
-                        if (node.master_candidate and not node.offline))
-    sel = cls._SshNodeSelector(group_uuid, all_nodes)
-
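-    # Map every online node to the next name from each foreign group's cycling
-    # iterator, spreading the cross-group SSH checks across nodes.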
-    return (online_nodes,
-            dict((name, sorted([i.next() for i in sel]))
-                 for name in online_nodes),
-            online_mcs)
-
-  def _PrepareSshSetupCheck(self):
-    """Prepare the input data for the SSH setup verification.
-
-    """
-    all_nodes_info = self.cfg.GetAllNodesInfo()
-    potential_master_candidates = self.cfg.GetPotentialMasterCandidates()
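-    # For every node, record (uuid, name, is_master_candidate,
-    # is_potential_master_candidate, is_online).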
-    node_status = [
-      (uuid, node_info.name, node_info.master_candidate,
-       node_info.name in potential_master_candidates, not node_info.offline)
-      for (uuid, node_info) in all_nodes_info.items()]
-    return node_status
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    Cluster-Verify hooks are run only in the post phase; if they fail, their
-    output is logged in the verify output and the verification fails.
-
-    """
-    env = {
-      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags()),
-      }
-
-    env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
-               for node in self.my_node_info.values())
-
-    return env
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    return ([], list(self.my_node_info.keys()))
-
-  @staticmethod
-  def _VerifyOtherNotes(feedback_fn, i_non_redundant, i_non_a_balanced,
-                        i_offline, n_offline, n_drained):
-    feedback_fn("* Other Notes")
-    if i_non_redundant:
-      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
-                  % len(i_non_redundant))
-
-    if i_non_a_balanced:
-      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
-                  % len(i_non_a_balanced))
-
-    if i_offline:
-      feedback_fn("  - NOTICE: %d offline instance(s) found." % i_offline)
-
-    if n_offline:
-      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)
-
-    if n_drained:
-      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)
-
-  def Exec(self, feedback_fn): # pylint: disable=R0915
-    """Verify integrity of the node group, performing various test on nodes.
-
-    """
-    # This method has too many local variables. pylint: disable=R0914
-    feedback_fn("* Verifying group '%s'" % self.group_info.name)
-
-    if not self.my_node_uuids:
-      # empty node group
-      feedback_fn("* Empty node group, skipping verification")
-      return True
-
-    self.bad = False
-    verbose = self.op.verbose
-    self._feedback_fn = feedback_fn
-
-    vg_name = self.cfg.GetVGName()
-    drbd_helper = self.cfg.GetDRBDHelper()
-    cluster = self.cfg.GetClusterInfo()
-    hypervisors = cluster.enabled_hypervisors
-    node_data_list = self.my_node_info.values()
-
-    i_non_redundant = [] # Non redundant instances
-    i_non_a_balanced = [] # Non auto-balanced instances
-    i_offline = 0 # Count of offline instances
-    n_offline = 0 # Count of offline nodes
-    n_drained = 0 # Count of nodes being drained
-    node_vol_should = {}
-
-    # FIXME: verify OS list
-
-    # File verification
-    filemap = ComputeAncillaryFiles(cluster, False)
-
-    # do local checksums
-    master_node_uuid = self.master_node = self.cfg.GetMasterNode()
-    master_ip = self.cfg.GetMasterIP()
-
-    feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_uuids))
-
-    user_scripts = []
-    if self.cfg.GetUseExternalMipScript():
-      user_scripts.append(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT)
-
-    node_verify_param = {
-      constants.NV_FILELIST:
-        map(vcluster.MakeVirtualPath,
-            utils.UniqueSequence(filename
-                                 for files in filemap
-                                 for filename in files)),
-      constants.NV_NODELIST:
-        self._SelectSshCheckNodes(node_data_list, self.group_uuid,
-                                  self.all_node_info.values()),
-      constants.NV_HYPERVISOR: hypervisors,
-      constants.NV_HVPARAMS:
-        _GetAllHypervisorParameters(cluster, self.all_inst_info.values()),
-      constants.NV_NODENETTEST: [(node.name, node.primary_ip, node.secondary_ip)
-                                 for node in node_data_list
-                                 if not node.offline],
-      constants.NV_INSTANCELIST: hypervisors,
-      constants.NV_VERSION: None,
-      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
-      constants.NV_NODESETUP: None,
-      constants.NV_TIME: None,
-      constants.NV_MASTERIP: (self.cfg.GetMasterNodeName(), master_ip),
-      constants.NV_OSLIST: None,
-      constants.NV_NONVMNODES: self.cfg.GetNonVmCapableNodeNameList(),
-      constants.NV_USERSCRIPTS: user_scripts,
-      constants.NV_CLIENT_CERT: None,
-      }
-
-    if self.cfg.GetClusterInfo().modify_ssh_setup:
-      node_verify_param[constants.NV_SSH_SETUP] = self._PrepareSshSetupCheck()
-      if self.op.verify_clutter:
-        node_verify_param[constants.NV_SSH_CLUTTER] = True
-
-    if vg_name is not None:
-      node_verify_param[constants.NV_VGLIST] = None
-      node_verify_param[constants.NV_LVLIST] = vg_name
-      node_verify_param[constants.NV_PVLIST] = [vg_name]
-
-    if cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
-      if drbd_helper:
-        node_verify_param[constants.NV_DRBDVERSION] = None
-        node_verify_param[constants.NV_DRBDLIST] = None
-        node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
-
-    if cluster.IsFileStorageEnabled() or \
-        cluster.IsSharedFileStorageEnabled():
-      # Load file storage paths only from master node
-      node_verify_param[constants.NV_ACCEPTED_STORAGE_PATHS] = \
-        self.cfg.GetMasterNodeName()
-      if cluster.IsFileStorageEnabled():
-        node_verify_param[constants.NV_FILE_STORAGE_PATH] = \
-          cluster.file_storage_dir
-      if cluster.IsSharedFileStorageEnabled():
-        node_verify_param[constants.NV_SHARED_FILE_STORAGE_PATH] = \
-          cluster.shared_file_storage_dir
-
-    # bridge checks
-    # FIXME: this needs to be changed per node-group, not cluster-wide
-    bridges = set()
-    default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
-    if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
-      bridges.add(default_nicpp[constants.NIC_LINK])
-    for instance in self.my_inst_info.values():
-      for nic in instance.nics:
-        full_nic = cluster.SimpleFillNIC(nic.nicparams)
-        if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
-          bridges.add(full_nic[constants.NIC_LINK])
-
-    if bridges:
-      node_verify_param[constants.NV_BRIDGES] = list(bridges)
-
-    # Build our expected cluster state
-    node_image = dict((node.uuid, self.NodeImage(offline=node.offline,
-                                                 uuid=node.uuid,
-                                                 vm_capable=node.vm_capable))
-                      for node in node_data_list)
-
-    # Gather OOB paths
-    oob_paths = []
-    for node in self.all_node_info.values():
-      path = SupportsOob(self.cfg, node)
-      if path and path not in oob_paths:
-        oob_paths.append(path)
-
-    if oob_paths:
-      node_verify_param[constants.NV_OOB_PATHS] = oob_paths
-
-    for inst_uuid in self.my_inst_uuids:
-      instance = self.my_inst_info[inst_uuid]
-      if instance.admin_state == constants.ADMINST_OFFLINE:
-        i_offline += 1
-
-      inst_nodes = self.cfg.GetInstanceNodes(instance.uuid)
-      for nuuid in inst_nodes:
-        if nuuid not in node_image:
-          gnode = self.NodeImage(uuid=nuuid)
-          gnode.ghost = (nuuid not in self.all_node_info)
-          node_image[nuuid] = gnode
-
-      self.cfg.GetInstanceLVsByNode(instance.uuid, lvmap=node_vol_should)
-
-      pnode = instance.primary_node
-      node_image[pnode].pinst.append(instance.uuid)
-
-      for snode in self.cfg.GetInstanceSecondaryNodes(instance.uuid):
-        nimg = node_image[snode]
-        nimg.sinst.append(instance.uuid)
-        if pnode not in nimg.sbp:
-          nimg.sbp[pnode] = []
-        nimg.sbp[pnode].append(instance.uuid)
-
-    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
-                                               self.my_node_info.keys())
-    # The value of exclusive_storage should be the same across the group, so if
-    # it's True for at least one node, we act as if it were set for all the nodes
-    self._exclusive_storage = compat.any(es_flags.values())
-    if self._exclusive_storage:
-      node_verify_param[constants.NV_EXCLUSIVEPVS] = True
-
-    node_group_uuids = dict(map(lambda n: (n.name, n.group),
-                                self.cfg.GetAllNodesInfo().values()))
-    groups_config = self.cfg.GetAllNodeGroupsInfoDict()
-
-    # At this point, we have the in-memory data structures complete,
-    # except for the runtime information, which we'll gather next
-
-    # NOTE: Here we lock the configuration for the duration of RPC calls,
-    # which means that the cluster configuration changes are blocked during
-    # this period.
-    # This is something that should be done only exceptionally and only for
-    # justified cases!
-    # In this case, we need the lock as we can only verify the integrity of
-    # the configuration files on MCs if we know nobody else is modifying them.
-    # FIXME: The check for integrity of config.data should be moved to
-    # WConfD, which is the only one who can otherwise ensure nobody
-    # will modify the configuration during the check.
-    with self.cfg.GetConfigManager(shared=True):
-      feedback_fn("* Gathering information about nodes (%s nodes)" %
-                  len(self.my_node_uuids))
-      # Force the configuration to be fully distributed before doing any tests
-      self.cfg.FlushConfig()
-      # Due to the way our RPC system works, exact response times cannot be
-      # guaranteed (e.g. a broken node could run into a timeout). By keeping
-      # the time before and after executing the request, we can at least have
-      # a time window.
-      nvinfo_starttime = time.time()
-      # Get lock on the configuration so that nobody modifies it concurrently.
-      # Otherwise it can be modified by other jobs, failing the consistency
-      # test.
-      # NOTE: This is an exceptional situation; we should otherwise avoid
-      # locking the configuration for anything but very fast, pure operations.
-      cluster_name = self.cfg.GetClusterName()
-      hvparams = self.cfg.GetClusterInfo().hvparams
-      all_nvinfo = self.rpc.call_node_verify(self.my_node_uuids,
-                                             node_verify_param,
-                                             cluster_name,
-                                             hvparams,
-                                             node_group_uuids,
-                                             groups_config)
-      nvinfo_endtime = time.time()
-
-      if self.extra_lv_nodes and vg_name is not None:
-        feedback_fn("* Gathering information about extra nodes (%s nodes)" %
-                    len(self.extra_lv_nodes))
-        extra_lv_nvinfo = \
-            self.rpc.call_node_verify(self.extra_lv_nodes,
-                                      {constants.NV_LVLIST: vg_name},
-                                      self.cfg.GetClusterName(),
-                                      self.cfg.GetClusterInfo().hvparams,
-                                      node_group_uuids,
-                                      groups_config)
-      else:
-        extra_lv_nvinfo = {}
-
-      # If not all nodes are being checked, we need to make sure the master
-      # node and a non-checked vm_capable node are in the list.
-      absent_node_uuids = set(self.all_node_info).difference(self.my_node_info)
-      if absent_node_uuids:
-        vf_nvinfo = all_nvinfo.copy()
-        vf_node_info = list(self.my_node_info.values())
-        additional_node_uuids = []
-        if master_node_uuid not in self.my_node_info:
-          additional_node_uuids.append(master_node_uuid)
-          vf_node_info.append(self.all_node_info[master_node_uuid])
-        # Add the first vm_capable node we find which is not included,
-        # excluding the master node (which we already have)
-        for node_uuid in absent_node_uuids:
-          nodeinfo = self.all_node_info[node_uuid]
-          if (nodeinfo.vm_capable and not nodeinfo.offline and
-              node_uuid != master_node_uuid):
-            additional_node_uuids.append(node_uuid)
-            vf_node_info.append(self.all_node_info[node_uuid])
-            break
-        key = constants.NV_FILELIST
-
-        feedback_fn("* Gathering information about the master node")
-        vf_nvinfo.update(self.rpc.call_node_verify(
-           additional_node_uuids, {key: node_verify_param[key]},
-           self.cfg.GetClusterName(), self.cfg.GetClusterInfo().hvparams,
-           node_group_uuids,
-           groups_config))
-      else:
-        vf_nvinfo = all_nvinfo
-        vf_node_info = self.my_node_info.values()
-
-    all_drbd_map = self.cfg.ComputeDRBDMap()
-
-    feedback_fn("* Gathering disk information (%s nodes)" %
-                len(self.my_node_uuids))
-    instdisk = self._CollectDiskInfo(self.my_node_info.keys(), node_image,
-                                     self.my_inst_info)
-
-    feedback_fn("* Verifying configuration file consistency")
-
-    self._VerifyClientCertificates(self.my_node_info.values(), all_nvinfo)
-    if self.cfg.GetClusterInfo().modify_ssh_setup:
-      self._VerifySshSetup(self.my_node_info.values(), all_nvinfo)
-    self._VerifyFiles(vf_node_info, master_node_uuid, vf_nvinfo, filemap)
-
-    feedback_fn("* Verifying node status")
-
-    refos_img = None
-
-    for node_i in node_data_list:
-      nimg = node_image[node_i.uuid]
-
-      if node_i.offline:
-        if verbose:
-          feedback_fn("* Skipping offline node %s" % (node_i.name,))
-        n_offline += 1
-        continue
-
-      if node_i.uuid == master_node_uuid:
-        ntype = "master"
-      elif node_i.master_candidate:
-        ntype = "master candidate"
-      elif node_i.drained:
-        ntype = "drained"
-        n_drained += 1
-      else:
-        ntype = "regular"
-      if verbose:
-        feedback_fn("* Verifying node %s (%s)" % (node_i.name, ntype))
-
-      msg = all_nvinfo[node_i.uuid].fail_msg
-      self._ErrorIf(msg, constants.CV_ENODERPC, node_i.name,
-                    "while contacting node: %s", msg)
-      if msg:
-        nimg.rpc_fail = True
-        continue
-
-      nresult = all_nvinfo[node_i.uuid].payload
-
-      nimg.call_ok = self._VerifyNode(node_i, nresult)
-      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
-      self._VerifyNodeNetwork(node_i, nresult)
-      self._VerifyNodeUserScripts(node_i, nresult)
-      self._VerifyOob(node_i, nresult)
-      self._VerifyAcceptedFileStoragePaths(node_i, nresult,
-                                           node_i.uuid == master_node_uuid)
-      self._VerifyFileStoragePaths(node_i, nresult)
-      self._VerifySharedFileStoragePaths(node_i, nresult)
-      self._VerifyGlusterStoragePaths(node_i, nresult)
-
-      if nimg.vm_capable:
-        self._UpdateVerifyNodeLVM(node_i, nresult, vg_name, nimg)
-        if constants.DT_DRBD8 in cluster.enabled_disk_templates:
-          self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
-                               all_drbd_map)
-
-        if (constants.DT_PLAIN in cluster.enabled_disk_templates) or \
-            (constants.DT_DRBD8 in cluster.enabled_disk_templates):
-          self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
-        self._UpdateNodeInstances(node_i, nresult, nimg)
-        self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
-        self._UpdateNodeOS(node_i, nresult, nimg)
-
-        if not nimg.os_fail:
-          if refos_img is None:
-            refos_img = nimg
-          self._VerifyNodeOS(node_i, nimg, refos_img)
-        self._VerifyNodeBridges(node_i, nresult, bridges)
-
-        # Check whether all running instances are primary for the node. (This
-        # can no longer be done from _VerifyInstance below, since some of the
-        # wrong instances could be from other node groups.)
-        non_primary_inst_uuids = set(nimg.instances).difference(nimg.pinst)
-
-        for inst_uuid in non_primary_inst_uuids:
-          test = inst_uuid in self.all_inst_info
-          self._ErrorIf(test, constants.CV_EINSTANCEWRONGNODE,
-                        self.cfg.GetInstanceName(inst_uuid),
-                        "instance should not run on node %s", node_i.name)
-          self._ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
-                        "node is running unknown instance %s", inst_uuid)
-
-    self._VerifyGroupDRBDVersion(all_nvinfo)
-    self._VerifyGroupLVM(node_image, vg_name)
-
-    for node_uuid, result in extra_lv_nvinfo.items():
-      self._UpdateNodeVolumes(self.all_node_info[node_uuid], result.payload,
-                              node_image[node_uuid], vg_name)
-
-    feedback_fn("* Verifying instance status")
-    for inst_uuid in self.my_inst_uuids:
-      instance = self.my_inst_info[inst_uuid]
-      if verbose:
-        feedback_fn("* Verifying instance %s" % instance.name)
-      self._VerifyInstance(instance, node_image, instdisk[inst_uuid])
-
-      # If the instance is non-redundant we cannot survive losing its primary
-      # node, so we are not N+1 compliant.
-      if instance.disk_template not in constants.DTS_MIRRORED:
-        i_non_redundant.append(instance)
-
-      if not cluster.FillBE(instance)[constants.BE_AUTO_BALANCE]:
-        i_non_a_balanced.append(instance)
-
-    feedback_fn("* Verifying orphan volumes")
-    reserved = utils.FieldSet(*cluster.reserved_lvs)
-
-    # We will get spurious "unknown volume" warnings if any node of this group
-    # is secondary for an instance whose primary is in another group. To avoid
-    # them, we find these instances and add their volumes to node_vol_should.
-    for instance in self.all_inst_info.values():
-      for secondary in self.cfg.GetInstanceSecondaryNodes(instance.uuid):
-        if (secondary in self.my_node_info
-            and instance.name not in self.my_inst_info):
-          self.cfg.GetInstanceLVsByNode(instance.uuid, lvmap=node_vol_should)
-          break
-
-    self._VerifyOrphanVolumes(vg_name, node_vol_should, node_image, reserved)
-
-    if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
-      feedback_fn("* Verifying N+1 Memory redundancy")
-      self._VerifyNPlusOneMemory(node_image, self.my_inst_info)
-
-    self._VerifyOtherNotes(feedback_fn, i_non_redundant, i_non_a_balanced,
-                           i_offline, n_offline, n_drained)
-
-    return not self.bad
-
-  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
-    """Analyze the post-hooks' result
-
-    This method analyses the hook result, handles it, and sends some
-    nicely-formatted feedback back to the user.
-
-    @param phase: one of L{constants.HOOKS_PHASE_POST} or
-        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
-    @param hooks_results: the results of the multi-node hooks rpc call
-    @param feedback_fn: function used to send feedback back to the caller
-    @param lu_result: previous Exec result
-    @return: the new Exec result, based on the previous result
-        and hook results
-
-    """
-    # We only really run POST phase hooks, only for non-empty groups,
-    # and are only interested in their results
-    if not self.my_node_uuids:
-      # empty node group
-      pass
-    elif phase == constants.HOOKS_PHASE_POST:
-      # Used to change hooks' output to proper indentation
-      feedback_fn("* Hooks Results")
-      assert hooks_results, "invalid result from hooks"
-
-      for node_name in hooks_results:
-        res = hooks_results[node_name]
-        msg = res.fail_msg
-        test = msg and not res.offline
-        self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
-                      "Communication failure in hooks execution: %s", msg)
-        if test:
-          lu_result = False
-          continue
-        if res.offline:
-          # No need to investigate payload if node is offline
-          continue
-        for script, hkr, output in res.payload:
-          test = hkr == constants.HKR_FAIL
-          self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
-                        "Script %s failed, output:", script)
-          if test:
-            output = self._HOOKS_INDENT_RE.sub("      ", output)
-            feedback_fn("%s" % output)
-            lu_result = False
-
-    return lu_result
-
-
-class LUClusterVerifyDisks(NoHooksLU):
-  """Verifies the cluster disks status.
-
-  """
-  REQ_BGL = False
-
-  def ExpandNames(self):
-    self.share_locks = ShareAll()
-    self.needed_locks = {
-      locking.LEVEL_NODEGROUP: locking.ALL_SET,
-      }
-
-  def Exec(self, feedback_fn):
-    group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
-
-    # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
-    return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
-                           for group in group_names])
diff --git a/lib/cmdlib/cluster/__init__.py b/lib/cmdlib/cluster/__init__.py
new file mode 100644
index 0000000..51474d6
--- /dev/null
+++ b/lib/cmdlib/cluster/__init__.py
@@ -0,0 +1,1802 @@
+#
+#
+
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Logical units dealing with the cluster."""
+
+import copy
+import itertools
+import logging
+import operator
+import os
+import re
+import time
+
+from ganeti import compat
+from ganeti import constants
+from ganeti import errors
+from ganeti import hypervisor
+from ganeti import locking
+from ganeti import masterd
+from ganeti import netutils
+from ganeti import objects
+from ganeti import opcodes
+from ganeti import pathutils
+from ganeti import query
+import ganeti.rpc.node as rpc
+from ganeti import runtime
+from ganeti import ssh
+from ganeti import uidpool
+from ganeti import utils
+from ganeti import vcluster
+
+from ganeti.cmdlib.base import NoHooksLU, QueryBase, LogicalUnit, \
+  ResultWithJobs
+from ganeti.cmdlib.common import ShareAll, RunPostHook, \
+  ComputeAncillaryFiles, RedistributeAncillaryFiles, UploadHelper, \
+  GetWantedInstances, MergeAndVerifyHvState, MergeAndVerifyDiskState, \
+  GetUpdatedIPolicy, ComputeNewInstanceViolations, GetUpdatedParams, \
+  CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
+  ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob, \
+  CheckIpolicyVsDiskTemplates, CheckDiskAccessModeValidity, \
+  CheckDiskAccessModeConsistency, GetClientCertDigest, \
+  AddInstanceCommunicationNetworkOp, ConnectInstanceCommunicationNetworkOp, \
+  CheckImageValidity, EnsureKvmdOnNodes
+
+import ganeti.masterd.instance
+
+
+class LUClusterRenewCrypto(NoHooksLU):
+  """Renew the cluster's crypto tokens.
+
+  """
+
+  _MAX_NUM_RETRIES = 3
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {
+      locking.LEVEL_NODE: locking.ALL_SET,
+    }
+    self.share_locks = ShareAll()
+    self.share_locks[locking.LEVEL_NODE] = 0
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This determines whether the renewal of SSH keys has to be suppressed,
+    which is the case when it was requested but the cluster is configured
+    not to modify its SSH setup.
+
+    Any errors are signaled by raising errors.OpPrereqError.
+
+    """
+    self._ssh_renewal_suppressed = \
+      not self.cfg.GetClusterInfo().modify_ssh_setup and self.op.ssh_keys
+
+  def _RenewNodeSslCertificates(self, feedback_fn):
+    """Renews the nodes' SSL certificates.
+
+    Note that most of this operation is done in gnt_cluster.py; this LU only
+    takes care of the renewal of the client SSL certificates.
+
+    """
+    master_uuid = self.cfg.GetMasterNode()
+    cluster = self.cfg.GetClusterInfo()
+
+    logging.debug("Renewing the master's SSL node certificate."
+                  " Master's UUID: %s.", master_uuid)
+
+    # mapping node UUIDs to client certificate digests
+    digest_map = {}
+    master_digest = utils.GetCertificateDigest(
+        cert_filename=pathutils.NODED_CLIENT_CERT_FILE)
+    digest_map[master_uuid] = master_digest
+    logging.debug("Adding the master's SSL node certificate digest to the"
+                  " configuration. Master's UUID: %s, Digest: %s",
+                  master_uuid, master_digest)
+
+    node_errors = {}
+    nodes = self.cfg.GetAllNodesInfo()
+    logging.debug("Renewing non-master nodes' node certificates.")
+    for (node_uuid, node_info) in nodes.items():
+      if node_info.offline:
+        logging.info("* Skipping offline node %s", node_info.name)
+        continue
+      if node_uuid != master_uuid:
+        logging.debug("Adding certificate digest of node '%s'.", node_uuid)
+        last_exception = None
+        for i in range(self._MAX_NUM_RETRIES):
+          try:
+            if node_info.master_candidate:
+              node_digest = GetClientCertDigest(self, node_uuid)
+              digest_map[node_uuid] = node_digest
+              logging.debug("Added the node's certificate to candidate"
+                            " certificate list. Current list: %s.",
+                            str(cluster.candidate_certs))
+            break
+          except errors.OpExecError as e:
+            last_exception = e
+            logging.error("Could not fetch a non-master node's SSL node"
+                          " certificate at attempt no. %s. The node's UUID"
+                          " is %s, and the error was: %s.",
+                          str(i), node_uuid, e)
+        else:
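+          # The 'else' clause of the 'for' loop runs only when the loop ended
+          # without a 'break', i.e. all retry attempts failed; keep the last
+          # error for reporting.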
+          if last_exception:
+            node_errors[node_uuid] = last_exception
+
+    if node_errors:
+      msg = ("Some nodes' SSL client certificates could not be fetched."
+             " Please make sure those nodes are reachable and rerun"
+             " the operation. The affected nodes and their errors are:\n")
+      for uuid, e in node_errors.items():
+        msg += "Node %s: %s\n" % (uuid, e)
+      feedback_fn(msg)
+
+    self.cfg.SetCandidateCerts(digest_map)
+
+  def _RenewSshKeys(self):
+    """Renew all nodes' SSH keys.
+
+    """
+    master_uuid = self.cfg.GetMasterNode()
+
+    nodes = self.cfg.GetAllNodesInfo()
+    nodes_uuid_names = [(node_uuid, node_info.name) for (node_uuid, node_info)
+                        in nodes.items() if not node_info.offline]
+    node_names = [name for (_, name) in nodes_uuid_names]
+    node_uuids = [uuid for (uuid, _) in nodes_uuid_names]
+    potential_master_candidates = self.cfg.GetPotentialMasterCandidates()
+    master_candidate_uuids = self.cfg.GetMasterCandidateUuids()
+    result = self.rpc.call_node_ssh_keys_renew(
+      [master_uuid],
+      node_uuids, node_names,
+      master_candidate_uuids,
+      potential_master_candidates)
+    result[master_uuid].Raise("Could not renew the SSH keys of all nodes")
+
+  def Exec(self, feedback_fn):
+    if self.op.node_certificates:
+      feedback_fn("Renewing Node SSL certificates")
+      self._RenewNodeSslCertificates(feedback_fn)
+    if self.op.ssh_keys and not self._ssh_renewal_suppressed:
+      feedback_fn("Renewing SSH keys")
+      self._RenewSshKeys()
+    elif self._ssh_renewal_suppressed:
+      feedback_fn("Cannot renew SSH keys if the cluster is configured to not"
+                  " modify the SSH setup.")
+
+
+class LUClusterActivateMasterIp(NoHooksLU):
+  """Activate the master IP on the master node.
+
+  """
+  def Exec(self, feedback_fn):
+    """Activate the master IP.
+
+    """
+    master_params = self.cfg.GetMasterNetworkParameters()
+    ems = self.cfg.GetUseExternalMipScript()
+    result = self.rpc.call_node_activate_master_ip(master_params.uuid,
+                                                   master_params, ems)
+    result.Raise("Could not activate the master IP")
+
+
+class LUClusterDeactivateMasterIp(NoHooksLU):
+  """Deactivate the master IP on the master node.
+
+  """
+  def Exec(self, feedback_fn):
+    """Deactivate the master IP.
+
+    """
+    master_params = self.cfg.GetMasterNetworkParameters()
+    ems = self.cfg.GetUseExternalMipScript()
+    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
+                                                     master_params, ems)
+    result.Raise("Could not deactivate the master IP")
+
+
+class LUClusterConfigQuery(NoHooksLU):
+  """Return configuration values.
+
+  """
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    self.cq = ClusterQuery(None, self.op.output_fields, False)
+
+  def ExpandNames(self):
+    self.cq.ExpandNames(self)
+
+  def DeclareLocks(self, level):
+    self.cq.DeclareLocks(self, level)
+
+  def Exec(self, feedback_fn):
+    result = self.cq.OldStyleQuery(self)
+
+    assert len(result) == 1
+
+    return result[0]
+
+
+class LUClusterDestroy(LogicalUnit):
+  """Logical unit for destroying the cluster.
+
+  """
+  HPATH = "cluster-destroy"
+  HTYPE = constants.HTYPE_CLUSTER
+
+  # Read by the job queue to detect when the cluster is gone and job files will
+  # never be available.
+  # FIXME: This variable should be removed together with the Python job queue.
+  clusterHasBeenDestroyed = False
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    return {
+      "OP_TARGET": self.cfg.GetClusterName(),
+      }
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    return ([], [])
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks whether the cluster is empty.
+
+    Any errors are signaled by raising errors.OpPrereqError.
+
+    """
+    master = self.cfg.GetMasterNode()
+
+    nodelist = self.cfg.GetNodeList()
+    if len(nodelist) != 1 or nodelist[0] != master:
+      raise errors.OpPrereqError("There are still %d node(s) in"
+                                 " this cluster." % (len(nodelist) - 1),
+                                 errors.ECODE_INVAL)
+    instancelist = self.cfg.GetInstanceList()
+    if instancelist:
+      raise errors.OpPrereqError("There are still %d instance(s) in"
+                                 " this cluster." % len(instancelist),
+                                 errors.ECODE_INVAL)
+
+  def Exec(self, feedback_fn):
+    """Destroys the cluster.
+
+    """
+    master_params = self.cfg.GetMasterNetworkParameters()
+
+    # Run post hooks on master node before it's removed
+    RunPostHook(self, self.cfg.GetNodeName(master_params.uuid))
+
+    ems = self.cfg.GetUseExternalMipScript()
+    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
+                                                     master_params, ems)
+    result.Warn("Error disabling the master IP address", self.LogWarning)
+
+    self.wconfd.Client().PrepareClusterDestruction(self.wconfdcontext)
+
+    # signal to the job queue that the cluster is gone
+    LUClusterDestroy.clusterHasBeenDestroyed = True
+
+    return master_params.uuid
+
+
+class LUClusterPostInit(LogicalUnit):
+  """Logical unit for running hooks after cluster initialization.
+
+  """
+  HPATH = "cluster-init"
+  HTYPE = constants.HTYPE_CLUSTER
+
+  def CheckArguments(self):
+    self.master_uuid = self.cfg.GetMasterNode()
+    self.master_ndparams = self.cfg.GetNdParams(self.cfg.GetMasterNodeInfo())
+
+    # TODO: When Issue 584 is solved, and None is properly parsed when used
+    # as a default value, ndparams.get(.., None) can be changed to
+    # ndparams[..] to access the values directly
+
+    # OpenvSwitch: Warn user if link is missing
+    if (self.master_ndparams[constants.ND_OVS] and not
+        self.master_ndparams.get(constants.ND_OVS_LINK, None)):
+      self.LogInfo("No physical interface for OpenvSwitch was given."
+                   " OpenvSwitch will not have an outside connection. This"
+                   " might not be what you want.")
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    return {
+      "OP_TARGET": self.cfg.GetClusterName(),
+      }
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    return ([], [self.cfg.GetMasterNode()])
+
+  def Exec(self, feedback_fn):
+    """Create and configure Open vSwitch
+
+    """
+    if self.master_ndparams[constants.ND_OVS]:
+      result = self.rpc.call_node_configure_ovs(
+                 self.master_uuid,
+                 self.master_ndparams[constants.ND_OVS_NAME],
+                 self.master_ndparams.get(constants.ND_OVS_LINK, None))
+      result.Raise("Could not successully configure Open vSwitch")
+
+    return True
+
+
+class ClusterQuery(QueryBase):
+  FIELDS = query.CLUSTER_FIELDS
+
+  #: Do not sort (there is only one item)
+  SORT_FIELD = None
+
+  def ExpandNames(self, lu):
+    lu.needed_locks = {}
+
+    # The following variables interact with _QueryBase._GetNames
+    self.wanted = locking.ALL_SET
+    self.do_locking = self.use_locking
+
+    if self.do_locking:
+      raise errors.OpPrereqError("Can not use locking for cluster queries",
+                                 errors.ECODE_INVAL)
+
+  def DeclareLocks(self, lu, level):
+    pass
+
+  def _GetQueryData(self, lu):
+    """Computes the list of nodes and their attributes.
+
+    """
+    if query.CQ_CONFIG in self.requested_data:
+      cluster = lu.cfg.GetClusterInfo()
+      nodes = lu.cfg.GetAllNodesInfo()
+    else:
+      cluster = NotImplemented
+      nodes = NotImplemented
+
+    if query.CQ_QUEUE_DRAINED in self.requested_data:
+      drain_flag = os.path.exists(pathutils.JOB_QUEUE_DRAIN_FILE)
+    else:
+      drain_flag = NotImplemented
+
+    if query.CQ_WATCHER_PAUSE in self.requested_data:
+      master_node_uuid = lu.cfg.GetMasterNode()
+
+      result = lu.rpc.call_get_watcher_pause(master_node_uuid)
+      result.Raise("Can't retrieve watcher pause from master node '%s'" %
+                   lu.cfg.GetMasterNodeName())
+
+      watcher_pause = result.payload
+    else:
+      watcher_pause = NotImplemented
+
+    return query.ClusterQueryData(cluster, nodes, drain_flag, watcher_pause)
+
+
+class LUClusterQuery(NoHooksLU):
+  """Query cluster configuration.
+
+  """
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {}
+
+  def Exec(self, feedback_fn):
+    """Return cluster config.
+
+    """
+    cluster = self.cfg.GetClusterInfo()
+    os_hvp = {}
+
+    # Filter just for enabled hypervisors
+    for os_name, hv_dict in cluster.os_hvp.items():
+      os_hvp[os_name] = {}
+      for hv_name, hv_params in hv_dict.items():
+        if hv_name in cluster.enabled_hypervisors:
+          os_hvp[os_name][hv_name] = hv_params
+
+    # Convert ip_family to ip_version
+    primary_ip_version = constants.IP4_VERSION
+    if cluster.primary_ip_family == netutils.IP6Address.family:
+      primary_ip_version = constants.IP6_VERSION
+
+    result = {
+      "software_version": constants.RELEASE_VERSION,
+      "protocol_version": constants.PROTOCOL_VERSION,
+      "config_version": constants.CONFIG_VERSION,
+      "os_api_version": max(constants.OS_API_VERSIONS),
+      "export_version": constants.EXPORT_VERSION,
+      "vcs_version": constants.VCS_VERSION,
+      "architecture": runtime.GetArchInfo(),
+      "name": cluster.cluster_name,
+      "master": self.cfg.GetMasterNodeName(),
+      "default_hypervisor": cluster.primary_hypervisor,
+      "enabled_hypervisors": cluster.enabled_hypervisors,
+      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
+                        for hypervisor_name in cluster.enabled_hypervisors]),
+      "os_hvp": os_hvp,
+      "beparams": cluster.beparams,
+      "osparams": cluster.osparams,
+      "ipolicy": cluster.ipolicy,
+      "nicparams": cluster.nicparams,
+      "ndparams": cluster.ndparams,
+      "diskparams": cluster.diskparams,
+      "candidate_pool_size": cluster.candidate_pool_size,
+      "max_running_jobs": cluster.max_running_jobs,
+      "max_tracked_jobs": cluster.max_tracked_jobs,
+      "mac_prefix": cluster.mac_prefix,
+      "master_netdev": cluster.master_netdev,
+      "master_netmask": cluster.master_netmask,
+      "use_external_mip_script": cluster.use_external_mip_script,
+      "volume_group_name": cluster.volume_group_name,
+      "drbd_usermode_helper": cluster.drbd_usermode_helper,
+      "file_storage_dir": cluster.file_storage_dir,
+      "shared_file_storage_dir": cluster.shared_file_storage_dir,
+      "maintain_node_health": cluster.maintain_node_health,
+      "ctime": cluster.ctime,
+      "mtime": cluster.mtime,
+      "uuid": cluster.uuid,
+      "tags": list(cluster.GetTags()),
+      "uid_pool": cluster.uid_pool,
+      "default_iallocator": cluster.default_iallocator,
+      "default_iallocator_params": cluster.default_iallocator_params,
+      "reserved_lvs": cluster.reserved_lvs,
+      "primary_ip_version": primary_ip_version,
+      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
+      "hidden_os": cluster.hidden_os,
+      "blacklisted_os": cluster.blacklisted_os,
+      "enabled_disk_templates": cluster.enabled_disk_templates,
+      "install_image": cluster.install_image,
+      "instance_communication_network": cluster.instance_communication_network,
+      "compression_tools": cluster.compression_tools,
+      "enabled_user_shutdown": cluster.enabled_user_shutdown,
+      }
+
+    return result
+
+
+class LUClusterRedistConf(NoHooksLU):
+  """Force the redistribution of cluster configuration.
+
+  This is a very simple LU.
+
+  """
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {
+      locking.LEVEL_NODE: locking.ALL_SET,
+    }
+    self.share_locks = ShareAll()
+
+  def Exec(self, feedback_fn):
+    """Redistribute the configuration.
+
+    """
+    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
+    RedistributeAncillaryFiles(self)
+
+
+class LUClusterRename(LogicalUnit):
+  """Rename the cluster.
+
+  """
+  HPATH = "cluster-rename"
+  HTYPE = constants.HTYPE_CLUSTER
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    return {
+      "OP_TARGET": self.cfg.GetClusterName(),
+      "NEW_NAME": self.op.name,
+      }
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())
+
+  def CheckPrereq(self):
+    """Verify that the passed name is a valid one.
+
+    """
+    hostname = netutils.GetHostname(name=self.op.name,
+                                    family=self.cfg.GetPrimaryIPFamily())
+
+    new_name = hostname.name
+    self.ip = new_ip = hostname.ip
+    old_name = self.cfg.GetClusterName()
+    old_ip = self.cfg.GetMasterIP()
+    if new_name == old_name and new_ip == old_ip:
+      raise errors.OpPrereqError("Neither the name nor the IP address of the"
+                                 " cluster has changed",
+                                 errors.ECODE_INVAL)
+    if new_ip != old_ip:
+      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
+        raise errors.OpPrereqError("The given cluster IP address (%s) is"
+                                   " reachable on the network" %
+                                   new_ip, errors.ECODE_NOTUNIQUE)
+
+    self.op.name = new_name
+
+  def Exec(self, feedback_fn):
+    """Rename the cluster.
+
+    """
+    clustername = self.op.name
+    new_ip = self.ip
+
+    # shutdown the master IP
+    master_params = self.cfg.GetMasterNetworkParameters()
+    ems = self.cfg.GetUseExternalMipScript()
+    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
+                                                     master_params, ems)
+    result.Raise("Could not disable the master role")
+
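+    # Update the configuration while the master IP is down; the finally block
+    # below re-activates the master IP even if the rename fails half-way.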
+    try:
+      cluster = self.cfg.GetClusterInfo()
+      cluster.cluster_name = clustername
+      cluster.master_ip = new_ip
+      self.cfg.Update(cluster, feedback_fn)
+
+      # update the known hosts file
+      ssh.WriteKnownHostsFile(self.cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
+      node_list = self.cfg.GetOnlineNodeList()
+      try:
+        node_list.remove(master_params.uuid)
+      except ValueError:
+        pass
+      UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
+    finally:
+      master_params.ip = new_ip
+      result = self.rpc.call_node_activate_master_ip(master_params.uuid,
+                                                     master_params, ems)
+      result.Warn("Could not re-enable the master role on the master,"
+                  " please restart manually", self.LogWarning)
+
+    return clustername
+
+
+class LUClusterRepairDiskSizes(NoHooksLU):
+  """Verifies the cluster disks sizes.
+
+  """
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    if self.op.instances:
+      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
+      # Not getting the node allocation lock as only a specific set of
+      # instances (and their nodes) is going to be acquired
+      self.needed_locks = {
+        locking.LEVEL_NODE_RES: [],
+        locking.LEVEL_INSTANCE: self.wanted_names,
+        }
+      self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
+    else:
+      self.wanted_names = None
+      self.needed_locks = {
+        locking.LEVEL_NODE_RES: locking.ALL_SET,
+        locking.LEVEL_INSTANCE: locking.ALL_SET,
+        }
+
+    self.share_locks = {
+      locking.LEVEL_NODE_RES: 1,
+      locking.LEVEL_INSTANCE: 0,
+      }
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODE_RES and self.wanted_names is not None:
+      self._LockInstancesNodes(primary_only=True, level=level)
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This only checks the optional instance list against the existing names.
+
+    """
+    if self.wanted_names is None:
+      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
+
+    self.wanted_instances = \
+        map(compat.snd, self.cfg.GetMultiInstanceInfoByName(self.wanted_names))
+
+  def _EnsureChildSizes(self, disk):
+    """Ensure children of the disk have the needed disk size.
+
+    This is valid mainly for DRBD8 and fixes an issue where the
+    children have a smaller disk size.
+
+    @param disk: an L{ganeti.objects.Disk} object
+
+    """
+    if disk.dev_type == constants.DT_DRBD8:
+      assert disk.children, "Empty children for DRBD8?"
+      fchild = disk.children[0]
+      mismatch = fchild.size < disk.size
+      if mismatch:
+        self.LogInfo("Child disk has size %d, parent %d, fixing",
+                     fchild.size, disk.size)
+        fchild.size = disk.size
+
+      # and we recurse on this child only, not on the metadev
+      return self._EnsureChildSizes(fchild) or mismatch
+    else:
+      return False
+
+  def Exec(self, feedback_fn):
+    """Verify the size of cluster disks.
+
+    """
+    # TODO: check child disks too
+    # TODO: check differences in size between primary/secondary nodes
+    per_node_disks = {}
+    for instance in self.wanted_instances:
+      pnode = instance.primary_node
+      if pnode not in per_node_disks:
+        per_node_disks[pnode] = []
+      for idx, disk in enumerate(self.cfg.GetInstanceDisks(instance.uuid)):
+        per_node_disks[pnode].append((instance, idx, disk))
+
+    assert not (frozenset(per_node_disks.keys()) -
+                frozenset(self.owned_locks(locking.LEVEL_NODE_RES))), \
+      "Not owning correct locks"
+    assert not self.owned_locks(locking.LEVEL_NODE)
+
+    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
+                                               per_node_disks.keys())
+
+    changed = []
+    for node_uuid, dskl in per_node_disks.items():
+      if not dskl:
+        # no disks on the node
+        continue
+
+      newl = [([v[2].Copy()], v[0]) for v in dskl]
+      node_name = self.cfg.GetNodeName(node_uuid)
+      result = self.rpc.call_blockdev_getdimensions(node_uuid, newl)
+      if result.fail_msg:
+        self.LogWarning("Failure in blockdev_getdimensions call to node"
+                        " %s, ignoring", node_name)
+        continue
+      if len(result.payload) != len(dskl):
+        logging.warning("Invalid result from node %s: len(dksl)=%d,"
+                        " result.payload=%s", node_name, len(dskl),
+                        result.payload)
+        self.LogWarning("Invalid result from node %s, ignoring node results",
+                        node_name)
+        continue
+      for ((instance, idx, disk), dimensions) in zip(dskl, result.payload):
+        if dimensions is None:
+          self.LogWarning("Disk %d of instance %s did not return size"
+                          " information, ignoring", idx, instance.name)
+          continue
+        if not isinstance(dimensions, (tuple, list)):
+          self.LogWarning("Disk %d of instance %s did not return valid"
+                          " dimension information, ignoring", idx,
+                          instance.name)
+          continue
+        (size, spindles) = dimensions
+        if not isinstance(size, (int, long)):
+          self.LogWarning("Disk %d of instance %s did not return valid"
+                          " size information, ignoring", idx, instance.name)
+          continue
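+        # The reported size is in bytes; convert to mebibytes, the unit used
+        # for disk sizes in the configuration, before comparing.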
+        size = size >> 20
+        if size != disk.size:
+          self.LogInfo("Disk %d of instance %s has mismatched size,"
+                       " correcting: recorded %d, actual %d", idx,
+                       instance.name, disk.size, size)
+          disk.size = size
+          self.cfg.Update(disk, feedback_fn)
+          changed.append((instance.name, idx, "size", size))
+        if es_flags[node_uuid]:
+          if spindles is None:
+            self.LogWarning("Disk %d of instance %s did not return valid"
+                            " spindles information, ignoring", idx,
+                            instance.name)
+          elif disk.spindles is None or disk.spindles != spindles:
+            self.LogInfo("Disk %d of instance %s has mismatched spindles,"
+                         " correcting: recorded %s, actual %s",
+                         idx, instance.name, disk.spindles, spindles)
+            disk.spindles = spindles
+            self.cfg.Update(disk, feedback_fn)
+            changed.append((instance.name, idx, "spindles", disk.spindles))
+        if self._EnsureChildSizes(disk):
+          self.cfg.Update(disk, feedback_fn)
+          changed.append((instance.name, idx, "size", disk.size))
+    return changed
+
+
+def _ValidateNetmask(cfg, netmask):
+  """Checks if a netmask is valid.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: cluster configuration
+  @type netmask: int
+  @param netmask: netmask to be verified
+  @raise errors.OpPrereqError: if the validation fails
+
+  """
+  ip_family = cfg.GetPrimaryIPFamily()
+  try:
+    ipcls = netutils.IPAddress.GetClassFromIpFamily(ip_family)
+  except errors.ProgrammerError:
+    raise errors.OpPrereqError("Invalid primary ip family: %s." %
+                               ip_family, errors.ECODE_INVAL)
+  if not ipcls.ValidateNetmask(netmask):
+    raise errors.OpPrereqError("CIDR netmask (%s) not valid" %
+                               (netmask), errors.ECODE_INVAL)
+
+
+def CheckFileBasedStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates,
+    file_disk_template):
+  """Checks whether the given file-based storage directory is acceptable.
+
+  Note: This function is public, because it is also used in bootstrap.py.
+
+  @type logging_warn_fn: function
+  @param logging_warn_fn: function which accepts a string and logs it
+  @type file_storage_dir: string
+  @param file_storage_dir: the directory to be used for file-based instances
+  @type enabled_disk_templates: list of string
+  @param enabled_disk_templates: the list of enabled disk templates
+  @type file_disk_template: string
+  @param file_disk_template: the file-based disk template for which the
+      path should be checked
+
+  """
+  assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
+            constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
+         ))
+
+  file_storage_enabled = file_disk_template in enabled_disk_templates
+  if file_storage_dir is not None:
+    if file_storage_dir == "":
+      if file_storage_enabled:
+        raise errors.OpPrereqError(
+            "Unsetting the '%s' storage directory while having '%s' storage"
+            " enabled is not permitted." %
+            (file_disk_template, file_disk_template),
+            errors.ECODE_INVAL)
+    else:
+      if not file_storage_enabled:
+        logging_warn_fn(
+            "Specified a %s storage directory, although %s storage is not"
+            " enabled." % (file_disk_template, file_disk_template))
+  else:
+    raise errors.ProgrammerError("Received %s storage dir with value"
+                                 " 'None'." % file_disk_template)
+
+
+def CheckFileStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates):
+  """Checks whether the given file storage directory is acceptable.
+
+  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
+
+  """
+  CheckFileBasedStoragePathVsEnabledDiskTemplates(
+      logging_warn_fn, file_storage_dir, enabled_disk_templates,
+      constants.DT_FILE)
+
+
+def CheckSharedFileStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates):
+  """Checks whether the given shared file storage directory is acceptable.
+
+  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
+
+  """
+  CheckFileBasedStoragePathVsEnabledDiskTemplates(
+      logging_warn_fn, file_storage_dir, enabled_disk_templates,
+      constants.DT_SHARED_FILE)
+
+
+def CheckGlusterStoragePathVsEnabledDiskTemplates(
+    logging_warn_fn, file_storage_dir, enabled_disk_templates):
+  """Checks whether the given gluster storage directory is acceptable.
+
+  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
+
+  """
+  CheckFileBasedStoragePathVsEnabledDiskTemplates(
+      logging_warn_fn, file_storage_dir, enabled_disk_templates,
+      constants.DT_GLUSTER)
+
+
+def CheckCompressionTools(tools):
+  """Check whether the provided compression tools look like executables.
+
+  @type tools: list of string
+  @param tools: The tools provided as opcode input
+
+  """
+  regex = re.compile('^[-_a-zA-Z0-9]+$')
+  illegal_tools = [t for t in tools if not regex.match(t)]
+
+  if illegal_tools:
+    raise errors.OpPrereqError(
+      "The tools '%s' contain illegal characters: only alphanumeric values,"
+      " dashes, and underscores are allowed" % ", ".join(illegal_tools),
+      errors.ECODE_INVAL
+    )
+
+  if constants.IEC_GZIP not in tools:
+    raise errors.OpPrereqError("For compatibility reasons, the %s utility must"
+                               " be present among the compression tools" %
+                               constants.IEC_GZIP, errors.ECODE_INVAL)
+
+  if constants.IEC_NONE in tools:
+    raise errors.OpPrereqError("%s is a reserved value used for no compression,"
+                               " and cannot be used as the name of a tool" %
+                               constants.IEC_NONE, errors.ECODE_INVAL)
+
+
+class LUClusterSetParams(LogicalUnit):
+  """Change the parameters of the cluster.
+
+  """
+  HPATH = "cluster-modify"
+  HTYPE = constants.HTYPE_CLUSTER
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    """Check parameters
+
+    """
+    if self.op.uid_pool:
+      uidpool.CheckUidPool(self.op.uid_pool)
+
+    if self.op.add_uids:
+      uidpool.CheckUidPool(self.op.add_uids)
+
+    if self.op.remove_uids:
+      uidpool.CheckUidPool(self.op.remove_uids)
+
+    if self.op.mac_prefix:
+      self.op.mac_prefix = \
+          utils.NormalizeAndValidateThreeOctetMacPrefix(self.op.mac_prefix)
+
+    if self.op.master_netmask is not None:
+      _ValidateNetmask(self.cfg, self.op.master_netmask)
+
+    if self.op.diskparams:
+      for dt_params in self.op.diskparams.values():
+        utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
+      try:
+        utils.VerifyDictOptions(self.op.diskparams, constants.DISK_DT_DEFAULTS)
+        CheckDiskAccessModeValidity(self.op.diskparams)
+      except errors.OpPrereqError, err:
+        raise errors.OpPrereqError("While verify diskparams options: %s" % err,
+                                   errors.ECODE_INVAL)
+
+    if self.op.install_image is not None:
+      CheckImageValidity(self.op.install_image,
+                         "Install image must be an absolute path or a URL")
+
+  def ExpandNames(self):
+    # FIXME: in the future maybe other cluster params won't require checking on
+    # all nodes to be modified.
+    # FIXME: This opcode changes cluster-wide settings. Is acquiring all
+    # resource locks the right thing, shouldn't it be the BGL instead?
+    self.needed_locks = {
+      locking.LEVEL_NODE: locking.ALL_SET,
+      locking.LEVEL_INSTANCE: locking.ALL_SET,
+      locking.LEVEL_NODEGROUP: locking.ALL_SET,
+    }
+    self.share_locks = ShareAll()
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    """
+    return {
+      "OP_TARGET": self.cfg.GetClusterName(),
+      "NEW_VG_NAME": self.op.vg_name,
+      }
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    mn = self.cfg.GetMasterNode()
+    return ([mn], [mn])
+
+  def _CheckVgName(self, node_uuids, enabled_disk_templates,
+                   new_enabled_disk_templates):
+    """Check the consistency of the vg name on all nodes and in case it gets
+       unset whether there are instances still using it.
+
+    """
+    lvm_is_enabled = utils.IsLvmEnabled(enabled_disk_templates)
+    lvm_gets_enabled = utils.LvmGetsEnabled(enabled_disk_templates,
+                                            new_enabled_disk_templates)
+    current_vg_name = self.cfg.GetVGName()
+
+    if self.op.vg_name == '':
+      if lvm_is_enabled:
+        raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
+                                   " disk templates are or get enabled.",
+                                   errors.ECODE_INVAL)
+
+    if self.op.vg_name is None:
+      if current_vg_name is None and lvm_is_enabled:
+        raise errors.OpPrereqError("Please specify a volume group when"
+                                   " enabling lvm-based disk-templates.",
+                                   errors.ECODE_INVAL)
+
+    if self.op.vg_name is not None and not self.op.vg_name:
+      if self.cfg.DisksOfType(constants.DT_PLAIN):
+        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
+                                   " instances exist", errors.ECODE_INVAL)
+
+    if (self.op.vg_name is not None and lvm_is_enabled) or \
+        (self.cfg.GetVGName() is not None and lvm_gets_enabled):
+      self._CheckVgNameOnNodes(node_uuids)
+
+  def _CheckVgNameOnNodes(self, node_uuids):
+    """Check the status of the volume group on each node.
+
+    """
+    vglist = self.rpc.call_vg_list(node_uuids)
+    for node_uuid in node_uuids:
+      msg = vglist[node_uuid].fail_msg
+      if msg:
+        # ignoring down node
+        self.LogWarning("Error while gathering data on node %s"
+                        " (ignoring node): %s",
+                        self.cfg.GetNodeName(node_uuid), msg)
+        continue
+      vgstatus = utils.CheckVolumeGroupSize(vglist[node_uuid].payload,
+                                            self.op.vg_name,
+                                            constants.MIN_VG_SIZE)
+      if vgstatus:
+        raise errors.OpPrereqError("Error on node '%s': %s" %
+                                   (self.cfg.GetNodeName(node_uuid), vgstatus),
+                                   errors.ECODE_ENVIRON)
+
+  @staticmethod
+  def _GetDiskTemplateSetsInner(op_enabled_disk_templates,
+                                old_enabled_disk_templates):
+    """Computes three sets of disk templates.
+
+    @see: C{_GetDiskTemplateSets} for more details.
+
+    """
+    enabled_disk_templates = None
+    new_enabled_disk_templates = []
+    disabled_disk_templates = []
+    if op_enabled_disk_templates:
+      enabled_disk_templates = op_enabled_disk_templates
+      new_enabled_disk_templates = \
+        list(set(enabled_disk_templates)
+             - set(old_enabled_disk_templates))
+      disabled_disk_templates = \
+        list(set(old_enabled_disk_templates)
+             - set(enabled_disk_templates))
+    else:
+      enabled_disk_templates = old_enabled_disk_templates
+    return (enabled_disk_templates, new_enabled_disk_templates,
+            disabled_disk_templates)
+
+  def _GetDiskTemplateSets(self, cluster):
+    """Computes three sets of disk templates.
+
+    The three sets are:
+      - disk templates that will be enabled after this operation (no matter if
+        they were enabled before or not)
+      - disk templates that get enabled by this operation (thus haven't been
+        enabled before.)
+      - disk templates that get disabled by this operation
+
+    """
+    return self._GetDiskTemplateSetsInner(self.op.enabled_disk_templates,
+                                          cluster.enabled_disk_templates)
+
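+  # Illustrative worked example for the two helpers above (template names
+  # shortened; real values come from the constants.DT_* definitions):
+  #
+  #   _GetDiskTemplateSetsInner(["plain", "file"], ["drbd", "plain"])
+  #   -> (["plain", "file"], ["file"], ["drbd"])
+  #
+  # i.e. the templates enabled after the operation, the newly enabled ones
+  # and the newly disabled ones (the last two modulo set ordering).
+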
+  def _CheckIpolicy(self, cluster, enabled_disk_templates):
+    """Checks the ipolicy.
+
+    @type cluster: C{objects.Cluster}
+    @param cluster: the cluster's configuration
+    @type enabled_disk_templates: list of string
+    @param enabled_disk_templates: list of (possibly newly) enabled disk
+      templates
+
+    """
+    # FIXME: write unit tests for this
+    if self.op.ipolicy:
+      self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
+                                           group_policy=False)
+
+      CheckIpolicyVsDiskTemplates(self.new_ipolicy,
+                                  enabled_disk_templates)
+
+      all_instances = self.cfg.GetAllInstancesInfo().values()
+      violations = set()
+      for group in self.cfg.GetAllNodeGroupsInfo().values():
+        instances = frozenset(
+          [inst for inst in all_instances
+           if compat.any(nuuid in group.members
+           for nuuid in self.cfg.GetInstanceNodes(inst.uuid))])
+        new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
+        ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
+        new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
+                                           self.cfg)
+        if new:
+          violations.update(new)
+
+      if violations:
+        self.LogWarning("After the ipolicy change the following instances"
+                        " violate them: %s",
+                        utils.CommaJoin(utils.NiceSort(violations)))
+    else:
+      CheckIpolicyVsDiskTemplates(cluster.ipolicy,
+                                  enabled_disk_templates)
+
+  def _CheckDrbdHelperOnNodes(self, drbd_helper, node_uuids):
+    """Checks whether the set DRBD helper actually exists on the nodes.
+
+    @type drbd_helper: string
+    @param drbd_helper: path of the drbd usermode helper binary
+    @type node_uuids: list of strings
+    @param node_uuids: list of node UUIDs to check for the helper
+
+    """
+    # checks given drbd helper on all nodes
+    helpers = self.rpc.call_drbd_helper(node_uuids)
+    for (_, ninfo) in self.cfg.GetMultiNodeInfo(node_uuids):
+      if ninfo.offline:
+        self.LogInfo("Not checking drbd helper on offline node %s",
+                     ninfo.name)
+        continue
+      msg = helpers[ninfo.uuid].fail_msg
+      if msg:
+        raise errors.OpPrereqError("Error checking drbd helper on node"
+                                   " '%s': %s" % (ninfo.name, msg),
+                                   errors.ECODE_ENVIRON)
+      node_helper = helpers[ninfo.uuid].payload
+      if node_helper != drbd_helper:
+        raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
+                                   (ninfo.name, node_helper),
+                                   errors.ECODE_ENVIRON)
+
+  def _CheckDrbdHelper(self, node_uuids, drbd_enabled, drbd_gets_enabled):
+    """Check the DRBD usermode helper.
+
+    @type node_uuids: list of strings
+    @param node_uuids: a list of nodes' UUIDs
+    @type drbd_enabled: boolean
+    @param drbd_enabled: whether DRBD will be enabled after this operation
+      (no matter if it was disabled before or not)
+    @type drbd_gets_enabled: boolean
+    @param drbd_gets_enabled: true if DRBD was disabled before this
+      operation, but will be enabled afterwards
+
+    """
+    if self.op.drbd_helper == '':
+      if drbd_enabled:
+        raise errors.OpPrereqError("Cannot disable drbd helper while"
+                                   " DRBD is enabled.", errors.ECODE_STATE)
+      if self.cfg.DisksOfType(constants.DT_DRBD8):
+        raise errors.OpPrereqError("Cannot disable drbd helper while"
+                                   " drbd-based instances exist",
+                                   errors.ECODE_INVAL)
+
+    else:
+      if self.op.drbd_helper is not None and drbd_enabled:
+        self._CheckDrbdHelperOnNodes(self.op.drbd_helper, node_uuids)
+      else:
+        if drbd_gets_enabled:
+          current_drbd_helper = self.cfg.GetClusterInfo().drbd_usermode_helper
+          if current_drbd_helper is not None:
+            self._CheckDrbdHelperOnNodes(current_drbd_helper, node_uuids)
+          else:
+            raise errors.OpPrereqError("Cannot enable DRBD without a"
+                                       " DRBD usermode helper set.",
+                                       errors.ECODE_STATE)
+
+  def _CheckInstancesOfDisabledDiskTemplates(
+      self, disabled_disk_templates):
+    """Check whether we try to disable a disk template that is in use.
+
+    @type disabled_disk_templates: list of string
+    @param disabled_disk_templates: list of disk templates that are going to
+      be disabled by this operation
+
+    """
+    for disk_template in disabled_disk_templates:
+      disks_with_type = self.cfg.DisksOfType(disk_template)
+      if disks_with_type:
+        disk_desc = []
+        for disk in disks_with_type:
+          instance_uuid = self.cfg.GetInstanceForDisk(disk.uuid)
+          instance = self.cfg.GetInstanceInfo(instance_uuid)
+          if instance:
+            instance_desc = "on " + instance.name
+          else:
+            instance_desc = "detached"
+          disk_desc.append("%s (%s)" % (disk, instance_desc))
+        raise errors.OpPrereqError(
+            "Cannot disable disk template '%s', because there is at least one"
+            " disk using it:\n * %s" % (disk_template, "\n * ".join(disk_desc)),
+            errors.ECODE_STATE)
+    if constants.DT_DISKLESS in disabled_disk_templates:
+      instances = self.cfg.GetAllInstancesInfo()
+      for inst in instances.values():
+        if not inst.disks:
+          raise errors.OpPrereqError(
+              "Cannot disable disk template 'diskless', because there is at"
+              " least one instance using it:\n * %s" % inst.name,
+              errors.ECODE_STATE)
+
+  @staticmethod
+  def _CheckInstanceCommunicationNetwork(network, warning_fn):
+    """Check whether an existing network is configured for instance
+    communication.
+
+    Checks whether an existing network is configured with the
+    parameters that are advisable for instance communication, and
+    otherwise issues security warnings.
+
+    @type network: L{ganeti.objects.Network}
+    @param network: L{ganeti.objects.Network} object whose
+                    configuration is being checked
+    @type warning_fn: function
+    @param warning_fn: function used to print warnings
+    @rtype: None
+    @return: None
+
+    """
+    def _MaybeWarn(err, val, default):
+      if val != default:
+        warning_fn("Supplied instance communication network '%s' %s '%s',"
+                   " this might pose a security risk (default is '%s').",
+                   network.name, err, val, default)
+
+    if network.network is None:
+      raise errors.OpPrereqError("Supplied instance communication network '%s'"
+                                 " must have an IPv4 network address.",
+                                 network.name)
+
+    _MaybeWarn("has an IPv4 gateway", network.gateway, None)
+    _MaybeWarn("has a non-standard IPv4 network address", network.network,
+               constants.INSTANCE_COMMUNICATION_NETWORK4)
+    _MaybeWarn("has an IPv6 gateway", network.gateway6, None)
+    _MaybeWarn("has a non-standard IPv6 network address", network.network6,
+               constants.INSTANCE_COMMUNICATION_NETWORK6)
+    _MaybeWarn("has a non-standard MAC prefix", network.mac_prefix,
+               constants.INSTANCE_COMMUNICATION_MAC_PREFIX)
+
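+  # Illustrative note on the check above: a network with, say, an IPv4
+  # gateway set or a MAC prefix other than
+  # constants.INSTANCE_COMMUNICATION_MAC_PREFIX only triggers warnings via
+  # warning_fn; the single hard failure is a missing IPv4 network address,
+  # which raises OpPrereqError.
+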
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks whether the given params don't conflict and
+    if the given volume group is valid.
+
+    """
+    node_uuids = self.owned_locks(locking.LEVEL_NODE)
+    self.cluster = cluster = self.cfg.GetClusterInfo()
+
+    vm_capable_node_uuids = [node.uuid
+                             for node in self.cfg.GetAllNodesInfo().values()
+                             if node.uuid in node_uuids and node.vm_capable]
+
+    (enabled_disk_templates, new_enabled_disk_templates,
+      disabled_disk_templates) = self._GetDiskTemplateSets(cluster)
+    self._CheckInstancesOfDisabledDiskTemplates(disabled_disk_templates)
+
+    self._CheckVgName(vm_capable_node_uuids, enabled_disk_templates,
+                      new_enabled_disk_templates)
+
+    if self.op.file_storage_dir is not None:
+      CheckFileStoragePathVsEnabledDiskTemplates(
+          self.LogWarning, self.op.file_storage_dir, enabled_disk_templates)
+
+    if self.op.shared_file_storage_dir is not None:
+      CheckSharedFileStoragePathVsEnabledDiskTemplates(
+          self.LogWarning, self.op.shared_file_storage_dir,
+          enabled_disk_templates)
+
+    drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
+    drbd_gets_enabled = constants.DT_DRBD8 in new_enabled_disk_templates
+    self._CheckDrbdHelper(vm_capable_node_uuids,
+                          drbd_enabled, drbd_gets_enabled)
+
+    # validate params changes
+    if self.op.beparams:
+      objects.UpgradeBeParams(self.op.beparams)
+      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
+      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
+
+    if self.op.ndparams:
+      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
+      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
+
+      # TODO: we need a more general way to handle resetting
+      # cluster-level parameters to default values
+      if self.new_ndparams["oob_program"] == "":
+        self.new_ndparams["oob_program"] = \
+            constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
+
+    if self.op.hv_state:
+      new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
+                                           self.cluster.hv_state_static)
+      self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
+                               for hv, values in new_hv_state.items())
+
+    if self.op.disk_state:
+      new_disk_state = MergeAndVerifyDiskState(self.op.disk_state,
+                                               self.cluster.disk_state_static)
+      self.new_disk_state = \
+        dict((storage, dict((name, cluster.SimpleFillDiskState(values))
+                            for name, values in svalues.items()))
+             for storage, svalues in new_disk_state.items())
+
+    self._CheckIpolicy(cluster, enabled_disk_templates)
+
+    if self.op.nicparams:
+      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
+      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
+      objects.NIC.CheckParameterSyntax(self.new_nicparams)
+      nic_errors = []
+
+      # check all instances for consistency
+      for instance in self.cfg.GetAllInstancesInfo().values():
+        for nic_idx, nic in enumerate(instance.nics):
+          params_copy = copy.deepcopy(nic.nicparams)
+          params_filled = objects.FillDict(self.new_nicparams, params_copy)
+
+          # check parameter syntax
+          try:
+            objects.NIC.CheckParameterSyntax(params_filled)
+          except errors.ConfigurationError, err:
+            nic_errors.append("Instance %s, nic/%d: %s" %
+                              (instance.name, nic_idx, err))
+
+          # if we're moving instances to routed, check that they have an ip
+          target_mode = params_filled[constants.NIC_MODE]
+          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
+            nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
+                              " address" % (instance.name, nic_idx))
+      if nic_errors:
+        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
+                                   "\n".join(nic_errors), errors.ECODE_INVAL)
+
+    # hypervisor list/parameters
+    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
+    if self.op.hvparams:
+      for hv_name, hv_dict in self.op.hvparams.items():
+        if hv_name not in self.new_hvparams:
+          self.new_hvparams[hv_name] = hv_dict
+        else:
+          self.new_hvparams[hv_name].update(hv_dict)
+
+    # disk template parameters
+    self.new_diskparams = objects.FillDict(cluster.diskparams, {})
+    if self.op.diskparams:
+      for dt_name, dt_params in self.op.diskparams.items():
+        if dt_name not in self.new_diskparams:
+          self.new_diskparams[dt_name] = dt_params
+        else:
+          self.new_diskparams[dt_name].update(dt_params)
+      CheckDiskAccessModeConsistency(self.op.diskparams, self.cfg)
+
+    # os hypervisor parameters
+    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
+    if self.op.os_hvp:
+      for os_name, hvs in self.op.os_hvp.items():
+        if os_name not in self.new_os_hvp:
+          self.new_os_hvp[os_name] = hvs
+        else:
+          for hv_name, hv_dict in hvs.items():
+            if hv_dict is None:
+              # Delete if it exists
+              self.new_os_hvp[os_name].pop(hv_name, None)
+            elif hv_name not in self.new_os_hvp[os_name]:
+              self.new_os_hvp[os_name][hv_name] = hv_dict
+            else:
+              self.new_os_hvp[os_name][hv_name].update(hv_dict)
+
+    # os parameters
+    self._BuildOSParams(cluster)
+
+    # changes to the hypervisor list
+    if self.op.enabled_hypervisors is not None:
+      for hv in self.op.enabled_hypervisors:
+        # if the hypervisor doesn't already exist in the cluster
+        # hvparams, we initialize it to empty, and then (in both
+        # cases) we make sure to fill the defaults, as we might not
+        # have a complete defaults list if the hypervisor wasn't
+        # enabled before
+        if hv not in new_hvp:
+          new_hvp[hv] = {}
+        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
+        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
+
+    if self.op.hvparams or self.op.enabled_hypervisors is not None:
+      # either the enabled list has changed, or the parameters have, validate
+      for hv_name, hv_params in self.new_hvparams.items():
+        if ((self.op.hvparams and hv_name in self.op.hvparams) or
+            (self.op.enabled_hypervisors and
+             hv_name in self.op.enabled_hypervisors)):
+          # either this is a new hypervisor, or its parameters have changed
+          hv_class = hypervisor.GetHypervisorClass(hv_name)
+          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
+          hv_class.CheckParameterSyntax(hv_params)
+          CheckHVParams(self, node_uuids, hv_name, hv_params)
+
+    if self.op.os_hvp:
+      # no need to check any newly-enabled hypervisors, since the
+      # defaults have already been checked in the above code-block
+      for os_name, os_hvp in self.new_os_hvp.items():
+        for hv_name, hv_params in os_hvp.items():
+          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
+          # we need to fill in the new os_hvp on top of the actual hv_p
+          cluster_defaults = self.new_hvparams.get(hv_name, {})
+          new_osp = objects.FillDict(cluster_defaults, hv_params)
+          hv_class = hypervisor.GetHypervisorClass(hv_name)
+          hv_class.CheckParameterSyntax(new_osp)
+          CheckHVParams(self, node_uuids, hv_name, new_osp)
+
+    if self.op.default_iallocator:
+      alloc_script = utils.FindFile(self.op.default_iallocator,
+                                    constants.IALLOCATOR_SEARCH_PATH,
+                                    os.path.isfile)
+      if alloc_script is None:
+        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
+                                   " specified" % self.op.default_iallocator,
+                                   errors.ECODE_INVAL)
+
+    if self.op.instance_communication_network:
+      network_name = self.op.instance_communication_network
+
+      try:
+        network_uuid = self.cfg.LookupNetwork(network_name)
+      except errors.OpPrereqError:
+        network_uuid = None
+
+      if network_uuid is not None:
+        network = self.cfg.GetNetwork(network_uuid)
+        self._CheckInstanceCommunicationNetwork(network, self.LogWarning)
+
+    if self.op.compression_tools:
+      CheckCompressionTools(self.op.compression_tools)
+
+  def _BuildOSParams(self, cluster):
+    "Calculate the new OS parameters for this operation."
+
+    def _GetNewParams(source, new_params):
+      "Wrapper around GetUpdatedParams."
+      if new_params is None:
+        return source
+      result = objects.FillDict(source, {}) # deep copy of source
+      for os_name in new_params:
+        result[os_name] = GetUpdatedParams(result.get(os_name, {}),
+                                           new_params[os_name],
+                                           use_none=True)
+        if not result[os_name]:
+          del result[os_name] # we removed all parameters
+      return result
+
+    self.new_osp = _GetNewParams(cluster.osparams,
+                                 self.op.osparams)
+    self.new_osp_private = _GetNewParams(cluster.osparams_private_cluster,
+                                         self.op.osparams_private_cluster)
+
+    # Validate the parameters of every OS whose parameters have changed
+    changed_oses = (set(self.new_osp.keys()) | set(self.new_osp_private.keys()))
+    for os_name in changed_oses:
+      os_params = cluster.SimpleFillOS(
+        os_name,
+        self.new_osp.get(os_name, {}),
+        os_params_private=self.new_osp_private.get(os_name, {})
+      )
+      # check the parameter validity (remote check)
+      CheckOSParams(self, False, [self.cfg.GetMasterNode()],
+                    os_name, os_params, False)
+
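+  # Illustrative worked example for _GetNewParams (OS and parameter names
+  # are hypothetical): with source = {"debian": {"mirror": "a"}} updated by
+  # new_params = {"debian": {"mirror": None, "proxy": "b"}}, the result is
+  # {"debian": {"proxy": "b"}} -- a None value removes that parameter
+  # (use_none=True), and OSes whose dict ends up empty are dropped.
+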
+  def _SetVgName(self, feedback_fn):
+    """Determines and sets the new volume group name.
+
+    """
+    if self.op.vg_name is not None:
+      new_volume = self.op.vg_name
+      if not new_volume:
+        new_volume = None
+      if new_volume != self.cfg.GetVGName():
+        self.cfg.SetVGName(new_volume)
+      else:
+        feedback_fn("Cluster LVM configuration already in desired"
+                    " state, not changing")
+
+  def _SetFileStorageDir(self, feedback_fn):
+    """Set the file storage directory.
+
+    """
+    if self.op.file_storage_dir is not None:
+      if self.cluster.file_storage_dir == self.op.file_storage_dir:
+        feedback_fn("Global file storage dir already set to value '%s'"
+                    % self.cluster.file_storage_dir)
+      else:
+        self.cluster.file_storage_dir = self.op.file_storage_dir
+
+  def _SetSharedFileStorageDir(self, feedback_fn):
+    """Set the shared file storage directory.
+
+    """
+    if self.op.shared_file_storage_dir is not None:
+      if self.cluster.shared_file_storage_dir == \
+          self.op.shared_file_storage_dir:
+        feedback_fn("Global shared file storage dir already set to value '%s'"
+                    % self.cluster.shared_file_storage_dir)
+      else:
+        self.cluster.shared_file_storage_dir = self.op.shared_file_storage_dir
+
+  def _SetDrbdHelper(self, feedback_fn):
+    """Set the DRBD usermode helper.
+
+    """
+    if self.op.drbd_helper is not None:
+      if not constants.DT_DRBD8 in self.cluster.enabled_disk_templates:
+        feedback_fn("Note that you specified a drbd user helper, but did not"
+                    " enable the drbd disk template.")
+      new_helper = self.op.drbd_helper
+      if not new_helper:
+        new_helper = None
+      if new_helper != self.cfg.GetDRBDHelper():
+        self.cfg.SetDRBDHelper(new_helper)
+      else:
+        feedback_fn("Cluster DRBD helper already in desired state,"
+                    " not changing")
+
+  @staticmethod
+  def _EnsureInstanceCommunicationNetwork(cfg, network_name):
+    """Ensure that the instance communication network exists and is
+    connected to all groups.
+
+    The instance communication network given by L{network_name} is
+    created, if necessary, via the opcode 'OpNetworkAdd'.  Also, the
+    instance communication network is connected to all existing node
+    groups, if necessary, via the opcode 'OpNetworkConnect'.
+
+    @type cfg: L{config.ConfigWriter}
+    @param cfg: cluster configuration
+
+    @type network_name: string
+    @param network_name: instance communication network name
+
+    @rtype: L{ganeti.cmdlib.ResultWithJobs} or L{None}
+    @return: L{ganeti.cmdlib.ResultWithJobs} if the instance
+             communication network needs to be created or needs to be
+             connected to a group, otherwise L{None}
+
+    """
+    jobs = []
+
+    try:
+      network_uuid = cfg.LookupNetwork(network_name)
+      network_exists = True
+    except errors.OpPrereqError:
+      network_exists = False
+
+    if not network_exists:
+      jobs.append(AddInstanceCommunicationNetworkOp(network_name))
+
+    for group_uuid in cfg.GetNodeGroupList():
+      group = cfg.GetNodeGroup(group_uuid)
+
+      if network_exists:
+        network_connected = network_uuid in group.networks
+      else:
+        # The network was created asynchronously by the previous
+        # opcode and, therefore, we don't have access to its
+        # network_uuid.  As a result, we assume that the network is
+        # not connected to any group yet.
+        network_connected = False
+
+      if not network_connected:
+        op = ConnectInstanceCommunicationNetworkOp(group_uuid, network_name)
+        jobs.append(op)
+
+    if jobs:
+      return ResultWithJobs([jobs])
+    else:
+      return None
+
+  @staticmethod
+  def _ModifyInstanceCommunicationNetwork(cfg, network_name, feedback_fn):
+    """Update the instance communication network stored in the cluster
+    configuration.
+
+    Compares the user-supplied instance communication network against
+    the one stored in the Ganeti cluster configuration.  If there is a
+    change, the instance communication network may be possibly created
+    and connected to all groups (see
+    L{LUClusterSetParams._EnsureInstanceCommunicationNetwork}).
+
+    @type cfg: L{config.ConfigWriter}
+    @param cfg: cluster configuration
+
+    @type network_name: string
+    @param network_name: instance communication network name
+
+    @type feedback_fn: function
+    @param feedback_fn: see L{ganeti.cmdlib.base.LogicalUnit}
+
+    @rtype: L{LUClusterSetParams._EnsureInstanceCommunicationNetwork} or L{None}
+    @return: see L{LUClusterSetParams._EnsureInstanceCommunicationNetwork}
+
+    """
+    config_network_name = cfg.GetInstanceCommunicationNetwork()
+
+    if network_name == config_network_name:
+      feedback_fn("Instance communication network already is '%s', nothing to"
+                  " do." % network_name)
+    else:
+      try:
+        cfg.LookupNetwork(config_network_name)
+        feedback_fn("Previous instance communication network '%s'"
+                    " should be removed manually." % config_network_name)
+      except errors.OpPrereqError:
+        pass
+
+      if network_name:
+        feedback_fn("Changing instance communication network to '%s', only new"
+                    " instances will be affected."
+                    % network_name)
+      else:
+        feedback_fn("Disabling instance communication network, only new"
+                    " instances will be affected.")
+
+      cfg.SetInstanceCommunicationNetwork(network_name)
+
+      if network_name:
+        return LUClusterSetParams._EnsureInstanceCommunicationNetwork(
+          cfg,
+          network_name)
+      else:
+        return None
+
+  def Exec(self, feedback_fn):
+    """Change the parameters of the cluster.
+
+    """
+    # re-read the fresh configuration
+    self.cluster = self.cfg.GetClusterInfo()
+    if self.op.enabled_disk_templates:
+      self.cluster.enabled_disk_templates = \
+        list(self.op.enabled_disk_templates)
+    # save the changes
+    self.cfg.Update(self.cluster, feedback_fn)
+
+    self._SetVgName(feedback_fn)
+
+    self.cluster = self.cfg.GetClusterInfo()
+    self._SetFileStorageDir(feedback_fn)
+    self._SetSharedFileStorageDir(feedback_fn)
+    self.cfg.Update(self.cluster, feedback_fn)
+    self._SetDrbdHelper(feedback_fn)
+
+    # re-read the fresh configuration again
+    self.cluster = self.cfg.GetClusterInfo()
+
+    ensure_kvmd = False
+
+    active = constants.DATA_COLLECTOR_STATE_ACTIVE
+    if self.op.enabled_data_collectors is not None:
+      for name, val in self.op.enabled_data_collectors.items():
+        self.cluster.data_collectors[name][active] = val
+
+    if self.op.data_collector_interval:
+      internal = constants.DATA_COLLECTOR_PARAMETER_INTERVAL
+      for name, val in self.op.data_collector_interval.items():
+        self.cluster.data_collectors[name][internal] = int(val)
+
+    if self.op.hvparams:
+      self.cluster.hvparams = self.new_hvparams
+    if self.op.os_hvp:
+      self.cluster.os_hvp = self.new_os_hvp
+    if self.op.enabled_hypervisors is not None:
+      self.cluster.hvparams = self.new_hvparams
+      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
+      ensure_kvmd = True
+    if self.op.beparams:
+      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
+    if self.op.nicparams:
+      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
+    if self.op.ipolicy:
+      self.cluster.ipolicy = self.new_ipolicy
+    if self.op.osparams:
+      self.cluster.osparams = self.new_osp
+    if self.op.osparams_private_cluster:
+      self.cluster.osparams_private_cluster = self.new_osp_private
+    if self.op.ndparams:
+      self.cluster.ndparams = self.new_ndparams
+    if self.op.diskparams:
+      self.cluster.diskparams = self.new_diskparams
+    if self.op.hv_state:
+      self.cluster.hv_state_static = self.new_hv_state
+    if self.op.disk_state:
+      self.cluster.disk_state_static = self.new_disk_state
+
+    if self.op.candidate_pool_size is not None:
+      self.cluster.candidate_pool_size = self.op.candidate_pool_size
+      # we need to update the pool size here, otherwise the save will fail
+      AdjustCandidatePool(self, [])
+
+    if self.op.max_running_jobs is not None:
+      self.cluster.max_running_jobs = self.op.max_running_jobs
+
+    if self.op.max_tracked_jobs is not None:
+      self.cluster.max_tracked_jobs = self.op.max_tracked_jobs
+
+    if self.op.maintain_node_health is not None:
+      self.cluster.maintain_node_health = self.op.maintain_node_health
+
+    if self.op.modify_etc_hosts is not None:
+      self.cluster.modify_etc_hosts = self.op.modify_etc_hosts
+
+    if self.op.prealloc_wipe_disks is not None:
+      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
+
+    if self.op.add_uids is not None:
+      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
+
+    if self.op.remove_uids is not None:
+      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
+
+    if self.op.uid_pool is not None:
+      self.cluster.uid_pool = self.op.uid_pool
+
+    if self.op.default_iallocator is not None:
+      self.cluster.default_iallocator = self.op.default_iallocator
+
+    if self.op.default_iallocator_params is not None:
+      self.cluster.default_iallocator_params = self.op.default_iallocator_params
+
+    if self.op.reserved_lvs is not None:
+      self.cluster.reserved_lvs = self.op.reserved_lvs
+
+    if self.op.use_external_mip_script is not None:
+      self.cluster.use_external_mip_script = self.op.use_external_mip_script
+
+    if self.op.enabled_user_shutdown is not None and \
+          self.cluster.enabled_user_shutdown != self.op.enabled_user_shutdown:
+      self.cluster.enabled_user_shutdown = self.op.enabled_user_shutdown
+      ensure_kvmd = True
+
+    def helper_os(aname, mods, desc):
+      desc += " OS list"
+      lst = getattr(self.cluster, aname)
+      for key, val in mods:
+        if key == constants.DDM_ADD:
+          if val in lst:
+            feedback_fn("OS %s already in %s, ignoring" % (val, desc))
+          else:
+            lst.append(val)
+        elif key == constants.DDM_REMOVE:
+          if val in lst:
+            lst.remove(val)
+          else:
+            feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
+        else:
+          raise errors.ProgrammerError("Invalid modification '%s'" % key)
+
+    if self.op.hidden_os:
+      helper_os("hidden_os", self.op.hidden_os, "hidden")
+
+    if self.op.blacklisted_os:
+      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
+
+    if self.op.mac_prefix:
+      self.cluster.mac_prefix = self.op.mac_prefix
+
+    if self.op.master_netdev:
+      master_params = self.cfg.GetMasterNetworkParameters()
+      ems = self.cfg.GetUseExternalMipScript()
+      feedback_fn("Shutting down master ip on the current netdev (%s)" %
+                  self.cluster.master_netdev)
+      result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
+                                                       master_params, ems)
+      if not self.op.force:
+        result.Raise("Could not disable the master ip")
+      else:
+        if result.fail_msg:
+          msg = ("Could not disable the master ip (continuing anyway): %s" %
+                 result.fail_msg)
+          feedback_fn(msg)
+      feedback_fn("Changing master_netdev from %s to %s" %
+                  (master_params.netdev, self.op.master_netdev))
+      self.cluster.master_netdev = self.op.master_netdev
+
+    if self.op.master_netmask:
+      master_params = self.cfg.GetMasterNetworkParameters()
+      feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask)
+      result = self.rpc.call_node_change_master_netmask(
+                 master_params.uuid, master_params.netmask,
+                 self.op.master_netmask, master_params.ip,
+                 master_params.netdev)
+      result.Warn("Could not change the master IP netmask", feedback_fn)
+      self.cluster.master_netmask = self.op.master_netmask
+
+    if self.op.install_image:
+      self.cluster.install_image = self.op.install_image
+
+    if self.op.zeroing_image is not None:
+      CheckImageValidity(self.op.zeroing_image,
+                         "Zeroing image must be an absolute path or a URL")
+      self.cluster.zeroing_image = self.op.zeroing_image
+
+    self.cfg.Update(self.cluster, feedback_fn)
+
+    if self.op.master_netdev:
+      master_params = self.cfg.GetMasterNetworkParameters()
+      feedback_fn("Starting the master ip on the new master netdev (%s)" %
+                  self.op.master_netdev)
+      ems = self.cfg.GetUseExternalMipScript()
+      result = self.rpc.call_node_activate_master_ip(master_params.uuid,
+                                                     master_params, ems)
+      result.Warn("Could not re-enable the master ip on the master,"
+                  " please restart manually", self.LogWarning)
+
+    # Even though 'self.op.enabled_user_shutdown' is being tested
+    # above, the RPCs can only be done after 'self.cfg.Update' because
+    # this will update the cluster object and sync 'Ssconf', and kvmd
+    # uses 'Ssconf'.
+    if ensure_kvmd:
+      EnsureKvmdOnNodes(self, feedback_fn)
+
+    if self.op.compression_tools is not None:
+      self.cfg.SetCompressionTools(self.op.compression_tools)
+
+    network_name = self.op.instance_communication_network
+    if network_name is not None:
+      return self._ModifyInstanceCommunicationNetwork(self.cfg,
+                                                      network_name, feedback_fn)
+    else:
+      return None
diff --git a/lib/cmdlib/cluster/verify.py b/lib/cmdlib/cluster/verify.py
new file mode 100644
index 0000000..dfa1294
--- /dev/null
+++ b/lib/cmdlib/cluster/verify.py
@@ -0,0 +1,2201 @@
+#
+#
+
+# Copyright (C) 2014 Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Logical units for cluster verification."""
+
+import itertools
+import logging
+import operator
+import re
+import time
+import ganeti.masterd.instance
+import ganeti.rpc.node as rpc
+
+from ganeti import compat
+from ganeti import constants
+from ganeti import errors
+from ganeti import locking
+from ganeti import pathutils
+from ganeti import utils
+from ganeti import vcluster
+from ganeti import hypervisor
+from ganeti import opcodes
+
+from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, ResultWithJobs
+from ganeti.cmdlib.common import ShareAll, ComputeAncillaryFiles, \
+    CheckNodePVs, ComputeIPolicyInstanceViolation, AnnotateDiskParams, \
+    SupportsOob
+
+
+def _GetAllHypervisorParameters(cluster, instances):
+  """Compute the set of all hypervisor parameters.
+
+  @type cluster: L{objects.Cluster}
+  @param cluster: the cluster object
+  @type instances: list of L{objects.Instance}
+  @param instances: additional instances from which to obtain parameters
+  @rtype: list of (origin, hypervisor, parameters)
+  @return: a list with all parameters found, indicating the hypervisor they
+       apply to, and the origin (can be "cluster", "os X", or "instance Y")
+
+  """
+  hvp_data = []
+
+  for hv_name in cluster.enabled_hypervisors:
+    hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
+
+  for os_name, os_hvp in cluster.os_hvp.items():
+    for hv_name, hv_params in os_hvp.items():
+      if hv_params:
+        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
+        hvp_data.append(("os %s" % os_name, hv_name, full_params))
+
+  # TODO: collapse identical parameter values in a single one
+  for instance in instances:
+    if instance.hvparams:
+      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
+                       cluster.FillHV(instance)))
+
+  return hvp_data
+
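+# Illustrative sketch of the returned structure ("debian" and "web1" are
+# hypothetical names):
+#
+#   [("cluster", "kvm", {...cluster-level defaults...}),
+#    ("os debian", "kvm", {...defaults overlaid with the OS override...}),
+#    ("instance web1", "kvm", {...fully filled instance parameters...})]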
+
+class _VerifyErrors(object):
+  """Mix-in for cluster/group verify LUs.
+
+  It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
+  self.op and self._feedback_fn to be available.)
+
+  """
+
+  ETYPE_FIELD = "code"
+  ETYPE_ERROR = constants.CV_ERROR
+  ETYPE_WARNING = constants.CV_WARNING
+
+  def _Error(self, ecode, item, msg, *args, **kwargs):
+    """Format an error message.
+
+    Based on the opcode's error_codes parameter, either format a
+    parseable error code, or a simpler error string.
+
+    This must be called only from Exec and functions called from Exec.
+
+    """
+    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
+    itype, etxt, _ = ecode
+    # If the error code is in the list of ignored errors, demote the error to a
+    # warning
+    if etxt in self.op.ignore_errors:     # pylint: disable=E1101
+      ltype = self.ETYPE_WARNING
+    # first complete the msg
+    if args:
+      msg = msg % args
+    # then format the whole message
+    if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
+      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
+    else:
+      if item:
+        item = " " + item
+      else:
+        item = ""
+      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
+    # and finally report it via the feedback_fn
+    self._feedback_fn("  - %s" % msg) # Mix-in. pylint: disable=E1101
+    # do not mark the operation as failed for WARN cases only
+    if ltype == self.ETYPE_ERROR:
+      self.bad = True
+
+  def _ErrorIf(self, cond, *args, **kwargs):
+    """Log an error message if the passed condition is True.
+
+    """
+    if (bool(cond)
+        or self.op.debug_simulate_errors): # pylint: disable=E1101
+      self._Error(*args, **kwargs)
+
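+# Illustrative sketch of the two message formats produced by _Error
+# (indicative values only): with op.error_codes set, the feedback line is
+# machine-parseable, e.g.
+#
+#   ERROR:ECLUSTERCFG:cluster::some message    (ltype:etxt:itype:item:msg)
+#
+# while without it the same call yields the human-oriented form
+#
+#   ERROR: cluster: some message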
+
+class LUClusterVerify(NoHooksLU):
+  """Submits all jobs necessary to verify the cluster.
+
+  """
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.needed_locks = {}
+
+  def Exec(self, feedback_fn):
+    jobs = []
+
+    if self.op.group_name:
+      groups = [self.op.group_name]
+      depends_fn = lambda: None
+    else:
+      groups = self.cfg.GetNodeGroupList()
+
+      # Verify global configuration
+      jobs.append([
+        opcodes.OpClusterVerifyConfig(ignore_errors=self.op.ignore_errors),
+        ])
+
+      # Always depend on global verification
+      depends_fn = lambda: [(-len(jobs), [])]
+
+    jobs.extend(
+      [opcodes.OpClusterVerifyGroup(group_name=group,
+                                    ignore_errors=self.op.ignore_errors,
+                                    depends=depends_fn(),
+                                    verify_clutter=self.op.verify_clutter)]
+      for group in groups)
+
+    # Fix up all parameters
+    for op in itertools.chain(*jobs): # pylint: disable=W0142
+      op.debug_simulate_errors = self.op.debug_simulate_errors
+      op.verbose = self.op.verbose
+      op.error_codes = self.op.error_codes
+      try:
+        op.skip_checks = self.op.skip_checks
+      except AttributeError:
+        assert not isinstance(op, opcodes.OpClusterVerifyGroup)
+
+    return ResultWithJobs(jobs)
+
+
+class LUClusterVerifyDisks(NoHooksLU):
+  """Verifies the cluster disks status.
+
+  """
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    self.share_locks = ShareAll()
+    self.needed_locks = {
+      locking.LEVEL_NODEGROUP: locking.ALL_SET,
+      }
+
+  def Exec(self, feedback_fn):
+    group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
+
+    # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
+    return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
+                           for group in group_names])
+
+
+class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
+  """Verifies the cluster config.
+
+  """
+  REQ_BGL = False
+
+  def _VerifyHVP(self, hvp_data):
+    """Verifies locally the syntax of the hypervisor parameters.
+
+    """
+    for item, hv_name, hv_params in hvp_data:
+      msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
+             (item, hv_name))
+      try:
+        hv_class = hypervisor.GetHypervisorClass(hv_name)
+        utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
+        hv_class.CheckParameterSyntax(hv_params)
+      except errors.GenericError, err:
+        self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg % str(err))
+
+  def ExpandNames(self):
+    self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
+    self.share_locks = ShareAll()
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    """
+    # Retrieve all information
+    self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
+    self.all_node_info = self.cfg.GetAllNodesInfo()
+    self.all_inst_info = self.cfg.GetAllInstancesInfo()
+
+  def Exec(self, feedback_fn):
+    """Verify integrity of cluster, performing various test on nodes.
+
+    """
+    self.bad = False
+    self._feedback_fn = feedback_fn
+
+    feedback_fn("* Verifying cluster config")
+
+    for msg in self.cfg.VerifyConfig():
+      self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg)
+
+    feedback_fn("* Verifying cluster certificate files")
+
+    for cert_filename in pathutils.ALL_CERT_FILES:
+      (errcode, msg) = utils.VerifyCertificate(cert_filename)
+      self._ErrorIf(errcode, constants.CV_ECLUSTERCERT, None, msg, code=errcode)
+
+    self._ErrorIf(not utils.CanRead(constants.LUXID_USER,
+                                    pathutils.NODED_CERT_FILE),
+                  constants.CV_ECLUSTERCERT,
+                  None,
+                  pathutils.NODED_CERT_FILE + " must be accessible by the " +
+                    constants.LUXID_USER + " user")
+
+    feedback_fn("* Verifying hypervisor parameters")
+
+    self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(),
+                                                self.all_inst_info.values()))
+
+    feedback_fn("* Verifying all nodes belong to an existing group")
+
+    # We do this verification here because, should this bogus circumstance
+    # occur, it would never be caught by VerifyGroup, which only acts on
+    # nodes/instances reachable from existing node groups.
+
+    dangling_nodes = set(node for node in self.all_node_info.values()
+                         if node.group not in self.all_group_info)
+
+    dangling_instances = {}
+    no_node_instances = []
+
+    for inst in self.all_inst_info.values():
+      if inst.primary_node in [node.uuid for node in dangling_nodes]:
+        dangling_instances.setdefault(inst.primary_node, []).append(inst)
+      elif inst.primary_node not in self.all_node_info:
+        no_node_instances.append(inst)
+
+    pretty_dangling = [
+        "%s (%s)" %
+        (node.name,
+         utils.CommaJoin(inst.name for
+                         inst in dangling_instances.get(node.uuid, [])))
+        for node in dangling_nodes]
+
+    self._ErrorIf(bool(dangling_nodes), constants.CV_ECLUSTERDANGLINGNODES,
+                  None,
+                  "the following nodes (and their instances) belong to a non"
+                  " existing group: %s", utils.CommaJoin(pretty_dangling))
+
+    self._ErrorIf(bool(no_node_instances), constants.CV_ECLUSTERDANGLINGINST,
+                  None,
+                  "the following instances have a non-existing primary-node:"
+                  " %s", utils.CommaJoin(inst.name for
+                                         inst in no_node_instances))
+
+    return not self.bad
+
+
+class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
+  """Verifies the status of a node group.
+
+  """
+  HPATH = "cluster-verify"
+  HTYPE = constants.HTYPE_CLUSTER
+  REQ_BGL = False
+
+  _HOOKS_INDENT_RE = re.compile("^", re.M)
+
+  class NodeImage(object):
+    """A class representing the logical and physical status of a node.
+
+    @type uuid: string
+    @ivar uuid: the node UUID to which this object refers
+    @ivar volumes: a structure as returned from
+        L{ganeti.backend.GetVolumeList} (runtime)
+    @ivar instances: a list of running instances (runtime)
+    @ivar pinst: list of configured primary instances (config)
+    @ivar sinst: list of configured secondary instances (config)
+    @ivar sbp: dictionary of {primary-node: list of instances} for all
+        instances for which this node is secondary (config)
+    @ivar mfree: free memory, as reported by hypervisor (runtime)
+    @ivar dfree: free disk, as reported by the node (runtime)
+    @ivar offline: the offline status (config)
+    @type rpc_fail: boolean
+    @ivar rpc_fail: whether the RPC verify call was successful (overall,
+        not whether the individual keys were correct) (runtime)
+    @type lvm_fail: boolean
+    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
+    @type hyp_fail: boolean
+    @ivar hyp_fail: whether the RPC call didn't return the instance list
+    @type ghost: boolean
+    @ivar ghost: whether this is a known node or not (config)
+    @type os_fail: boolean
+    @ivar os_fail: whether the RPC call didn't return valid OS data
+    @type oslist: list
+    @ivar oslist: list of OSes as diagnosed by DiagnoseOS
+    @type vm_capable: boolean
+    @ivar vm_capable: whether the node can host instances
+    @type pv_min: float
+    @ivar pv_min: size in MiB of the smallest PVs
+    @type pv_max: float
+    @ivar pv_max: size in MiB of the biggest PVs
+
+    """
+    def __init__(self, offline=False, uuid=None, vm_capable=True):
+      self.uuid = uuid
+      self.volumes = {}
+      self.instances = []
+      self.pinst = []
+      self.sinst = []
+      self.sbp = {}
+      self.mfree = 0
+      self.dfree = 0
+      self.offline = offline
+      self.vm_capable = vm_capable
+      self.rpc_fail = False
+      self.lvm_fail = False
+      self.hyp_fail = False
+      self.ghost = False
+      self.os_fail = False
+      self.oslist = {}
+      self.pv_min = None
+      self.pv_max = None
+
+  def ExpandNames(self):
+    # This raises errors.OpPrereqError on its own:
+    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
+
+    # Get instances in node group; this is unsafe and needs verification later
+    inst_uuids = \
+      self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
+
+    self.needed_locks = {
+      locking.LEVEL_INSTANCE: self.cfg.GetInstanceNames(inst_uuids),
+      locking.LEVEL_NODEGROUP: [self.group_uuid],
+      locking.LEVEL_NODE: [],
+      }
+
+    self.share_locks = ShareAll()
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODE:
+      # Get members of node group; this is unsafe and needs verification later
+      nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
+
+      # In Exec(), we warn about mirrored instances that have primary and
+      # secondary living in separate node groups. To fully verify that
+      # volumes for these instances are healthy, we will need to do an
+      # extra call to their secondaries. We ensure here those nodes will
+      # be locked.
+      for inst_name in self.owned_locks(locking.LEVEL_INSTANCE):
+        # Important: access only the instances whose lock is owned
+        instance = self.cfg.GetInstanceInfoByName(inst_name)
+        disks = self.cfg.GetInstanceDisks(instance.uuid)
+        if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
+          nodes.update(self.cfg.GetInstanceSecondaryNodes(instance.uuid))
+
+      self.needed_locks[locking.LEVEL_NODE] = nodes
+
+  def CheckPrereq(self):
+    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
+    self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
+
+    group_node_uuids = set(self.group_info.members)
+    group_inst_uuids = \
+      self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
+
+    unlocked_node_uuids = \
+        group_node_uuids.difference(self.owned_locks(locking.LEVEL_NODE))
+
+    unlocked_inst_uuids = \
+        group_inst_uuids.difference(
+          [self.cfg.GetInstanceInfoByName(name).uuid
+           for name in self.owned_locks(locking.LEVEL_INSTANCE)])
+
+    if unlocked_node_uuids:
+      raise errors.OpPrereqError(
+        "Missing lock for nodes: %s" %
+        utils.CommaJoin(self.cfg.GetNodeNames(unlocked_node_uuids)),
+        errors.ECODE_STATE)
+
+    if unlocked_inst_uuids:
+      raise errors.OpPrereqError(
+        "Missing lock for instances: %s" %
+        utils.CommaJoin(self.cfg.GetInstanceNames(unlocked_inst_uuids)),
+        errors.ECODE_STATE)
+
+    self.all_node_info = self.cfg.GetAllNodesInfo()
+    self.all_inst_info = self.cfg.GetAllInstancesInfo()
+    self.all_disks_info = self.cfg.GetAllDisksInfo()
+
+    self.my_node_uuids = group_node_uuids
+    self.my_node_info = dict((node_uuid, self.all_node_info[node_uuid])
+                             for node_uuid in group_node_uuids)
+
+    self.my_inst_uuids = group_inst_uuids
+    self.my_inst_info = dict((inst_uuid, self.all_inst_info[inst_uuid])
+                             for inst_uuid in group_inst_uuids)
+
+    # We detect here the nodes that will need the extra RPC calls for verifying
+    # split LV volumes; they should be locked.
+    extra_lv_nodes = set()
+
+    for inst in self.my_inst_info.values():
+      disks = self.cfg.GetInstanceDisks(inst.uuid)
+      if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
+        inst_nodes = self.cfg.GetInstanceNodes(inst.uuid)
+        for nuuid in inst_nodes:
+          if self.all_node_info[nuuid].group != self.group_uuid:
+            extra_lv_nodes.add(nuuid)
+
+    unlocked_lv_nodes = \
+        extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
+
+    if unlocked_lv_nodes:
+      raise errors.OpPrereqError("Missing node locks for LV check: %s" %
+                                 utils.CommaJoin(unlocked_lv_nodes),
+                                 errors.ECODE_STATE)
+    self.extra_lv_nodes = list(extra_lv_nodes)
+
+  def _VerifyNode(self, ninfo, nresult):
+    """Perform some basic validation on data returned from a node.
+
+      - check the result data structure is well formed and has all the
+        mandatory fields
+      - check ganeti version
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the results from the node
+    @rtype: boolean
+    @return: whether overall this call was successful (and we can expect
+         reasonable values in the response)
+
+    """
+    # main result, nresult should be a non-empty dict
+    test = not nresult or not isinstance(nresult, dict)
+    self._ErrorIf(test, constants.CV_ENODERPC, ninfo.name,
+                  "unable to verify node: no data returned")
+    if test:
+      return False
+
+    # compares ganeti version
+    local_version = constants.PROTOCOL_VERSION
+    remote_version = nresult.get("version", None)
+    test = not (remote_version and
+                isinstance(remote_version, (list, tuple)) and
+                len(remote_version) == 2)
+    self._ErrorIf(test, constants.CV_ENODERPC, ninfo.name,
+                  "connection to node returned invalid data")
+    if test:
+      return False
+
+    test = local_version != remote_version[0]
+    self._ErrorIf(test, constants.CV_ENODEVERSION, ninfo.name,
+                  "incompatible protocol versions: master %s,"
+                  " node %s", local_version, remote_version[0])
+    if test:
+      return False
+
+    # node seems compatible, we can actually try to look into its results
+
+    # full package version
+    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
+                  constants.CV_ENODEVERSION, ninfo.name,
+                  "software version mismatch: master %s, node %s",
+                  constants.RELEASE_VERSION, remote_version[1],
+                  code=self.ETYPE_WARNING)
+
+    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
+    if ninfo.vm_capable and isinstance(hyp_result, dict):
+      for hv_name, hv_result in hyp_result.iteritems():
+        test = hv_result is not None
+        self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
+                      "hypervisor %s verify failure: '%s'", hv_name, hv_result)
+
+    hvp_result = nresult.get(constants.NV_HVPARAMS, None)
+    if ninfo.vm_capable and isinstance(hvp_result, list):
+      for item, hv_name, hv_result in hvp_result:
+        self._ErrorIf(True, constants.CV_ENODEHV, ninfo.name,
+                      "hypervisor %s parameter verify failure (source %s): %s",
+                      hv_name, item, hv_result)
+
+    test = nresult.get(constants.NV_NODESETUP,
+                       ["Missing NODESETUP results"])
+    self._ErrorIf(test, constants.CV_ENODESETUP, ninfo.name,
+                  "node setup error: %s", "; ".join(test))
+
+    return True
+
+  def _VerifyNodeTime(self, ninfo, nresult,
+                      nvinfo_starttime, nvinfo_endtime):
+    """Check the node time.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+    @param nvinfo_starttime: the start time of the RPC call
+    @param nvinfo_endtime: the end time of the RPC call
+
+    """
+    ntime = nresult.get(constants.NV_TIME, None)
+    try:
+      ntime_merged = utils.MergeTime(ntime)
+    except (ValueError, TypeError):
+      self._ErrorIf(True, constants.CV_ENODETIME, ninfo.name,
+                    "Node returned invalid time")
+      return
+
+    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
+      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
+    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
+      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
+    else:
+      ntime_diff = None
+
+    self._ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, ninfo.name,
+                  "Node time diverges by at least %s from master node time",
+                  ntime_diff)
+
+  def _UpdateVerifyNodeLVM(self, ninfo, nresult, vg_name, nimg):
+    """Check the node LVM results and update info for cross-node checks.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+    @param vg_name: the configured VG name
+    @type nimg: L{NodeImage}
+    @param nimg: node image
+
+    """
+    if vg_name is None:
+      return
+
+    # checks vg existence and size > 20G
+    vglist = nresult.get(constants.NV_VGLIST, None)
+    test = not vglist
+    self._ErrorIf(test, constants.CV_ENODELVM, ninfo.name,
+                  "unable to check volume groups")
+    if not test:
+      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
+                                            constants.MIN_VG_SIZE)
+      self._ErrorIf(vgstatus, constants.CV_ENODELVM, ninfo.name, vgstatus)
+
+    # Check PVs
+    (errmsgs, pvminmax) = CheckNodePVs(nresult, self._exclusive_storage)
+    for em in errmsgs:
+      self._Error(constants.CV_ENODELVM, ninfo.name, em)
+    if pvminmax is not None:
+      (nimg.pv_min, nimg.pv_max) = pvminmax
+
+  def _VerifyGroupDRBDVersion(self, node_verify_infos):
+    """Check cross-node DRBD version consistency.
+
+    @type node_verify_infos: dict
+    @param node_verify_infos: infos about nodes as returned from the
+      node_verify call.
+
+    """
+    node_versions = {}
+    for node_uuid, ndata in node_verify_infos.items():
+      nresult = ndata.payload
+      if nresult:
+        version = nresult.get(constants.NV_DRBDVERSION, None)
+        if version:
+          node_versions[node_uuid] = version
+
+    if len(set(node_versions.values())) > 1:
+      for node_uuid, version in sorted(node_versions.items()):
+        msg = "DRBD version mismatch: %s" % version
+        self._Error(constants.CV_ENODEDRBDHELPER, node_uuid, msg,
+                    code=self.ETYPE_WARNING)
+
+  def _VerifyGroupLVM(self, node_image, vg_name):
+    """Check cross-node consistency in LVM.
+
+    @type node_image: dict
+    @param node_image: info about nodes, mapping from node UUIDs to
+      L{NodeImage} objects
+    @param vg_name: the configured VG name
+
+    """
+    if vg_name is None:
+      return
+
+    # Only exclusive storage needs this kind of checks
+    if not self._exclusive_storage:
+      return
+
+    # exclusive_storage wants all PVs to have the same size (approximately),
+    # if the smallest and the biggest ones are okay, everything is fine.
+    # pv_min is None iff pv_max is None
+    vals = filter((lambda ni: ni.pv_min is not None), node_image.values())
+    if not vals:
+      return
+    (pvmin, minnode_uuid) = min((ni.pv_min, ni.uuid) for ni in vals)
+    (pvmax, maxnode_uuid) = max((ni.pv_max, ni.uuid) for ni in vals)
+    bad = utils.LvmExclusiveTestBadPvSizes(pvmin, pvmax)
+    self._ErrorIf(bad, constants.CV_EGROUPDIFFERENTPVSIZE, self.group_info.name,
+                  "PV sizes differ too much in the group; smallest (%s MB) is"
+                  " on %s, biggest (%s MB) is on %s",
+                  pvmin, self.cfg.GetNodeName(minnode_uuid),
+                  pvmax, self.cfg.GetNodeName(maxnode_uuid))
+
+  def _VerifyNodeBridges(self, ninfo, nresult, bridges):
+    """Check the node bridges.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+    @param bridges: the expected list of bridges
+
+    """
+    if not bridges:
+      return
+
+    missing = nresult.get(constants.NV_BRIDGES, None)
+    test = not isinstance(missing, list)
+    self._ErrorIf(test, constants.CV_ENODENET, ninfo.name,
+                  "did not return valid bridge information")
+    if not test:
+      self._ErrorIf(bool(missing), constants.CV_ENODENET, ninfo.name,
+                    "missing bridges: %s" % utils.CommaJoin(sorted(missing)))
+
+  def _VerifyNodeUserScripts(self, ninfo, nresult):
+    """Check the results of user scripts presence and executability on the node
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+
+    """
+    test = constants.NV_USERSCRIPTS not in nresult
+    self._ErrorIf(test, constants.CV_ENODEUSERSCRIPTS, ninfo.name,
+                  "did not return user scripts information")
+
+    broken_scripts = nresult.get(constants.NV_USERSCRIPTS, None)
+    if not test:
+      self._ErrorIf(broken_scripts, constants.CV_ENODEUSERSCRIPTS, ninfo.name,
+                    "user scripts not present or not executable: %s" %
+                    utils.CommaJoin(sorted(broken_scripts)))
+
+  def _VerifyNodeNetwork(self, ninfo, nresult):
+    """Check the node network connectivity results.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+
+    """
+    test = constants.NV_NODELIST not in nresult
+    self._ErrorIf(test, constants.CV_ENODESSH, ninfo.name,
+                  "node hasn't returned node ssh connectivity data")
+    if not test:
+      if nresult[constants.NV_NODELIST]:
+        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
+          self._ErrorIf(True, constants.CV_ENODESSH, ninfo.name,
+                        "ssh communication with node '%s': %s", a_node, a_msg)
+
+    test = constants.NV_NODENETTEST not in nresult
+    self._ErrorIf(test, constants.CV_ENODENET, ninfo.name,
+                  "node hasn't returned node tcp connectivity data")
+    if not test:
+      if nresult[constants.NV_NODENETTEST]:
+        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
+        for anode in nlist:
+          self._ErrorIf(True, constants.CV_ENODENET, ninfo.name,
+                        "tcp communication with node '%s': %s",
+                        anode, nresult[constants.NV_NODENETTEST][anode])
+
+    test = constants.NV_MASTERIP not in nresult
+    self._ErrorIf(test, constants.CV_ENODENET, ninfo.name,
+                  "node hasn't returned node master IP reachability data")
+    if not test:
+      if not nresult[constants.NV_MASTERIP]:
+        if ninfo.uuid == self.master_node:
+          msg = "the master node cannot reach the master IP (not configured?)"
+        else:
+          msg = "cannot reach the master IP"
+        self._ErrorIf(True, constants.CV_ENODENET, ninfo.name, msg)
+
+  def _VerifyInstance(self, instance, node_image, diskstatus):
+    """Verify an instance.
+
+    This function checks to see if the required block devices are
+    available on the instance's nodes, and that the nodes are in the correct
+    state.
+
+    """
+    pnode_uuid = instance.primary_node
+    pnode_img = node_image[pnode_uuid]
+    groupinfo = self.cfg.GetAllNodeGroupsInfo()
+
+    node_vol_should = {}
+    self.cfg.GetInstanceLVsByNode(instance.uuid, lvmap=node_vol_should)
+
+    cluster = self.cfg.GetClusterInfo()
+    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
+                                                            self.group_info)
+    err = ComputeIPolicyInstanceViolation(ipolicy, instance, self.cfg)
+    self._ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance.name,
+                  utils.CommaJoin(err), code=self.ETYPE_WARNING)
+
+    for node_uuid in node_vol_should:
+      n_img = node_image[node_uuid]
+      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
+        # ignore missing volumes on offline or broken nodes
+        continue
+      for volume in node_vol_should[node_uuid]:
+        test = volume not in n_img.volumes
+        self._ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance.name,
+                      "volume %s missing on node %s", volume,
+                      self.cfg.GetNodeName(node_uuid))
+
+    if instance.admin_state == constants.ADMINST_UP:
+      test = instance.uuid not in pnode_img.instances and not pnode_img.offline
+      self._ErrorIf(test, constants.CV_EINSTANCEDOWN, instance.name,
+                    "instance not running on its primary node %s",
+                     self.cfg.GetNodeName(pnode_uuid))
+      self._ErrorIf(pnode_img.offline, constants.CV_EINSTANCEBADNODE,
+                    instance.name, "instance is marked as running and lives on"
+                    " offline node %s", self.cfg.GetNodeName(pnode_uuid))
+
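+    # flatten the per-node disk status into (node, success, status, index)
+    # tuples so each disk can be checked individually below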
+    diskdata = [(nname, success, status, idx)
+                for (nname, disks) in diskstatus.items()
+                for idx, (success, status) in enumerate(disks)]
+
+    for nname, success, bdev_status, idx in diskdata:
+      # the 'ghost node' construction in Exec() ensures that we have a
+      # node here
+      snode = node_image[nname]
+      bad_snode = snode.ghost or snode.offline
+      self._ErrorIf(instance.disks_active and
+                    not success and not bad_snode,
+                    constants.CV_EINSTANCEFAULTYDISK, instance.name,
+                    "couldn't retrieve status for disk/%s on %s: %s",
+                    idx, self.cfg.GetNodeName(nname), bdev_status)
+
+      if instance.disks_active and success and bdev_status.is_degraded:
+        msg = "disk/%s on %s is degraded" % (idx, self.cfg.GetNodeName(nname))
+
+        code = self.ETYPE_ERROR
+        accepted_lds = [constants.LDS_OKAY, constants.LDS_SYNC]
+
+        if bdev_status.ldisk_status in accepted_lds:
+          code = self.ETYPE_WARNING
+
+        msg += "; local disk state is '%s'" % \
+                 constants.LDS_NAMES[bdev_status.ldisk_status]
+
+        self._Error(constants.CV_EINSTANCEFAULTYDISK, instance.name, msg,
+                    code=code)
+
+    self._ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
+                  constants.CV_ENODERPC, self.cfg.GetNodeName(pnode_uuid),
+                  "instance %s, connection to primary node failed",
+                  instance.name)
+
+    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
+    self._ErrorIf(len(secondary_nodes) > 1,
+                  constants.CV_EINSTANCELAYOUT, instance.name,
+                  "instance has multiple secondary nodes: %s",
+                  utils.CommaJoin(secondary_nodes),
+                  code=self.ETYPE_WARNING)
+
+    inst_nodes = self.cfg.GetInstanceNodes(instance.uuid)
+    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes)
+    disks = self.cfg.GetInstanceDisks(instance.uuid)
+    if any(es_flags.values()):
+      if not utils.AllDiskOfType(disks, constants.DTS_EXCL_STORAGE):
+        # Disk template not compatible with exclusive_storage: no instance
+        # node should have the flag set
+        es_nodes = [n
+                    for (n, es) in es_flags.items()
+                    if es]
+        unsupported = [d.dev_type for d in disks
+                       if d.dev_type not in constants.DTS_EXCL_STORAGE]
+        self._Error(constants.CV_EINSTANCEUNSUITABLENODE, instance.name,
+                    "instance uses disk types %s, which are not supported on"
+                    " nodes that have exclusive storage set: %s",
+                    utils.CommaJoin(unsupported),
+                    utils.CommaJoin(self.cfg.GetNodeNames(es_nodes)))
+      for (idx, disk) in enumerate(disks):
+        self._ErrorIf(disk.spindles is None,
+                      constants.CV_EINSTANCEMISSINGCFGPARAMETER, instance.name,
+                      "number of spindles not configured for disk %s while"
+                      " exclusive storage is enabled, try running"
+                      " gnt-cluster repair-disk-sizes", idx)
+
+    if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
+      instance_nodes = utils.NiceSort(inst_nodes)
+      instance_groups = {}
+
+      for node_uuid in instance_nodes:
+        instance_groups.setdefault(self.all_node_info[node_uuid].group,
+                                   []).append(node_uuid)
+
+      pretty_list = [
+        "%s (group %s)" % (utils.CommaJoin(self.cfg.GetNodeNames(nodes)),
+                           groupinfo[group].name)
+        # Sort so that we always list the primary node first.
+        for group, nodes in sorted(instance_groups.items(),
+                                   key=lambda (_, nodes): pnode_uuid in nodes,
+                                   reverse=True)]
+
+      self._ErrorIf(len(instance_groups) > 1,
+                    constants.CV_EINSTANCESPLITGROUPS,
+                    instance.name, "instance has primary and secondary nodes in"
+                    " different groups: %s", utils.CommaJoin(pretty_list),
+                    code=self.ETYPE_WARNING)
+
+    inst_nodes_offline = []
+    for snode in secondary_nodes:
+      s_img = node_image[snode]
+      self._ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
+                    self.cfg.GetNodeName(snode),
+                    "instance %s, connection to secondary node failed",
+                    instance.name)
+
+      if s_img.offline:
+        inst_nodes_offline.append(snode)
+
+    # warn that the instance lives on offline nodes
+    self._ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE,
+                  instance.name, "instance has offline secondary node(s) %s",
+                  utils.CommaJoin(self.cfg.GetNodeNames(inst_nodes_offline)))
+    # ... or ghost/non-vm_capable nodes
+    for node_uuid in inst_nodes:
+      self._ErrorIf(node_image[node_uuid].ghost, constants.CV_EINSTANCEBADNODE,
+                    instance.name, "instance lives on ghost node %s",
+                    self.cfg.GetNodeName(node_uuid))
+      self._ErrorIf(not node_image[node_uuid].vm_capable,
+                    constants.CV_EINSTANCEBADNODE, instance.name,
+                    "instance lives on non-vm_capable node %s",
+                    self.cfg.GetNodeName(node_uuid))
+
+  def _VerifyOrphanVolumes(self, vg_name, node_vol_should, node_image,
+                           reserved):
+    """Verify if there are any unknown volumes in the cluster.
+
+    The .os, .swap and backup volumes are ignored. All other volumes are
+    reported as unknown.
+
+    @type vg_name: string
+    @param vg_name: the name of the Ganeti-administered volume group
+    @type reserved: L{ganeti.utils.FieldSet}
+    @param reserved: a FieldSet of reserved volume names
+
+    """
+    for node_uuid, n_img in node_image.items():
+      if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
+          self.all_node_info[node_uuid].group != self.group_uuid):
+        # skip non-healthy nodes
+        continue
+      for volume in n_img.volumes:
+        # skip volumes not belonging to the ganeti-administered volume group
+        if volume.split('/')[0] != vg_name:
+          continue
+
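+        # a volume is an orphan if it is neither expected on this node nor
+        # matched by the reserved volume names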
+        test = ((node_uuid not in node_vol_should or
+                volume not in node_vol_should[node_uuid]) and
+                not reserved.Matches(volume))
+        self._ErrorIf(test, constants.CV_ENODEORPHANLV,
+                      self.cfg.GetNodeName(node_uuid),
+                      "volume %s is unknown", volume,
+                      code=_VerifyErrors.ETYPE_WARNING)
+
+  def _VerifyNPlusOneMemory(self, node_image, all_insts):
+    """Verify N+1 Memory Resilience.
+
+    Check that if one single node dies we can still start all the
+    instances it was primary for.
+
+    """
+    cluster_info = self.cfg.GetClusterInfo()
+    for node_uuid, n_img in node_image.items():
+      # This code checks that every node which is now listed as
+      # secondary has enough memory to host all instances it is
+      # secondary for, should a single other node in the cluster fail.
+      # FIXME: not ready for failover to an arbitrary node
+      # FIXME: does not support file-backed instances
+      # WARNING: we currently take into account down instances as well
+      # as up ones, considering that even if they're down someone
+      # might want to start them even in the event of a node failure.
+      if n_img.offline or \
+         self.all_node_info[node_uuid].group != self.group_uuid:
+        # we're skipping nodes marked offline and nodes in other groups from
+        # the N+1 warning, since most likely we don't have good memory
+        # information from them; we already list instances living on such
+        # nodes, and that's enough warning
+        continue
+      #TODO(dynmem): also consider ballooning out other instances
+      for prinode, inst_uuids in n_img.sbp.items():
+        needed_mem = 0
+        for inst_uuid in inst_uuids:
+          bep = cluster_info.FillBE(all_insts[inst_uuid])
+          if bep[constants.BE_AUTO_BALANCE]:
+            needed_mem += bep[constants.BE_MINMEM]
+        test = n_img.mfree < needed_mem
+        self._ErrorIf(test, constants.CV_ENODEN1,
+                      self.cfg.GetNodeName(node_uuid),
+                      "not enough memory to accommodate instance failovers"
+                      " should node %s fail (%dMiB needed, %dMiB available)",
+                      self.cfg.GetNodeName(prinode), needed_mem, n_img.mfree)
+
+  def _VerifyClientCertificates(self, nodes, all_nvinfo):
+    """Verifies the consistency of the client certificates.
+
+    This includes several aspects:
+      - the individual validation of all nodes' certificates
+      - the consistency of the master candidate certificate map
+      - the consistency of the master candidate certificate map with the
+        certificates that the master candidates are actually using.
+
+    @param nodes: the list of nodes to consider in this verification
+    @param all_nvinfo: the map of results of the verify_node call to
+      all nodes
+
+    """
+    candidate_certs = self.cfg.GetClusterInfo().candidate_certs
+    if candidate_certs is None or len(candidate_certs) == 0:
+      self._ErrorIf(
+        True, constants.CV_ECLUSTERCLIENTCERT, None,
+        "The cluster's list of master candidate certificates is empty."
+        " If you just updated the cluster, please run"
+        " 'gnt-cluster renew-crypto --new-node-certificates'.")
+      return
+
+    self._ErrorIf(
+      len(candidate_certs) != len(set(candidate_certs.values())),
+      constants.CV_ECLUSTERCLIENTCERT, None,
+      "There are at least two master candidates configured to use the same"
+      " certificate.")
+
+    # check the client certificate reported by each node
+    for node in nodes:
+      if node.offline:
+        continue
+
+      nresult = all_nvinfo[node.uuid]
+      if nresult.fail_msg or not nresult.payload:
+        continue
+
+      (errcode, msg) = nresult.payload.get(constants.NV_CLIENT_CERT, None)
+
+      self._ErrorIf(
+        errcode is not None, constants.CV_ECLUSTERCLIENTCERT, None,
+        "Client certificate of node '%s' failed validation: %s (code '%s')",
+        node.uuid, msg, errcode)
+
+      if not errcode:
+        digest = msg
+        if node.master_candidate:
+          if node.uuid in candidate_certs:
+            self._ErrorIf(
+              digest != candidate_certs[node.uuid],
+              constants.CV_ECLUSTERCLIENTCERT, None,
+              "Client certificate digest of master candidate '%s' does not"
+              " match its entry in the cluster's map of master candidate"
+              " certificates. Expected: %s Got: %s", node.uuid,
+              digest, candidate_certs[node.uuid])
+          else:
+            self._ErrorIf(
+              True, constants.CV_ECLUSTERCLIENTCERT, None,
+              "The master candidate '%s' does not have an entry in the"
+              " map of candidate certificates.", node.uuid)
+            self._ErrorIf(
+              digest in candidate_certs.values(),
+              constants.CV_ECLUSTERCLIENTCERT, None,
+              "Master candidate '%s' is using a certificate of another node.",
+              node.uuid)
+        else:
+          self._ErrorIf(
+            node.uuid in candidate_certs,
+            constants.CV_ECLUSTERCLIENTCERT, None,
+            "Node '%s' is not a master candidate, but still listed in the"
+            " map of master candidate certificates.", node.uuid)
+          self._ErrorIf(
+            (node.uuid not in candidate_certs) and
+              (digest in candidate_certs.values()),
+            constants.CV_ECLUSTERCLIENTCERT, None,
+            "Node '%s' is not a master candidate and is incorrectly using a"
+            " certificate of another node which is master candidate.",
+            node.uuid)
+
+  def _VerifySshSetup(self, nodes, all_nvinfo):
+    """Evaluates the verification results of the SSH setup and clutter test.
+
+    @param nodes: List of L{objects.Node} objects
+    @param all_nvinfo: RPC results
+
+    """
+    for node in nodes:
+      if not node.offline:
+        nresult = all_nvinfo[node.uuid]
+        if nresult.fail_msg or not nresult.payload:
+          self._ErrorIf(True, constants.CV_ENODESSH, node.name,
+                        "Could not verify the SSH setup of this node.")
+          return
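+        # the SSH setup/clutter checks return a list of findings; report any
+        # non-empty result as a single error message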
+        for ssh_test in [constants.NV_SSH_SETUP, constants.NV_SSH_CLUTTER]:
+          result = nresult.payload.get(ssh_test, None)
+          error_msg = ""
+          if isinstance(result, list):
+            error_msg = " ".join(result)
+          self._ErrorIf(result,
+                        constants.CV_ENODESSH, None, error_msg)
+
+  def _VerifyFiles(self, nodes, master_node_uuid, all_nvinfo,
+                   (files_all, files_opt, files_mc, files_vm)):
+    """Verifies file checksums collected from all nodes.
+
+    @param nodes: List of L{objects.Node} objects
+    @param master_node_uuid: UUID of master node
+    @param all_nvinfo: RPC results
+
+    """
+    # Define functions determining which nodes to consider for a file
+    files2nodefn = [
+      (files_all, None),
+      (files_mc, lambda node: (node.master_candidate or
+                               node.uuid == master_node_uuid)),
+      (files_vm, lambda node: node.vm_capable),
+      ]
+
+    # Build mapping from filename to list of nodes which should have the file
+    nodefiles = {}
+    for (files, fn) in files2nodefn:
+      if fn is None:
+        filenodes = nodes
+      else:
+        filenodes = filter(fn, nodes)
+      nodefiles.update((filename,
+                        frozenset(map(operator.attrgetter("uuid"), filenodes)))
+                       for filename in files)
+
+    assert set(nodefiles) == (files_all | files_mc | files_vm)
+
+    fileinfo = dict((filename, {}) for filename in nodefiles)
+    ignore_nodes = set()
+
+    for node in nodes:
+      if node.offline:
+        ignore_nodes.add(node.uuid)
+        continue
+
+      nresult = all_nvinfo[node.uuid]
+
+      if nresult.fail_msg or not nresult.payload:
+        node_files = None
+      else:
+        fingerprints = nresult.payload.get(constants.NV_FILELIST, {})
+        node_files = dict((vcluster.LocalizeVirtualPath(key), value)
+                          for (key, value) in fingerprints.items())
+        del fingerprints
+
+      test = not (node_files and isinstance(node_files, dict))
+      self._ErrorIf(test, constants.CV_ENODEFILECHECK, node.name,
+                    "Node did not return file checksum data")
+      if test:
+        ignore_nodes.add(node.uuid)
+        continue
+
+      # Build per-checksum mapping from filename to nodes having it
+      for (filename, checksum) in node_files.items():
+        assert filename in nodefiles
+        fileinfo[filename].setdefault(checksum, set()).add(node.uuid)
+
+    for (filename, checksums) in fileinfo.items():
+      assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
+
+      # Nodes having the file
+      with_file = frozenset(node_uuid
+                            for node_uuids in fileinfo[filename].values()
+                            for node_uuid in node_uuids) - ignore_nodes
+
+      expected_nodes = nodefiles[filename] - ignore_nodes
+
+      # Nodes missing file
+      missing_file = expected_nodes - with_file
+
+      if filename in files_opt:
+        # All or no nodes
+        self._ErrorIf(missing_file and missing_file != expected_nodes,
+                      constants.CV_ECLUSTERFILECHECK, None,
+                      "File %s is optional, but it must exist on all or no"
+                      " nodes (not found on %s)",
+                      filename,
+                      utils.CommaJoin(
+                        utils.NiceSort(
+                          map(self.cfg.GetNodeName, missing_file))))
+      else:
+        self._ErrorIf(missing_file, constants.CV_ECLUSTERFILECHECK, None,
+                      "File %s is missing from node(s) %s", filename,
+                      utils.CommaJoin(
+                        utils.NiceSort(
+                          map(self.cfg.GetNodeName, missing_file))))
+
+        # Warn if a node has a file it shouldn't
+        unexpected = with_file - expected_nodes
+        self._ErrorIf(unexpected,
+                      constants.CV_ECLUSTERFILECHECK, None,
+                      "File %s should not exist on node(s) %s",
+                      filename, utils.CommaJoin(
+                        utils.NiceSort(map(self.cfg.GetNodeName, unexpected))))
+
+      # See if there are multiple versions of the file
+      test = len(checksums) > 1
+      if test:
+        variants = ["variant %s on %s" %
+                    (idx + 1,
+                     utils.CommaJoin(utils.NiceSort(
+                       map(self.cfg.GetNodeName, node_uuids))))
+                    for (idx, (checksum, node_uuids)) in
+                      enumerate(sorted(checksums.items()))]
+      else:
+        variants = []
+
+      self._ErrorIf(test, constants.CV_ECLUSTERFILECHECK, None,
+                    "File %s found with %s different checksums (%s)",
+                    filename, len(checksums), "; ".join(variants))
+
+  def _VerifyNodeDrbdHelper(self, ninfo, nresult, drbd_helper):
+    """Verify the drbd helper.
+
+    """
+    if drbd_helper:
+      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
+      test = (helper_result is None)
+      self._ErrorIf(test, constants.CV_ENODEDRBDHELPER, ninfo.name,
+                    "no drbd usermode helper returned")
+      if helper_result:
+        status, payload = helper_result
+        test = not status
+        self._ErrorIf(test, constants.CV_ENODEDRBDHELPER, ninfo.name,
+                      "drbd usermode helper check unsuccessful: %s", payload)
+        test = status and (payload != drbd_helper)
+        self._ErrorIf(test, constants.CV_ENODEDRBDHELPER, ninfo.name,
+                      "wrong drbd usermode helper: %s", payload)
+
+  @staticmethod
+  def _ComputeDrbdMinors(ninfo, instanceinfo, disks_info, drbd_map, error_if):
+    """Gives the DRBD information in a map for a node.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param instanceinfo: the dict of instances
+    @param disks_info: the dict of disks
+    @param drbd_map: the DRBD map as returned by
+        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
+    @type error_if: callable like L{_ErrorIf}
+    @param error_if: The error reporting function
+    @return: dict from minor number to (disk_uuid, instance_uuid, active)
+
+    """
+    node_drbd = {}
+    for minor, disk_uuid in drbd_map[ninfo.uuid].items():
+      test = disk_uuid not in disks_info
+      error_if(test, constants.CV_ECLUSTERCFG, None,
+               "ghost disk '%s' in temporary DRBD map", disk_uuid)
+      # ghost disk should not be active, but otherwise we
+      # don't give double warnings (both ghost disk and
+      # unallocated minor in use)
+      if test:
+        node_drbd[minor] = (disk_uuid, None, False)
+      else:
+        disk_active = False
+        disk_instance = None
+        for (inst_uuid, inst) in instanceinfo.items():
+          if disk_uuid in inst.disks:
+            disk_active = inst.disks_active
+            disk_instance = inst_uuid
+            break
+        node_drbd[minor] = (disk_uuid, disk_instance, disk_active)
+    return node_drbd
+
+  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, disks_info,
+                      drbd_helper, drbd_map):
+    """Verifies and the node DRBD status.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+    @param instanceinfo: the dict of instances
+    @param disks_info: the dict of disks
+    @param drbd_helper: the configured DRBD usermode helper
+    @param drbd_map: the DRBD map as returned by
+        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
+
+    """
+    self._VerifyNodeDrbdHelper(ninfo, nresult, drbd_helper)
+
+    # compute the DRBD minors
+    node_drbd = self._ComputeDrbdMinors(ninfo, instanceinfo, disks_info,
+                                        drbd_map, self._ErrorIf)
+
+    # and now check them
+    used_minors = nresult.get(constants.NV_DRBDLIST, [])
+    test = not isinstance(used_minors, (tuple, list))
+    self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
+                  "cannot parse drbd status file: %s", str(used_minors))
+    if test:
+      # we cannot check drbd status
+      return
+
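+    # flag configured DRBD minors that should be active but are not reported
+    # as in use by the node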
+    for minor, (disk_uuid, inst_uuid, must_exist) in node_drbd.items():
+      test = minor not in used_minors and must_exist
+      if inst_uuid is not None:
+        attached = "(attached in instance '%s')" % \
+          self.cfg.GetInstanceName(inst_uuid)
+      else:
+        attached = "(detached)"
+      self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
+                    "drbd minor %d of disk %s %s is not active",
+                    minor, disk_uuid, attached)
+    for minor in used_minors:
+      test = minor not in node_drbd
+      self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
+                    "unallocated drbd minor %d is in use", minor)
+
+  def _UpdateNodeOS(self, ninfo, nresult, nimg):
+    """Builds the node OS structures.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+    @param nimg: the node image object
+
+    """
+    remote_os = nresult.get(constants.NV_OSLIST, None)
+    test = (not isinstance(remote_os, list) or
+            not compat.all(isinstance(v, list) and len(v) == 8
+                           for v in remote_os))
+
+    self._ErrorIf(test, constants.CV_ENODEOS, ninfo.name,
+                  "node hasn't returned valid OS data")
+
+    nimg.os_fail = test
+
+    if test:
+      return
+
+    os_dict = {}
+
+    for (name, os_path, status, diagnose,
+         variants, parameters, api_ver,
+         trusted) in nresult[constants.NV_OSLIST]:
+
+      if name not in os_dict:
+        os_dict[name] = []
+
+      # parameters is a list of lists instead of list of tuples due to
+      # JSON lacking a real tuple type, fix it:
+      parameters = [tuple(v) for v in parameters]
+      os_dict[name].append((os_path, status, diagnose,
+                            set(variants), set(parameters), set(api_ver),
+                            trusted))
+
+    nimg.oslist = os_dict
+
+  def _VerifyNodeOS(self, ninfo, nimg, base):
+    """Verifies the node OS list.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nimg: the node image object
+    @param base: the 'template' node we match against (e.g. from the master)
+
+    """
+    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
+
+    beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
+    for os_name, os_data in nimg.oslist.items():
+      assert os_data, "Empty OS status for OS %s?!" % os_name
+      f_path, f_status, f_diag, f_var, f_param, f_api, f_trusted = os_data[0]
+      self._ErrorIf(not f_status, constants.CV_ENODEOS, ninfo.name,
+                    "Invalid OS %s (located at %s): %s",
+                    os_name, f_path, f_diag)
+      self._ErrorIf(len(os_data) > 1, constants.CV_ENODEOS, ninfo.name,
+                    "OS '%s' has multiple entries"
+                    " (first one shadows the rest): %s",
+                    os_name, utils.CommaJoin([v[0] for v in os_data]))
+      # comparisons with the 'base' image
+      test = os_name not in base.oslist
+      self._ErrorIf(test, constants.CV_ENODEOS, ninfo.name,
+                    "Extra OS %s not present on reference node (%s)",
+                    os_name, self.cfg.GetNodeName(base.uuid))
+      if test:
+        continue
+      assert base.oslist[os_name], "Base node has empty OS status?"
+      _, b_status, _, b_var, b_param, b_api, b_trusted = base.oslist[os_name][0]
+      if not b_status:
+        # base OS is invalid, skipping
+        continue
+      for kind, a, b in [("API version", f_api, b_api),
+                         ("variants list", f_var, b_var),
+                         ("parameters", beautify_params(f_param),
+                          beautify_params(b_param))]:
+        self._ErrorIf(a != b, constants.CV_ENODEOS, ninfo.name,
+                      "OS %s for %s differs from reference node %s:"
+                      " [%s] vs. [%s]", kind, os_name,
+                      self.cfg.GetNodeName(base.uuid),
+                      utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
+      for kind, a, b in [("trusted", f_trusted, b_trusted)]:
+        self._ErrorIf(a != b, constants.CV_ENODEOS, ninfo.name,
+                      "OS %s for %s differs from reference node %s:"
+                      " %s vs. %s", kind, os_name,
+                      self.cfg.GetNodeName(base.uuid), a, b)
+
+    # check any missing OSes
+    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
+    self._ErrorIf(missing, constants.CV_ENODEOS, ninfo.name,
+                  "OSes present on reference node %s"
+                  " but missing on this node: %s",
+                  self.cfg.GetNodeName(base.uuid), utils.CommaJoin(missing))
+
+  def _VerifyAcceptedFileStoragePaths(self, ninfo, nresult, is_master):
+    """Verifies paths in L{pathutils.FILE_STORAGE_PATHS_FILE}.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+    @type is_master: bool
+    @param is_master: Whether node is the master node
+
+    """
+    cluster = self.cfg.GetClusterInfo()
+    if (is_master and
+        (cluster.IsFileStorageEnabled() or
+         cluster.IsSharedFileStorageEnabled())):
+      try:
+        fspaths = nresult[constants.NV_ACCEPTED_STORAGE_PATHS]
+      except KeyError:
+        # This should never happen
+        self._ErrorIf(True, constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
+                      "Node did not return forbidden file storage paths")
+      else:
+        self._ErrorIf(fspaths, constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
+                      "Found forbidden file storage paths: %s",
+                      utils.CommaJoin(fspaths))
+    else:
+      self._ErrorIf(constants.NV_ACCEPTED_STORAGE_PATHS in nresult,
+                    constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
+                    "Node should not have returned forbidden file storage"
+                    " paths")
+
+  def _VerifyStoragePaths(self, ninfo, nresult, file_disk_template,
+                          verify_key, error_key):
+    """Verifies (file) storage paths.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+    @type file_disk_template: string
+    @param file_disk_template: file-based disk template, whose directory
+        is supposed to be verified
+    @type verify_key: string
+    @param verify_key: key for the verification map of this file
+        verification step
+    @param error_key: error key to be added to the verification results
+        in case something goes wrong in this verification step
+
+    """
+    assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
+              constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
+           ))
+
+    cluster = self.cfg.GetClusterInfo()
+    if cluster.IsDiskTemplateEnabled(file_disk_template):
+      self._ErrorIf(
+          verify_key in nresult,
+          error_key, ninfo.name,
+          "The configured %s storage path is unusable: %s" %
+          (file_disk_template, nresult.get(verify_key)))
+
+  def _VerifyFileStoragePaths(self, ninfo, nresult):
+    """Verifies (file) storage paths.
+
+    @see: C{_VerifyStoragePaths}
+
+    """
+    self._VerifyStoragePaths(
+        ninfo, nresult, constants.DT_FILE,
+        constants.NV_FILE_STORAGE_PATH,
+        constants.CV_ENODEFILESTORAGEPATHUNUSABLE)
+
+  def _VerifySharedFileStoragePaths(self, ninfo, nresult):
+    """Verifies (file) storage paths.
+
+    @see: C{_VerifyStoragePaths}
+
+    """
+    self._VerifyStoragePaths(
+        ninfo, nresult, constants.DT_SHARED_FILE,
+        constants.NV_SHARED_FILE_STORAGE_PATH,
+        constants.CV_ENODESHAREDFILESTORAGEPATHUNUSABLE)
+
+  def _VerifyGlusterStoragePaths(self, ninfo, nresult):
+    """Verifies (file) storage paths.
+
+    @see: C{_VerifyStoragePaths}
+
+    """
+    self._VerifyStoragePaths(
+        ninfo, nresult, constants.DT_GLUSTER,
+        constants.NV_GLUSTER_STORAGE_PATH,
+        constants.CV_ENODEGLUSTERSTORAGEPATHUNUSABLE)
+
+  def _VerifyOob(self, ninfo, nresult):
+    """Verifies out of band functionality of a node.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+
+    """
+    # We just have to verify the paths on master and/or master candidates
+    # as the oob helper is invoked on the master
+    if ((ninfo.master_candidate or ninfo.master_capable) and
+        constants.NV_OOB_PATHS in nresult):
+      for path_result in nresult[constants.NV_OOB_PATHS]:
+        self._ErrorIf(path_result, constants.CV_ENODEOOBPATH,
+                      ninfo.name, path_result)
+
+  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
+    """Verifies and updates the node volume data.
+
+    This function will update a L{NodeImage}'s internal structures
+    with data from the remote call.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+    @param nimg: the node image object
+    @param vg_name: the configured VG name
+
+    """
+    nimg.lvm_fail = True
+    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
+    if vg_name is None:
+      pass
+    elif isinstance(lvdata, basestring):
+      self._ErrorIf(True, constants.CV_ENODELVM, ninfo.name,
+                    "LVM problem on node: %s", utils.SafeEncode(lvdata))
+    elif not isinstance(lvdata, dict):
+      self._ErrorIf(True, constants.CV_ENODELVM, ninfo.name,
+                    "rpc call to node failed (lvlist)")
+    else:
+      nimg.volumes = lvdata
+      nimg.lvm_fail = False
+
+  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
+    """Verifies and updates the node instance list.
+
+    If the listing was successful, then updates this node's instance
+    list. Otherwise, it marks the RPC call as failed for the instance
+    list key.
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+    @param nimg: the node image object
+
+    """
+    idata = nresult.get(constants.NV_INSTANCELIST, None)
+    test = not isinstance(idata, list)
+    self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
+                  "rpc call to node failed (instancelist): %s",
+                  utils.SafeEncode(str(idata)))
+    if test:
+      nimg.hyp_fail = True
+    else:
+      nimg.instances = [uuid for (uuid, _) in
+                        self.cfg.GetMultiInstanceInfoByName(idata)]
+
+  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
+    """Verifies and computes a node information map
+
+    @type ninfo: L{objects.Node}
+    @param ninfo: the node to check
+    @param nresult: the remote results for the node
+    @param nimg: the node image object
+    @param vg_name: the configured VG name
+
+    """
+    # try to read free memory (from the hypervisor)
+    hv_info = nresult.get(constants.NV_HVINFO, None)
+    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
+    self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
+                  "rpc call to node failed (hvinfo)")
+    if not test:
+      try:
+        nimg.mfree = int(hv_info["memory_free"])
+      except (ValueError, TypeError):
+        self._ErrorIf(True, constants.CV_ENODERPC, ninfo.name,
+                      "node returned invalid nodeinfo, check hypervisor")
+
+    # FIXME: devise a free space model for file based instances as well
+    if vg_name is not None:
+      test = (constants.NV_VGLIST not in nresult or
+              vg_name not in nresult[constants.NV_VGLIST])
+      self._ErrorIf(test, constants.CV_ENODELVM, ninfo.name,
+                    "node didn't return data for the volume group '%s'"
+                    " - it is either missing or broken", vg_name)
+      if not test:
+        try:
+          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
+        except (ValueError, TypeError):
+          self._ErrorIf(True, constants.CV_ENODERPC, ninfo.name,
+                        "node returned invalid LVM info, check LVM status")
+
+  def _CollectDiskInfo(self, node_uuids, node_image, instanceinfo):
+    """Gets per-disk status information for all instances.
+
+    @type node_uuids: list of strings
+    @param node_uuids: Node UUIDs
+    @type node_image: dict of (UUID, L{NodeImage})
+    @param node_image: Node images
+    @type instanceinfo: dict of (UUID, L{objects.Instance})
+    @param instanceinfo: Instance objects
+    @rtype: {instance: {node: [(success, payload)]}}
+    @return: a dictionary of per-instance dictionaries with nodes as
+        keys and disk information as values; the disk information is a
+        list of tuples (success, payload)
+
+    """
+    node_disks = {}
+    node_disks_dev_inst_only = {}
+    diskless_instances = set()
+    nodisk_instances = set()
+
+    for nuuid in node_uuids:
+      node_inst_uuids = list(itertools.chain(node_image[nuuid].pinst,
+                                             node_image[nuuid].sinst))
+      diskless_instances.update(uuid for uuid in node_inst_uuids
+                                if not instanceinfo[uuid].disks)
+      disks = [(inst_uuid, disk)
+               for inst_uuid in node_inst_uuids
+               for disk in self.cfg.GetInstanceDisks(inst_uuid)]
+
+      if not disks:
+        nodisk_instances.update(uuid for uuid in node_inst_uuids
+                                if instanceinfo[uuid].disks)
+        # No need to collect data
+        continue
+
+      node_disks[nuuid] = disks
+
+      # _AnnotateDiskParams makes already copies of the disks
+      dev_inst_only = []
+      for (inst_uuid, dev) in disks:
+        (anno_disk,) = AnnotateDiskParams(instanceinfo[inst_uuid], [dev],
+                                          self.cfg)
+        dev_inst_only.append((anno_disk, instanceinfo[inst_uuid]))
+
+      node_disks_dev_inst_only[nuuid] = dev_inst_only
+
+    assert len(node_disks) == len(node_disks_dev_inst_only)
+
+    # Collect data from all nodes with disks
+    result = self.rpc.call_blockdev_getmirrorstatus_multi(
+               node_disks.keys(), node_disks_dev_inst_only)
+
+    assert len(result) == len(node_disks)
+
+    instdisk = {}
+
+    for (nuuid, nres) in result.items():
+      node = self.cfg.GetNodeInfo(nuuid)
+      disks = node_disks[node.uuid]
+
+      if nres.offline:
+        # No data from this node
+        data = len(disks) * [(False, "node offline")]
+      else:
+        msg = nres.fail_msg
+        self._ErrorIf(msg, constants.CV_ENODERPC, node.name,
+                      "while getting disk information: %s", msg)
+        if msg:
+          # No data from this node
+          data = len(disks) * [(False, msg)]
+        else:
+          data = []
+          for idx, i in enumerate(nres.payload):
+            if isinstance(i, (tuple, list)) and len(i) == 2:
+              data.append(i)
+            else:
+              logging.warning("Invalid result from node %s, entry %d: %s",
+                              node.name, idx, i)
+              data.append((False, "Invalid result from the remote node"))
+
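+      # pair each (instance, disk) entry with its status and group the
+      # results by instance and node UUID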
+      for ((inst_uuid, _), status) in zip(disks, data):
+        instdisk.setdefault(inst_uuid, {}).setdefault(node.uuid, []) \
+          .append(status)
+
+    # Add empty entries for diskless instances.
+    for inst_uuid in diskless_instances:
+      assert inst_uuid not in instdisk
+      instdisk[inst_uuid] = {}
+    # ...and disk-full instances that happen to have no disks
+    for inst_uuid in nodisk_instances:
+      assert inst_uuid not in instdisk
+      instdisk[inst_uuid] = {}
+
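+    # sanity check: each node reports one status per instance disk, no more
+    # nodes report than the instance has, and every status is a
+    # (success, payload) pair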
+    assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
+                      len(nuuids) <= len(
+                        self.cfg.GetInstanceNodes(instanceinfo[inst].uuid)) and
+                      compat.all(isinstance(s, (tuple, list)) and
+                                 len(s) == 2 for s in statuses)
+                      for inst, nuuids in instdisk.items()
+                      for nuuid, statuses in nuuids.items())
+    if __debug__:
+      instdisk_keys = set(instdisk)
+      instanceinfo_keys = set(instanceinfo)
+      assert instdisk_keys == instanceinfo_keys, \
+        ("instdisk keys (%s) do not match instanceinfo keys (%s)" %
+         (instdisk_keys, instanceinfo_keys))
+
+    return instdisk
+
+  @staticmethod
+  def _SshNodeSelector(group_uuid, all_nodes):
+    """Create endless iterators for all potential SSH check hosts.
+
+    """
+    nodes = [node for node in all_nodes
+             if (node.group != group_uuid and
+                 not node.offline)]
+    keyfunc = operator.attrgetter("group")
+
+    return map(itertools.cycle,
+               [sorted(map(operator.attrgetter("name"), names))
+                for _, names in itertools.groupby(sorted(nodes, key=keyfunc),
+                                                  keyfunc)])
+
+  @classmethod
+  def _SelectSshCheckNodes(cls, group_nodes, group_uuid, all_nodes):
+    """Choose which nodes should talk to which other nodes.
+
+    We will make nodes contact all nodes in their group, and one node from
+    every other group.
+
+    @rtype: tuple of (list of strings, dict of strings to list of strings,
+      list of strings)
+    @return: a tuple containing the list of all online nodes, a dictionary
+      mapping node names to additional nodes of other node groups to which
+      connectivity should be tested, and a list of all online master
+      candidates
+
+    @warning: This algorithm has a known issue if one node group is much
+      smaller than others (e.g. just one node). In such a case all other
+      nodes will talk to the single node.
+
+    """
+    online_nodes = sorted(node.name for node in group_nodes if not node.offline)
+    online_mcs = sorted(node.name for node in group_nodes
+                        if (node.master_candidate and not node.offline))
+    sel = cls._SshNodeSelector(group_uuid, all_nodes)
+
+    return (online_nodes,
+            dict((name, sorted([i.next() for i in sel]))
+                 for name in online_nodes),
+            online_mcs)
+
+  def _PrepareSshSetupCheck(self):
+    """Prepare the input data for the SSH setup verification.
+
+    """
+    all_nodes_info = self.cfg.GetAllNodesInfo()
+    potential_master_candidates = self.cfg.GetPotentialMasterCandidates()
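+    # one tuple per node: (uuid, name, is_master_candidate,
+    # is_potential_master_candidate, is_online)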
+    node_status = [
+      (uuid, node_info.name, node_info.master_candidate,
+       node_info.name in potential_master_candidates, not node_info.offline)
+      for (uuid, node_info) in all_nodes_info.items()]
+    return node_status
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    Cluster-Verify hooks are run in the post phase only; their failure is
+    logged in the verify output and causes the verification to fail.
+
+    """
+    env = {
+      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags()),
+      }
+
+    env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
+               for node in self.my_node_info.values())
+
+    return env
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    return ([], list(self.my_node_info.keys()))
+
+  @staticmethod
+  def _VerifyOtherNotes(feedback_fn, i_non_redundant, i_non_a_balanced,
+                        i_offline, n_offline, n_drained):
+    feedback_fn("* Other Notes")
+    if i_non_redundant:
+      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
+                  % len(i_non_redundant))
+
+    if i_non_a_balanced:
+      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
+                  % len(i_non_a_balanced))
+
+    if i_offline:
+      feedback_fn("  - NOTICE: %d offline instance(s) found." % i_offline)
+
+    if n_offline:
+      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)
+
+    if n_drained:
+      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)
+
+  def Exec(self, feedback_fn): # pylint: disable=R0915
+    """Verify integrity of the node group, performing various test on nodes.
+
+    """
+    # This method has too many local variables. pylint: disable=R0914
+    feedback_fn("* Verifying group '%s'" % self.group_info.name)
+
+    if not self.my_node_uuids:
+      # empty node group
+      feedback_fn("* Empty node group, skipping verification")
+      return True
+
+    self.bad = False
+    verbose = self.op.verbose
+    self._feedback_fn = feedback_fn
+
+    vg_name = self.cfg.GetVGName()
+    drbd_helper = self.cfg.GetDRBDHelper()
+    cluster = self.cfg.GetClusterInfo()
+    hypervisors = cluster.enabled_hypervisors
+    node_data_list = self.my_node_info.values()
+
+    i_non_redundant = [] # Non redundant instances
+    i_non_a_balanced = [] # Non auto-balanced instances
+    i_offline = 0 # Count of offline instances
+    n_offline = 0 # Count of offline nodes
+    n_drained = 0 # Count of nodes being drained
+    node_vol_should = {}
+
+    # FIXME: verify OS list
+
+    # File verification
+    filemap = ComputeAncillaryFiles(cluster, False)
+
+    # do local checksums
+    master_node_uuid = self.master_node = self.cfg.GetMasterNode()
+    master_ip = self.cfg.GetMasterIP()
+
+    feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_uuids))
+
+    user_scripts = []
+    if self.cfg.GetUseExternalMipScript():
+      user_scripts.append(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT)
+
+    node_verify_param = {
+      constants.NV_FILELIST:
+        map(vcluster.MakeVirtualPath,
+            utils.UniqueSequence(filename
+                                 for files in filemap
+                                 for filename in files)),
+      constants.NV_NODELIST:
+        self._SelectSshCheckNodes(node_data_list, self.group_uuid,
+                                  self.all_node_info.values()),
+      constants.NV_HYPERVISOR: hypervisors,
+      constants.NV_HVPARAMS:
+        _GetAllHypervisorParameters(cluster, self.all_inst_info.values()),
+      constants.NV_NODENETTEST: [(node.name, node.primary_ip, node.secondary_ip)
+                                 for node in node_data_list
+                                 if not node.offline],
+      constants.NV_INSTANCELIST: hypervisors,
+      constants.NV_VERSION: None,
+      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
+      constants.NV_NODESETUP: None,
+      constants.NV_TIME: None,
+      constants.NV_MASTERIP: (self.cfg.GetMasterNodeName(), master_ip),
+      constants.NV_OSLIST: None,
+      constants.NV_NONVMNODES: self.cfg.GetNonVmCapableNodeNameList(),
+      constants.NV_USERSCRIPTS: user_scripts,
+      constants.NV_CLIENT_CERT: None,
+      }
+
+    if self.cfg.GetClusterInfo().modify_ssh_setup:
+      node_verify_param[constants.NV_SSH_SETUP] = self._PrepareSshSetupCheck()
+      if self.op.verify_clutter:
+        node_verify_param[constants.NV_SSH_CLUTTER] = True
+
+    if vg_name is not None:
+      node_verify_param[constants.NV_VGLIST] = None
+      node_verify_param[constants.NV_LVLIST] = vg_name
+      node_verify_param[constants.NV_PVLIST] = [vg_name]
+
+    if cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
+      if drbd_helper:
+        node_verify_param[constants.NV_DRBDVERSION] = None
+        node_verify_param[constants.NV_DRBDLIST] = None
+        node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
+
+    if cluster.IsFileStorageEnabled() or \
+        cluster.IsSharedFileStorageEnabled():
+      # Load file storage paths only from master node
+      node_verify_param[constants.NV_ACCEPTED_STORAGE_PATHS] = \
+        self.cfg.GetMasterNodeName()
+      if cluster.IsFileStorageEnabled():
+        node_verify_param[constants.NV_FILE_STORAGE_PATH] = \
+          cluster.file_storage_dir
+      if cluster.IsSharedFileStorageEnabled():
+        node_verify_param[constants.NV_SHARED_FILE_STORAGE_PATH] = \
+          cluster.shared_file_storage_dir
+
+    # bridge checks
+    # FIXME: this needs to be changed per node-group, not cluster-wide
+    bridges = set()
+    default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
+    if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
+      bridges.add(default_nicpp[constants.NIC_LINK])
+    for instance in self.my_inst_info.values():
+      for nic in instance.nics:
+        full_nic = cluster.SimpleFillNIC(nic.nicparams)
+        if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
+          bridges.add(full_nic[constants.NIC_LINK])
+
+    if bridges:
+      node_verify_param[constants.NV_BRIDGES] = list(bridges)
+
+    # Build our expected cluster state
+    node_image = dict((node.uuid, self.NodeImage(offline=node.offline,
+                                                 uuid=node.uuid,
+                                                 vm_capable=node.vm_capable))
+                      for node in node_data_list)
+
+    # Gather OOB paths
+    oob_paths = []
+    for node in self.all_node_info.values():
+      path = SupportsOob(self.cfg, node)
+      if path and path not in oob_paths:
+        oob_paths.append(path)
+
+    if oob_paths:
+      node_verify_param[constants.NV_OOB_PATHS] = oob_paths
+
+    for inst_uuid in self.my_inst_uuids:
+      instance = self.my_inst_info[inst_uuid]
+      if instance.admin_state == constants.ADMINST_OFFLINE:
+        i_offline += 1
+
+      inst_nodes = self.cfg.GetInstanceNodes(instance.uuid)
+      for nuuid in inst_nodes:
+        if nuuid not in node_image:
+          gnode = self.NodeImage(uuid=nuuid)
+          gnode.ghost = (nuuid not in self.all_node_info)
+          node_image[nuuid] = gnode
+
+      self.cfg.GetInstanceLVsByNode(instance.uuid, lvmap=node_vol_should)
+
+      pnode = instance.primary_node
+      node_image[pnode].pinst.append(instance.uuid)
+
+      for snode in self.cfg.GetInstanceSecondaryNodes(instance.uuid):
+        nimg = node_image[snode]
+        nimg.sinst.append(instance.uuid)
+        if pnode not in nimg.sbp:
+          nimg.sbp[pnode] = []
+        nimg.sbp[pnode].append(instance.uuid)
+
+    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
+                                               self.my_node_info.keys())
+    # The value of exclusive_storage should be the same across the group, so if
+    # it's True for at least a node, we act as if it were set for all the nodes
+    self._exclusive_storage = compat.any(es_flags.values())
+    if self._exclusive_storage:
+      node_verify_param[constants.NV_EXCLUSIVEPVS] = True
+
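+    # map each node name to its group UUID for the node_verify RPC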
+    node_group_uuids = dict(map(lambda n: (n.name, n.group),
+                                self.cfg.GetAllNodesInfo().values()))
+    groups_config = self.cfg.GetAllNodeGroupsInfoDict()
+
+    # At this point, we have the in-memory data structures complete,
+    # except for the runtime information, which we'll gather next
+
+    # NOTE: Here we lock the configuration for the duration of RPC calls,
+    # which means that the cluster configuration changes are blocked during
+    # this period.
+    # This is something that should be done only exceptionally and only for
+    # justified cases!
+    # In this case, we need the lock as we can only verify the integrity of
+    # configuration files on MCs only if we know nobody else is modifying it.
+    # FIXME: The check for integrity of config.data should be moved to
+    # WConfD, which is the only one who can otherwise ensure nobody
+    # will modify the configuration during the check.
+    with self.cfg.GetConfigManager(shared=True, forcelock=True):
+      feedback_fn("* Gathering information about nodes (%s nodes)" %
+                  len(self.my_node_uuids))
+      # Force the configuration to be fully distributed before doing any tests
+      self.cfg.FlushConfig()
+      # Due to the way our RPC system works, exact response times cannot be
+      # guaranteed (e.g. a broken node could run into a timeout). By keeping
+      # the time before and after executing the request, we can at least have
+      # a time window.
+      nvinfo_starttime = time.time()
+      # Get lock on the configuration so that nobody modifies it concurrently.
+      # Otherwise it can be modified by other jobs, failing the consistency
+      # test.
+      # NOTE: This is an exceptional situation; we should otherwise avoid
+      # locking the configuration for anything but very fast, pure operations.
+      cluster_name = self.cfg.GetClusterName()
+      hvparams = self.cfg.GetClusterInfo().hvparams
+      all_nvinfo = self.rpc.call_node_verify(self.my_node_uuids,
+                                             node_verify_param,
+                                             cluster_name,
+                                             hvparams,
+                                             node_group_uuids,
+                                             groups_config)
+      nvinfo_endtime = time.time()
+
+      if self.extra_lv_nodes and vg_name is not None:
+        feedback_fn("* Gathering information about extra nodes (%s nodes)" %
+                    len(self.extra_lv_nodes))
+        extra_lv_nvinfo = \
+            self.rpc.call_node_verify(self.extra_lv_nodes,
+                                      {constants.NV_LVLIST: vg_name},
+                                      self.cfg.GetClusterName(),
+                                      self.cfg.GetClusterInfo().hvparams,
+                                      node_group_uuids,
+                                      groups_config)
+      else:
+        extra_lv_nvinfo = {}
+
+      # If not all nodes are being checked, we need to make sure the master
+      # node and a non-checked vm_capable node are in the list.
+      absent_node_uuids = set(self.all_node_info).difference(self.my_node_info)
+      if absent_node_uuids:
+        vf_nvinfo = all_nvinfo.copy()
+        vf_node_info = list(self.my_node_info.values())
+        additional_node_uuids = []
+        if master_node_uuid not in self.my_node_info:
+          additional_node_uuids.append(master_node_uuid)
+          vf_node_info.append(self.all_node_info[master_node_uuid])
+        # Add the first vm_capable node we find which is not included,
+        # excluding the master node (which we already have)
+        for node_uuid in absent_node_uuids:
+          nodeinfo = self.all_node_info[node_uuid]
+          if (nodeinfo.vm_capable and not nodeinfo.offline and
+              node_uuid != master_node_uuid):
+            additional_node_uuids.append(node_uuid)
+            vf_node_info.append(self.all_node_info[node_uuid])
+            break
+        key = constants.NV_FILELIST
+
+        feedback_fn("* Gathering information about the master node")
+        vf_nvinfo.update(self.rpc.call_node_verify(
+           additional_node_uuids, {key: node_verify_param[key]},
+           self.cfg.GetClusterName(), self.cfg.GetClusterInfo().hvparams,
+           node_group_uuids,
+           groups_config))
+      else:
+        vf_nvinfo = all_nvinfo
+        vf_node_info = self.my_node_info.values()
+
+    all_drbd_map = self.cfg.ComputeDRBDMap()
+
+    feedback_fn("* Gathering disk information (%s nodes)" %
+                len(self.my_node_uuids))
+    instdisk = self._CollectDiskInfo(self.my_node_info.keys(), node_image,
+                                     self.my_inst_info)
+
+    feedback_fn("* Verifying configuration file consistency")
+
+    self._VerifyClientCertificates(self.my_node_info.values(), all_nvinfo)
+    if self.cfg.GetClusterInfo().modify_ssh_setup:
+      self._VerifySshSetup(self.my_node_info.values(), all_nvinfo)
+    self._VerifyFiles(vf_node_info, master_node_uuid, vf_nvinfo, filemap)
+
+    feedback_fn("* Verifying node status")
+
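+    # refos_img: the first node image with valid OS data is used as the
+    # reference against which the OS data of the other nodes is compared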
+    refos_img = None
+
+    for node_i in node_data_list:
+      nimg = node_image[node_i.uuid]
+
+      if node_i.offline:
+        if verbose:
+          feedback_fn("* Skipping offline node %s" % (node_i.name,))
+        n_offline += 1
+        continue
+
+      if node_i.uuid == master_node_uuid:
+        ntype = "master"
+      elif node_i.master_candidate:
+        ntype = "master candidate"
+      elif node_i.drained:
+        ntype = "drained"
+        n_drained += 1
+      else:
+        ntype = "regular"
+      if verbose:
+        feedback_fn("* Verifying node %s (%s)" % (node_i.name, ntype))
+
+      msg = all_nvinfo[node_i.uuid].fail_msg
+      self._ErrorIf(msg, constants.CV_ENODERPC, node_i.name,
+                    "while contacting node: %s", msg)
+      if msg:
+        nimg.rpc_fail = True
+        continue
+
+      nresult = all_nvinfo[node_i.uuid].payload
+
+      nimg.call_ok = self._VerifyNode(node_i, nresult)
+      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
+      self._VerifyNodeNetwork(node_i, nresult)
+      self._VerifyNodeUserScripts(node_i, nresult)
+      self._VerifyOob(node_i, nresult)
+      self._VerifyAcceptedFileStoragePaths(node_i, nresult,
+                                           node_i.uuid == master_node_uuid)
+      self._VerifyFileStoragePaths(node_i, nresult)
+      self._VerifySharedFileStoragePaths(node_i, nresult)
+      self._VerifyGlusterStoragePaths(node_i, nresult)
+
+      if nimg.vm_capable:
+        self._UpdateVerifyNodeLVM(node_i, nresult, vg_name, nimg)
+        if constants.DT_DRBD8 in cluster.enabled_disk_templates:
+          self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info,
+                               self.all_disks_info, drbd_helper, all_drbd_map)
+
+        if (constants.DT_PLAIN in cluster.enabled_disk_templates) or \
+            (constants.DT_DRBD8 in cluster.enabled_disk_templates):
+          self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
+        self._UpdateNodeInstances(node_i, nresult, nimg)
+        self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
+        self._UpdateNodeOS(node_i, nresult, nimg)
+
+        if not nimg.os_fail:
+          if refos_img is None:
+            refos_img = nimg
+          self._VerifyNodeOS(node_i, nimg, refos_img)
+        self._VerifyNodeBridges(node_i, nresult, bridges)
+
+        # Check whether all running instances are primary for the node. (This
+        # can no longer be done from _VerifyInstance below, since some of the
+        # wrong instances could be from other node groups.)
+        non_primary_inst_uuids = set(nimg.instances).difference(nimg.pinst)
+
+        for inst_uuid in non_primary_inst_uuids:
+          test = inst_uuid in self.all_inst_info
+          self._ErrorIf(test, constants.CV_EINSTANCEWRONGNODE,
+                        self.cfg.GetInstanceName(inst_uuid),
+                        "instance should not run on node %s", node_i.name)
+          self._ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
+                        "node is running unknown instance %s", inst_uuid)
+
+    self._VerifyGroupDRBDVersion(all_nvinfo)
+    self._VerifyGroupLVM(node_image, vg_name)
+
+    for node_uuid, result in extra_lv_nvinfo.items():
+      self._UpdateNodeVolumes(self.all_node_info[node_uuid], result.payload,
+                              node_image[node_uuid], vg_name)
+
+    feedback_fn("* Verifying instance status")
+    for inst_uuid in self.my_inst_uuids:
+      instance = self.my_inst_info[inst_uuid]
+      if verbose:
+        feedback_fn("* Verifying instance %s" % instance.name)
+      self._VerifyInstance(instance, node_image, instdisk[inst_uuid])
+
+      # If the instance is not fully redundant we cannot survive losing its
+      # primary node, so we are not N+1 compliant.
+      inst_disks = self.cfg.GetInstanceDisks(instance.uuid)
+      if not utils.AllDiskOfType(inst_disks, constants.DTS_MIRRORED):
+        i_non_redundant.append(instance)
+
+      if not cluster.FillBE(instance)[constants.BE_AUTO_BALANCE]:
+        i_non_a_balanced.append(instance)
+
+    feedback_fn("* Verifying orphan volumes")
+    reserved = utils.FieldSet(*cluster.reserved_lvs)
+
+    # We will get spurious "unknown volume" warnings if any node of this group
+    # is secondary for an instance whose primary is in another group. To avoid
+    # them, we find these instances and add their volumes to node_vol_should.
+    for instance in self.all_inst_info.values():
+      for secondary in self.cfg.GetInstanceSecondaryNodes(instance.uuid):
+        if (secondary in self.my_node_info
+            and instance.uuid not in self.my_inst_info):
+          self.cfg.GetInstanceLVsByNode(instance.uuid, lvmap=node_vol_should)
+          break
+
+    self._VerifyOrphanVolumes(vg_name, node_vol_should, node_image, reserved)
+
+    if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
+      feedback_fn("* Verifying N+1 Memory redundancy")
+      self._VerifyNPlusOneMemory(node_image, self.my_inst_info)
+
+    self._VerifyOtherNotes(feedback_fn, i_non_redundant, i_non_a_balanced,
+                           i_offline, n_offline, n_drained)
+
+    return not self.bad
+
+  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
+    """Analyze the post-hooks' result
+
+    This method analyses the hook result, handles it, and sends some
+    nicely-formatted feedback back to the user.
+
+    @param phase: one of L{constants.HOOKS_PHASE_POST} or
+        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
+    @param hooks_results: the results of the multi-node hooks rpc call
+    @param feedback_fn: function used to send feedback back to the caller
+    @param lu_result: previous Exec result
+    @return: the new Exec result, based on the previous result
+        and hook results
+
+    """
+    # We only really run POST phase hooks for non-empty groups,
+    # and we are only interested in their results
+    if not self.my_node_uuids:
+      # empty node group
+      pass
+    elif phase == constants.HOOKS_PHASE_POST:
+      # Used to change hooks' output to proper indentation
+      feedback_fn("* Hooks Results")
+      assert hooks_results, "invalid result from hooks"
+
+      for node_name in hooks_results:
+        res = hooks_results[node_name]
+        msg = res.fail_msg
+        test = msg and not res.offline
+        self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
+                      "Communication failure in hooks execution: %s", msg)
+        if test:
+          lu_result = False
+          continue
+        if res.offline:
+          # No need to investigate payload if node is offline
+          continue
+        for script, hkr, output in res.payload:
+          test = hkr == constants.HKR_FAIL
+          self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
+                        "Script %s failed, output:", script)
+          if test:
+            output = self._HOOKS_INDENT_RE.sub("      ", output)
+            feedback_fn("%s" % output)
+            lu_result = False
+
+    return lu_result
diff --git a/lib/cmdlib/common.py b/lib/cmdlib/common.py
index ddc4551..fa2bf77 100644
--- a/lib/cmdlib/common.py
+++ b/lib/cmdlib/common.py
@@ -549,8 +549,7 @@
 
 def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                                 nic_count, disk_sizes, spindle_use,
-                                disk_template,
-                                _compute_fn=_ComputeMinMaxSpec):
+                                disk_types, _compute_fn=_ComputeMinMaxSpec):
   """Verifies ipolicy against provided specs.
 
   @type ipolicy: dict
@@ -567,13 +566,15 @@
   @param disk_sizes: Disk sizes of used disks (len must match C{disk_count})
   @type spindle_use: int
   @param spindle_use: The number of spindles this instance uses
-  @type disk_template: string
-  @param disk_template: The disk template of the instance
+  @type disk_types: list of strings
+  @param disk_types: The disk types of the instance's disks
   @param _compute_fn: The compute function (unittest only)
   @return: A list of violations, or an empty list if no violations are found
 
   """
   assert disk_count == len(disk_sizes)
+  assert isinstance(disk_types, list)
+  assert disk_count == len(disk_types)
 
   test_settings = [
     (constants.ISPEC_MEM_SIZE, "", mem_size),
@@ -582,14 +583,20 @@
     (constants.ISPEC_SPINDLE_USE, "", spindle_use),
     ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
          for idx, d in enumerate(disk_sizes)]
-  if disk_template != constants.DT_DISKLESS:
+
+  allowed_dts = set(ipolicy[constants.IPOLICY_DTS])
+  ret = []
+  if disk_count != 0:
     # This check doesn't make sense for diskless instances
     test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
-  ret = []
-  allowed_dts = ipolicy[constants.IPOLICY_DTS]
-  if disk_template not in allowed_dts:
+  elif constants.DT_DISKLESS not in allowed_dts:
+    ret.append("Disk template %s is not allowed (allowed templates %s)" %
+                (constants.DT_DISKLESS, utils.CommaJoin(allowed_dts)))
+
+  forbidden_dts = set(disk_types) - allowed_dts
+  if forbidden_dts:
     ret.append("Disk template %s is not allowed (allowed templates: %s)" %
-               (disk_template, utils.CommaJoin(allowed_dts)))
+               (utils.CommaJoin(forbidden_dts), utils.CommaJoin(allowed_dts)))
 
   min_errs = None
   for minmax in ipolicy[constants.ISPECS_MINMAX]:
@@ -602,8 +609,7 @@
   return ret + min_errs
 
 
-def ComputeIPolicyDiskSizesViolation(ipolicy, disk_sizes,
-                                     disk_template,
+def ComputeIPolicyDiskSizesViolation(ipolicy, disk_sizes, disks,
                                      _compute_fn=_ComputeMinMaxSpec):
   """Verifies ipolicy against provided disk sizes.
 
@@ -614,18 +620,21 @@
   @param ipolicy: The ipolicy
   @type disk_sizes: list of ints
   @param disk_sizes: Disk sizes of used disks (len must match that of C{disks})
-  @type disk_template: string
-  @param disk_template: The disk template of the instance
+  @type disks: list of L{Disk}
+  @param disks: The Disk objects of the instance
   @param _compute_fn: The compute function (unittest only)
   @return: A list of violations, or an empty list if no violations are found
 
   """
+  if len(disk_sizes) != len(disks):
+    return [constants.ISPEC_DISK_COUNT]
+  dev_types = [d.dev_type for d in disks]
   return ComputeIPolicySpecViolation(ipolicy,
                                      # mem_size, cpu_count, disk_count
                                      None, None, len(disk_sizes),
                                      None, disk_sizes, # nic_count, disk_sizes
                                      None, # spindle_use
-                                     disk_template,
+                                     dev_types,
                                      _compute_fn=_compute_fn)
 
 
@@ -665,10 +674,10 @@
   disk_count = len(disks)
   disk_sizes = [disk.size for disk in disks]
   nic_count = len(instance.nics)
-  disk_template = instance.disk_template
+  disk_types = [d.dev_type for d in disks]
 
   return ret + _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
-                           disk_sizes, spindle_use, disk_template)
+                           disk_sizes, spindle_use, disk_types)
 
 
 def _ComputeViolatingInstances(ipolicy, instances, cfg):
@@ -1294,20 +1303,21 @@
 
     for entry in inst_uuids:
       inst = cfg.GetInstanceInfo(entry)
-      inst_template = inst.disk_template
+      disks = cfg.GetInstanceDisks(entry)
+      for disk in disks:
 
-      if inst_template != disk_template:
-        continue
+        if disk.dev_type != disk_template:
+          continue
 
-      hv = inst.hypervisor
+        hv = inst.hypervisor
 
-      if not IsValidDiskAccessModeCombination(hv, inst_template, access):
-        raise errors.OpPrereqError("Instance {i}: cannot use '{a}' access"
-                                   " setting with {h} hypervisor and {d} disk"
-                                   " type.".format(i=inst.name,
-                                                   a=access,
-                                                   h=hv,
-                                                   d=inst_template))
+        if not IsValidDiskAccessModeCombination(hv, disk.dev_type, access):
+          raise errors.OpPrereqError("Instance {i}: cannot use '{a}' access"
+                                     " setting with {h} hypervisor and {d} disk"
+                                     " type.".format(i=inst.name,
+                                                     a=access,
+                                                     h=hv,
+                                                     d=disk.dev_type))
 
 
 def IsValidDiskAccessModeCombination(hv, disk_template, mode):
diff --git a/lib/cmdlib/group.py b/lib/cmdlib/group.py
index 30983e0..91f8752 100644
--- a/lib/cmdlib/group.py
+++ b/lib/cmdlib/group.py
@@ -295,7 +295,7 @@
     In particular, it returns information about newly split instances, and
     instances that were already split, and remain so after the change.
 
-    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
+    Only disks whose template is listed in constants.DTS_INT_MIRROR are
     considered.
 
     @type changes: list of (node_uuid, new_group_uuid) pairs.
@@ -315,7 +315,8 @@
     previously_split_instances = set()
 
     for inst in instance_data.values():
-      if inst.disk_template not in constants.DTS_INT_MIRROR:
+      inst_disks = self.cfg.GetInstanceDisks(inst.uuid)
+      if not utils.AnyDiskOfType(inst_disks, constants.DTS_INT_MIRROR):
         continue
 
       inst_nodes = self.cfg.GetInstanceNodes(inst.uuid)
@@ -846,12 +847,6 @@
       locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [],
       locking.LEVEL_NODE: [],
-
-      # This opcode is acquires all node locks in a group. LUClusterVerifyDisks
-      # starts one instance of this opcode for every group, which means all
-      # nodes will be locked for a short amount of time, so it's better to
-      # acquire the node allocation lock as well.
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
       }
     self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
     self.dont_collate_locks[locking.LEVEL_NODE] = True
@@ -943,7 +938,9 @@
   def _VerifyDrbdStates(self, node_errors, offline_disk_instance_names):
     node_to_inst = {}
     for inst in self.instances.values():
-      if not inst.disks_active or inst.disk_template != constants.DT_DRBD8:
+      disks = self.cfg.GetInstanceDisks(inst.uuid)
+      if not (inst.disks_active and
+              utils.AnyDiskOfType(disks, [constants.DT_DRBD8])):
         continue
 
       secondary_nodes = self.cfg.GetInstanceSecondaryNodes(inst.uuid)
diff --git a/lib/cmdlib/instance.py b/lib/cmdlib/instance.py
index e6c5866..4f46910 100644
--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
@@ -30,1790 +30,44 @@
 
 """Logical units dealing with instances."""
 
-import OpenSSL
-import copy
 import logging
 import os
 
 from ganeti import compat
 from ganeti import constants
 from ganeti import errors
-from ganeti import ht
-from ganeti import hypervisor
 from ganeti import locking
 from ganeti.masterd import iallocator
 from ganeti import masterd
 from ganeti import netutils
 from ganeti import objects
-from ganeti import pathutils
-from ganeti import serializer
-import ganeti.rpc.node as rpc
 from ganeti import utils
-from ganeti.utils import retry
 
 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
 
-from ganeti.cmdlib.common import INSTANCE_DOWN, \
+from ganeti.cmdlib.common import \
   INSTANCE_NOT_RUNNING, CheckNodeOnline, \
   ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
-  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
-  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, CheckOSImage, \
-  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
-  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
-  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination, \
-  DetermineImageSize, IsInstanceRunning
+  LoadNodeEvacResult, \
+  ExpandInstanceUuidAndName, \
+  CheckInstanceState, ExpandNodeUuidAndName, \
+  CheckDiskTemplateEnabled
 from ganeti.cmdlib.instance_storage import CreateDisks, \
-  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, ImageDisks, \
-  WaitForSync, IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, \
-  ComputeDisks, ComputeDisksInfo, CheckRADOSFreeSpace, ComputeDiskSizePerVG, \
-  GenerateDiskTemplate, StartInstanceDisks, ShutdownInstanceDisks, \
-  AssembleInstanceDisks, CheckSpindlesExclusiveStorage, TemporaryDisk, \
-  CalculateFileStorageDir
-from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
-  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
-  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
-  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
+  ComputeDisks, \
+  StartInstanceDisks, ShutdownInstanceDisks, \
+  AssembleInstanceDisks
+from ganeti.cmdlib.instance_utils import \
+  BuildInstanceHookEnvByObject,\
+  CheckNodeNotDrained, RemoveInstance, CopyLockList, \
+  CheckNodeVmCapable, CheckTargetNodeIPolicy, \
   GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
-  CheckInstanceBridgesExist, CheckNicsBridgesExist, UpdateMetadata, \
-  CheckCompressionTool, CheckInstanceExistence
+  CheckInstanceBridgesExist, \
+  CheckInstanceExistence, \
+  CheckHostnameSane, CheckOpportunisticLocking, ComputeFullBeParams, \
+  ComputeNics, CreateInstanceAllocRequest
 import ganeti.masterd.instance
 
 
-#: Type description for changes as returned by L{_ApplyContainerMods}'s
-#: callbacks
-_TApplyContModsCbChanges = \
-  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
-    ht.TNonEmptyString,
-    ht.TAny,
-    ])))
-
-
-def _CheckHostnameSane(lu, name):
-  """Ensures that a given hostname resolves to a 'sane' name.
-
-  The given name is required to be a prefix of the resolved hostname,
-  to prevent accidental mismatches.
-
-  @param lu: the logical unit on behalf of which we're checking
-  @param name: the name we should resolve and check
-  @return: the resolved hostname object
-
-  """
-  hostname = netutils.GetHostname(name=name)
-  if hostname.name != name:
-    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
-  if not utils.MatchNameComponent(name, [hostname.name]):
-    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
-                                " same as given hostname '%s'") %
-                               (hostname.name, name), errors.ECODE_INVAL)
-  return hostname
-
-
-def _CheckOpportunisticLocking(op):
-  """Generate error if opportunistic locking is not possible.
-
-  """
-  if op.opportunistic_locking and not op.iallocator:
-    raise errors.OpPrereqError("Opportunistic locking is only available in"
-                               " combination with an instance allocator",
-                               errors.ECODE_INVAL)
-
-
-def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
-  """Wrapper around IAReqInstanceAlloc.
-
-  @param op: The instance opcode
-  @param disks: The computed disks
-  @param nics: The computed nics
-  @param beparams: The full filled beparams
-  @param node_name_whitelist: List of nodes which should appear as online to the
-    allocator (unless the node is already marked offline)
-
-  @returns: A filled L{iallocator.IAReqInstanceAlloc}
-
-  """
-  spindle_use = beparams[constants.BE_SPINDLE_USE]
-  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
-                                       disk_template=op.disk_template,
-                                       group_name=op.group_name,
-                                       tags=op.tags,
-                                       os=op.os_type,
-                                       vcpus=beparams[constants.BE_VCPUS],
-                                       memory=beparams[constants.BE_MAXMEM],
-                                       spindle_use=spindle_use,
-                                       disks=disks,
-                                       nics=[n.ToDict() for n in nics],
-                                       hypervisor=op.hypervisor,
-                                       node_whitelist=node_name_whitelist)
-
-
-def _ComputeFullBeParams(op, cluster):
-  """Computes the full beparams.
-
-  @param op: The instance opcode
-  @param cluster: The cluster config object
-
-  @return: The fully filled beparams
-
-  """
-  default_beparams = cluster.beparams[constants.PP_DEFAULT]
-  for param, value in op.beparams.iteritems():
-    if value == constants.VALUE_AUTO:
-      op.beparams[param] = default_beparams[param]
-  objects.UpgradeBeParams(op.beparams)
-  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
-  return cluster.SimpleFillBE(op.beparams)
-
-
-def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
-  """Computes the nics.
-
-  @param op: The instance opcode
-  @param cluster: Cluster configuration object
-  @param default_ip: The default ip to assign
-  @param cfg: An instance of the configuration object
-  @param ec_id: Execution context ID
-
-  @returns: The build up nics
-
-  """
-  nics = []
-  for nic in op.nics:
-    nic_mode_req = nic.get(constants.INIC_MODE, None)
-    nic_mode = nic_mode_req
-    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
-      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
-
-    net = nic.get(constants.INIC_NETWORK, None)
-    link = nic.get(constants.NIC_LINK, None)
-    ip = nic.get(constants.INIC_IP, None)
-    vlan = nic.get(constants.INIC_VLAN, None)
-
-    if net is None or net.lower() == constants.VALUE_NONE:
-      net = None
-    else:
-      if nic_mode_req is not None or link is not None:
-        raise errors.OpPrereqError("If network is given, no mode or link"
-                                   " is allowed to be passed",
-                                   errors.ECODE_INVAL)
-
-    # ip validity checks
-    if ip is None or ip.lower() == constants.VALUE_NONE:
-      nic_ip = None
-    elif ip.lower() == constants.VALUE_AUTO:
-      if not op.name_check:
-        raise errors.OpPrereqError("IP address set to auto but name checks"
-                                   " have been skipped",
-                                   errors.ECODE_INVAL)
-      nic_ip = default_ip
-    else:
-      # We defer pool operations until later, so that the iallocator has
-      # filled in the instance's node(s) dimara
-      if ip.lower() == constants.NIC_IP_POOL:
-        if net is None:
-          raise errors.OpPrereqError("if ip=pool, parameter network"
-                                     " must be passed too",
-                                     errors.ECODE_INVAL)
-
-      elif not netutils.IPAddress.IsValid(ip):
-        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
-                                   errors.ECODE_INVAL)
-
-      nic_ip = ip
-
-    # TODO: check the ip address for uniqueness
-    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip and not net:
-      raise errors.OpPrereqError("Routed nic mode requires an ip address"
-                                 " if not attached to a network",
-                                 errors.ECODE_INVAL)
-
-    # MAC address verification
-    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
-    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
-      mac = utils.NormalizeAndValidateMac(mac)
-
-      try:
-        # TODO: We need to factor this out
-        cfg.ReserveMAC(mac, ec_id)
-      except errors.ReservationError:
-        raise errors.OpPrereqError("MAC address %s already in use"
-                                   " in cluster" % mac,
-                                   errors.ECODE_NOTUNIQUE)
-
-    #  Build nic parameters
-    nicparams = {}
-    if nic_mode_req:
-      nicparams[constants.NIC_MODE] = nic_mode
-    if link:
-      nicparams[constants.NIC_LINK] = link
-    if vlan:
-      nicparams[constants.NIC_VLAN] = vlan
-
-    check_params = cluster.SimpleFillNIC(nicparams)
-    objects.NIC.CheckParameterSyntax(check_params)
-    net_uuid = cfg.LookupNetwork(net)
-    name = nic.get(constants.INIC_NAME, None)
-    if name is not None and name.lower() == constants.VALUE_NONE:
-      name = None
-    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
-                          network=net_uuid, nicparams=nicparams)
-    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
-    nics.append(nic_obj)
-
-  return nics
-
-
-def _CheckForConflictingIp(lu, ip, node_uuid):
-  """In case of conflicting IP address raise error.
-
-  @type ip: string
-  @param ip: IP address
-  @type node_uuid: string
-  @param node_uuid: node UUID
-
-  """
-  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
-  if conf_net is not None:
-    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
-                                " network %s, but the target NIC does not." %
-                                (ip, conf_net)),
-                               errors.ECODE_STATE)
-
-  return (None, None)
-
-
-def _ComputeIPolicyInstanceSpecViolation(
-  ipolicy, instance_spec, disk_template,
-  _compute_fn=ComputeIPolicySpecViolation):
-  """Compute if instance specs meets the specs of ipolicy.
-
-  @type ipolicy: dict
-  @param ipolicy: The ipolicy to verify against
-  @param instance_spec: dict
-  @param instance_spec: The instance spec to verify
-  @type disk_template: string
-  @param disk_template: the disk template of the instance
-  @param _compute_fn: The function to verify ipolicy (unittest only)
-  @see: L{ComputeIPolicySpecViolation}
-
-  """
-  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
-  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
-  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
-  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
-  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
-  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
-
-  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
-                     disk_sizes, spindle_use, disk_template)
-
-
-def _ComputeInstanceCommunicationNIC(instance_name):
-  """Compute the name of the instance NIC used by instance
-  communication.
-
-  With instance communication, a new NIC is added to the instance.
-  This NIC has a special name that identities it as being part of
-  instance communication, and not just a normal NIC.  This function
-  generates the name of the NIC based on a prefix and the instance
-  name
-
-  @type instance_name: string
-  @param instance_name: name of the instance the NIC belongs to
-
-  @rtype: string
-  @return: name of the NIC
-
-  """
-  return constants.INSTANCE_COMMUNICATION_NIC_PREFIX + instance_name
-
-
-class LUInstanceCreate(LogicalUnit):
-  """Create an instance.
-
-  """
-  HPATH = "instance-add"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  def _CheckDiskTemplateValid(self):
-    """Checks validity of disk template.
-
-    """
-    cluster = self.cfg.GetClusterInfo()
-    if self.op.disk_template is None:
-      # FIXME: It would be better to take the default disk template from the
-      # ipolicy, but for the ipolicy we need the primary node, which we get from
-      # the iallocator, which wants the disk template as input. To solve this
-      # chicken-and-egg problem, it should be possible to specify just a node
-      # group from the iallocator and take the ipolicy from that.
-      self.op.disk_template = cluster.enabled_disk_templates[0]
-    CheckDiskTemplateEnabled(cluster, self.op.disk_template)
-
-  def _CheckDiskArguments(self):
-    """Checks validity of disk-related arguments.
-
-    """
-    # check that disk's names are unique and valid
-    utils.ValidateDeviceNames("disk", self.op.disks)
-
-    self._CheckDiskTemplateValid()
-
-    # check disks. parameter names and consistent adopt/no-adopt strategy
-    has_adopt = has_no_adopt = False
-    for disk in self.op.disks:
-      if self.op.disk_template != constants.DT_EXT:
-        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
-      if constants.IDISK_ADOPT in disk:
-        has_adopt = True
-      else:
-        has_no_adopt = True
-    if has_adopt and has_no_adopt:
-      raise errors.OpPrereqError("Either all disks are adopted or none is",
-                                 errors.ECODE_INVAL)
-    if has_adopt:
-      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
-        raise errors.OpPrereqError("Disk adoption is not supported for the"
-                                   " '%s' disk template" %
-                                   self.op.disk_template,
-                                   errors.ECODE_INVAL)
-      if self.op.iallocator is not None:
-        raise errors.OpPrereqError("Disk adoption not allowed with an"
-                                   " iallocator script", errors.ECODE_INVAL)
-      if self.op.mode == constants.INSTANCE_IMPORT:
-        raise errors.OpPrereqError("Disk adoption not allowed for"
-                                   " instance import", errors.ECODE_INVAL)
-    else:
-      if self.op.disk_template in constants.DTS_MUST_ADOPT:
-        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
-                                   " but no 'adopt' parameter given" %
-                                   self.op.disk_template,
-                                   errors.ECODE_INVAL)
-
-    self.adopt_disks = has_adopt
-
-  def _CheckVLANArguments(self):
-    """ Check validity of VLANs if given
-
-    """
-    for nic in self.op.nics:
-      vlan = nic.get(constants.INIC_VLAN, None)
-      if vlan:
-        if vlan[0] == ".":
-          # vlan starting with dot means single untagged vlan,
-          # might be followed by trunk (:)
-          if not vlan[1:].isdigit():
-            vlanlist = vlan[1:].split(':')
-            for vl in vlanlist:
-              if not vl.isdigit():
-                raise errors.OpPrereqError("Specified VLAN parameter is "
-                                           "invalid : %s" % vlan,
-                                             errors.ECODE_INVAL)
-        elif vlan[0] == ":":
-          # Trunk - tagged only
-          vlanlist = vlan[1:].split(':')
-          for vl in vlanlist:
-            if not vl.isdigit():
-              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
-                                           " : %s" % vlan, errors.ECODE_INVAL)
-        elif vlan.isdigit():
-          # This is the simplest case. No dots, only single digit
-          # -> Create untagged access port, dot needs to be added
-          nic[constants.INIC_VLAN] = "." + vlan
-        else:
-          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
-                                       " : %s" % vlan, errors.ECODE_INVAL)
-
-  def CheckArguments(self):
-    """Check arguments.
-
-    """
-    # do not require name_check to ease forward/backward compatibility
-    # for tools
-    if self.op.no_install and self.op.start:
-      self.LogInfo("No-installation mode selected, disabling startup")
-      self.op.start = False
-    # validate/normalize the instance name
-    self.op.instance_name = \
-      netutils.Hostname.GetNormalizedName(self.op.instance_name)
-
-    if self.op.ip_check and not self.op.name_check:
-      # TODO: make the ip check more flexible and not depend on the name check
-      raise errors.OpPrereqError("Cannot do IP address check without a name"
-                                 " check", errors.ECODE_INVAL)
-
-    # instance name verification
-    if self.op.name_check:
-      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
-      self.op.instance_name = self.hostname.name
-      # used in CheckPrereq for ip ping check
-      self.check_ip = self.hostname.ip
-    else:
-      self.check_ip = None
-
-    # add NIC for instance communication
-    if self.op.instance_communication:
-      nic_name = _ComputeInstanceCommunicationNIC(self.op.instance_name)
-
-      for nic in self.op.nics:
-        if nic.get(constants.INIC_NAME, None) == nic_name:
-          break
-      else:
-        self.op.nics.append({constants.INIC_NAME: nic_name,
-                             constants.INIC_MAC: constants.VALUE_GENERATE,
-                             constants.INIC_IP: constants.NIC_IP_POOL,
-                             constants.INIC_NETWORK:
-                               self.cfg.GetInstanceCommunicationNetwork()})
-
-    # timeouts for unsafe OS installs
-    if self.op.helper_startup_timeout is None:
-      self.op.helper_startup_timeout = constants.HELPER_VM_STARTUP
-
-    if self.op.helper_shutdown_timeout is None:
-      self.op.helper_shutdown_timeout = constants.HELPER_VM_SHUTDOWN
-
-    # check nics' parameter names
-    for nic in self.op.nics:
-      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
-    # check that NIC's parameters names are unique and valid
-    utils.ValidateDeviceNames("NIC", self.op.nics)
-
-    self._CheckVLANArguments()
-
-    self._CheckDiskArguments()
-    assert self.op.disk_template is not None
-
-    # file storage checks
-    if (self.op.file_driver and
-        not self.op.file_driver in constants.FILE_DRIVER):
-      raise errors.OpPrereqError("Invalid file driver name '%s'" %
-                                 self.op.file_driver, errors.ECODE_INVAL)
-
-    # set default file_driver if unset and required
-    if (not self.op.file_driver and
-        self.op.disk_template in constants.DTS_FILEBASED):
-      self.op.file_driver = constants.FD_DEFAULT
-
-    ### Node/iallocator related checks
-    CheckIAllocatorOrNode(self, "iallocator", "pnode")
-
-    if self.op.pnode is not None:
-      if self.op.disk_template in constants.DTS_INT_MIRROR:
-        if self.op.snode is None:
-          raise errors.OpPrereqError("The networked disk templates need"
-                                     " a mirror node", errors.ECODE_INVAL)
-      elif self.op.snode:
-        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
-                        " template")
-        self.op.snode = None
-
-    _CheckOpportunisticLocking(self.op)
-
-    if self.op.mode == constants.INSTANCE_IMPORT:
-      # On import force_variant must be True, because if we forced it at
-      # initial install, our only chance when importing it back is that it
-      # works again!
-      self.op.force_variant = True
-
-      if self.op.no_install:
-        self.LogInfo("No-installation mode has no effect during import")
-
-      if objects.GetOSImage(self.op.osparams):
-        self.LogInfo("OS image has no effect during import")
-    elif self.op.mode == constants.INSTANCE_CREATE:
-      os_image = CheckOSImage(self.op)
-
-      if self.op.os_type is None and os_image is None:
-        raise errors.OpPrereqError("No guest OS or OS image specified",
-                                   errors.ECODE_INVAL)
-
-      if self.op.os_type is not None \
-            and self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
-        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
-                                   " installation" % self.op.os_type,
-                                   errors.ECODE_STATE)
-    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
-      if objects.GetOSImage(self.op.osparams):
-        self.LogInfo("OS image has no effect during import")
-
-      self._cds = GetClusterDomainSecret()
-
-      # Check handshake to ensure both clusters have the same domain secret
-      src_handshake = self.op.source_handshake
-      if not src_handshake:
-        raise errors.OpPrereqError("Missing source handshake",
-                                   errors.ECODE_INVAL)
-
-      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
-                                                           src_handshake)
-      if errmsg:
-        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
-                                   errors.ECODE_INVAL)
-
-      # Load and check source CA
-      self.source_x509_ca_pem = self.op.source_x509_ca
-      if not self.source_x509_ca_pem:
-        raise errors.OpPrereqError("Missing source X509 CA",
-                                   errors.ECODE_INVAL)
-
-      try:
-        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
-                                                    self._cds)
-      except OpenSSL.crypto.Error, err:
-        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
-                                   (err, ), errors.ECODE_INVAL)
-
-      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
-      if errcode is not None:
-        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
-                                   errors.ECODE_INVAL)
-
-      self.source_x509_ca = cert
-
-      src_instance_name = self.op.source_instance_name
-      if not src_instance_name:
-        raise errors.OpPrereqError("Missing source instance name",
-                                   errors.ECODE_INVAL)
-
-      self.source_instance_name = \
-        netutils.GetHostname(name=src_instance_name).name
-
-    else:
-      raise errors.OpPrereqError("Invalid instance creation mode %r" %
-                                 self.op.mode, errors.ECODE_INVAL)
-
-  def ExpandNames(self):
-    """ExpandNames for CreateInstance.
-
-    Figure out the right locks for instance creation.
-
-    """
-    self.needed_locks = {}
-
-    # this is just a preventive check, but someone might still add this
-    # instance in the meantime, and creation will fail at lock-add time
-    CheckInstanceExistence(self, self.op.instance_name)
-
-    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
-
-    if self.op.iallocator:
-      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
-      # specifying a group on instance creation and then selecting nodes from
-      # that group
-      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
-      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
-
-      if self.op.opportunistic_locking:
-        self.opportunistic_locks[locking.LEVEL_NODE] = True
-        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
-        if self.op.disk_template == constants.DT_DRBD8:
-          self.opportunistic_locks_count[locking.LEVEL_NODE] = 2
-          self.opportunistic_locks_count[locking.LEVEL_NODE_RES] = 2
-    else:
-      (self.op.pnode_uuid, self.op.pnode) = \
-        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
-      nodelist = [self.op.pnode_uuid]
-      if self.op.snode is not None:
-        (self.op.snode_uuid, self.op.snode) = \
-          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
-        nodelist.append(self.op.snode_uuid)
-      self.needed_locks[locking.LEVEL_NODE] = nodelist
-
-    # in case of import lock the source node too
-    if self.op.mode == constants.INSTANCE_IMPORT:
-      src_node = self.op.src_node
-      src_path = self.op.src_path
-
-      if src_path is None:
-        self.op.src_path = src_path = self.op.instance_name
-
-      if src_node is None:
-        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
-        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
-        self.op.src_node = None
-        if os.path.isabs(src_path):
-          raise errors.OpPrereqError("Importing an instance from a path"
-                                     " requires a source node option",
-                                     errors.ECODE_INVAL)
-      else:
-        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
-          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
-        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
-          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
-        if not os.path.isabs(src_path):
-          self.op.src_path = \
-            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
-
-    self.needed_locks[locking.LEVEL_NODE_RES] = \
-      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
-
-    # Optimistically acquire shared group locks (we're reading the
-    # configuration).  We can't just call GetInstanceNodeGroups, because the
-    # instance doesn't exist yet. Therefore we lock all node groups of all
-    # nodes we have.
-    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
-      # In the case we lock all nodes for opportunistic allocation, we have no
-      # choice than to lock all groups, because they're allocated before nodes.
-      # This is sad, but true. At least we release all those we don't need in
-      # CheckPrereq later.
-      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
-    else:
-      self.needed_locks[locking.LEVEL_NODEGROUP] = \
-        list(self.cfg.GetNodeGroupsFromNodes(
-          self.needed_locks[locking.LEVEL_NODE]))
-    self.share_locks[locking.LEVEL_NODEGROUP] = 1
-
-  def _RunAllocator(self):
-    """Run the allocator based on input opcode.
-
-    """
-    if self.op.opportunistic_locking:
-      # Only consider nodes for which a lock is held
-      node_name_whitelist = self.cfg.GetNodeNames(
-        set(self.owned_locks(locking.LEVEL_NODE)) &
-        set(self.owned_locks(locking.LEVEL_NODE_RES)))
-    else:
-      node_name_whitelist = None
-
-    req = _CreateInstanceAllocRequest(self.op, self.disks,
-                                      self.nics, self.be_full,
-                                      node_name_whitelist)
-    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
-
-    ial.Run(self.op.iallocator)
-
-    if not ial.success:
-      # When opportunistic locks are used only a temporary failure is generated
-      if self.op.opportunistic_locking:
-        ecode = errors.ECODE_TEMP_NORES
-        self.LogInfo("IAllocator '%s' failed on opportunistically acquired"
-                     " nodes: %s", self.op.iallocator, ial.info)
-      else:
-        ecode = errors.ECODE_NORES
-
-      raise errors.OpPrereqError("Can't compute nodes using"
-                                 " iallocator '%s': %s" %
-                                 (self.op.iallocator, ial.info),
-                                 ecode)
-
-    (self.op.pnode_uuid, self.op.pnode) = \
-      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
-    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
-                 self.op.instance_name, self.op.iallocator,
-                 utils.CommaJoin(ial.result))
-
-    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
-
-    if req.RequiredNodes() == 2:
-      (self.op.snode_uuid, self.op.snode) = \
-        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on master, primary and secondary nodes of the instance.
-
-    """
-    env = {
-      "ADD_MODE": self.op.mode,
-      }
-    if self.op.mode == constants.INSTANCE_IMPORT:
-      env["SRC_NODE"] = self.op.src_node
-      env["SRC_PATH"] = self.op.src_path
-      env["SRC_IMAGES"] = self.src_images
-
-    env.update(BuildInstanceHookEnv(
-      name=self.op.instance_name,
-      primary_node_name=self.op.pnode,
-      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
-      status=self.op.start,
-      os_type=self.op.os_type,
-      minmem=self.be_full[constants.BE_MINMEM],
-      maxmem=self.be_full[constants.BE_MAXMEM],
-      vcpus=self.be_full[constants.BE_VCPUS],
-      nics=NICListToTuple(self, self.nics),
-      disk_template=self.op.disk_template,
-      # Note that self.disks here is not a list with objects.Disk
-      # but with dicts as returned by ComputeDisks.
-      disks=self.disks,
-      bep=self.be_full,
-      hvp=self.hv_full,
-      hypervisor_name=self.op.hypervisor,
-      tags=self.op.tags,
-      ))
-
-    return env
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
-    return nl, nl
-
-  def _ReadExportInfo(self):
-    """Reads the export information from disk.
-
-    It will override the opcode source node and path with the actual
-    information, if these two were not specified before.
-
-    @return: the export information
-
-    """
-    assert self.op.mode == constants.INSTANCE_IMPORT
-
-    if self.op.src_node_uuid is None:
-      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
-      exp_list = self.rpc.call_export_list(locked_nodes)
-      found = False
-      for node_uuid in exp_list:
-        if exp_list[node_uuid].fail_msg:
-          continue
-        if self.op.src_path in exp_list[node_uuid].payload:
-          found = True
-          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
-          self.op.src_node_uuid = node_uuid
-          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
-                                            self.op.src_path)
-          break
-      if not found:
-        raise errors.OpPrereqError("No export found for relative path %s" %
-                                   self.op.src_path, errors.ECODE_INVAL)
-
-    CheckNodeOnline(self, self.op.src_node_uuid)
-    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
-    result.Raise("No export or invalid export found in dir %s" %
-                 self.op.src_path)
-
-    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
-    if not export_info.has_section(constants.INISECT_EXP):
-      raise errors.ProgrammerError("Corrupted export config",
-                                   errors.ECODE_ENVIRON)
-
-    ei_version = export_info.get(constants.INISECT_EXP, "version")
-    if int(ei_version) != constants.EXPORT_VERSION:
-      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
-                                 (ei_version, constants.EXPORT_VERSION),
-                                 errors.ECODE_ENVIRON)
-    return export_info
-
-  def _ReadExportParams(self, einfo):
-    """Use export parameters as defaults.
-
-    In case the opcode doesn't specify (as in override) some instance
-    parameters, then try to use them from the export information, if
-    that declares them.
-
-    """
-    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
-
-    if not self.op.disks:
-      disks = []
-      # TODO: import the disk iv_name too
-      for idx in range(constants.MAX_DISKS):
-        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
-          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
-          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
-          disk = {
-            constants.IDISK_SIZE: disk_sz,
-            constants.IDISK_NAME: disk_name
-            }
-          disks.append(disk)
-      self.op.disks = disks
-      if not disks and self.op.disk_template != constants.DT_DISKLESS:
-        raise errors.OpPrereqError("No disk info specified and the export"
-                                   " is missing the disk information",
-                                   errors.ECODE_INVAL)
-
-    if not self.op.nics:
-      nics = []
-      for idx in range(constants.MAX_NICS):
-        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
-          ndict = {}
-          for name in [constants.INIC_IP,
-                       constants.INIC_MAC, constants.INIC_NAME]:
-            nic_param_name = "nic%d_%s" % (idx, name)
-            if einfo.has_option(constants.INISECT_INS, nic_param_name):
-              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
-              ndict[name] = v
-          network = einfo.get(constants.INISECT_INS,
-                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
-          # in case network is given link and mode are inherited
-          # from nodegroup's netparams and thus should not be passed here
-          if network:
-            ndict[constants.INIC_NETWORK] = network
-          else:
-            for name in list(constants.NICS_PARAMETERS):
-              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
-              ndict[name] = v
-          nics.append(ndict)
-        else:
-          break
-      self.op.nics = nics
-
-    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
-      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
-
-    if (self.op.hypervisor is None and
-        einfo.has_option(constants.INISECT_INS, "hypervisor")):
-      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
-
-    if einfo.has_section(constants.INISECT_HYP):
-      # use the export parameters but do not override the ones
-      # specified by the user
-      for name, value in einfo.items(constants.INISECT_HYP):
-        if name not in self.op.hvparams:
-          self.op.hvparams[name] = value
-
-    if einfo.has_section(constants.INISECT_BEP):
-      # use the parameters, without overriding
-      for name, value in einfo.items(constants.INISECT_BEP):
-        if name not in self.op.beparams:
-          self.op.beparams[name] = value
-        # Compatibility for the old "memory" be param
-        if name == constants.BE_MEMORY:
-          if constants.BE_MAXMEM not in self.op.beparams:
-            self.op.beparams[constants.BE_MAXMEM] = value
-          if constants.BE_MINMEM not in self.op.beparams:
-            self.op.beparams[constants.BE_MINMEM] = value
-    else:
-      # try to read the parameters old style, from the main section
-      for name in constants.BES_PARAMETERS:
-        if (name not in self.op.beparams and
-            einfo.has_option(constants.INISECT_INS, name)):
-          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
-
-    if einfo.has_section(constants.INISECT_OSP):
-      # use the parameters, without overriding
-      for name, value in einfo.items(constants.INISECT_OSP):
-        if name not in self.op.osparams:
-          self.op.osparams[name] = value
-
-    if einfo.has_section(constants.INISECT_OSP_PRIVATE):
-      # use the parameters, without overriding
-      for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
-        if name not in self.op.osparams_private:
-          self.op.osparams_private[name] = serializer.Private(value, descr=name)
-
-  def _RevertToDefaults(self, cluster):
-    """Revert the instance parameters to the default values.
-
-    """
-    # hvparams
-    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
-    for name in self.op.hvparams.keys():
-      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
-        del self.op.hvparams[name]
-    # beparams
-    be_defs = cluster.SimpleFillBE({})
-    for name in self.op.beparams.keys():
-      if name in be_defs and be_defs[name] == self.op.beparams[name]:
-        del self.op.beparams[name]
-    # nic params
-    nic_defs = cluster.SimpleFillNIC({})
-    for nic in self.op.nics:
-      for name in constants.NICS_PARAMETERS:
-        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
-          del nic[name]
-    # osparams
-    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
-    for name in self.op.osparams.keys():
-      if name in os_defs and os_defs[name] == self.op.osparams[name]:
-        del self.op.osparams[name]
-
-    os_defs_ = cluster.SimpleFillOS(self.op.os_type, {},
-                                    os_params_private={})
-    for name in self.op.osparams_private.keys():
-      if name in os_defs_ and os_defs_[name] == self.op.osparams_private[name]:
-        del self.op.osparams_private[name]
-
-  def CheckPrereq(self): # pylint: disable=R0914
-    """Check prerequisites.
-
-    """
-    # Check that the optimistically acquired groups are correct wrt the
-    # acquired nodes
-    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
-    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
-    if not owned_groups.issuperset(cur_groups):
-      raise errors.OpPrereqError("New instance %s's node groups changed since"
-                                 " locks were acquired, current groups"
-                                 " are '%s', owning groups '%s'; retry the"
-                                 " operation" %
-                                 (self.op.instance_name,
-                                  utils.CommaJoin(cur_groups),
-                                  utils.CommaJoin(owned_groups)),
-                                 errors.ECODE_STATE)
-
-    self.instance_file_storage_dir = CalculateFileStorageDir(self)
-
-    if self.op.mode == constants.INSTANCE_IMPORT:
-      export_info = self._ReadExportInfo()
-      self._ReadExportParams(export_info)
-      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
-    else:
-      self._old_instance_name = None
-
-    if (not self.cfg.GetVGName() and
-        self.op.disk_template not in constants.DTS_NOT_LVM):
-      raise errors.OpPrereqError("Cluster does not support lvm-based"
-                                 " instances", errors.ECODE_STATE)
-
-    if (self.op.hypervisor is None or
-        self.op.hypervisor == constants.VALUE_AUTO):
-      self.op.hypervisor = self.cfg.GetHypervisorType()
-
-    cluster = self.cfg.GetClusterInfo()
-    enabled_hvs = cluster.enabled_hypervisors
-    if self.op.hypervisor not in enabled_hvs:
-      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
-                                 " cluster (%s)" %
-                                 (self.op.hypervisor, ",".join(enabled_hvs)),
-                                 errors.ECODE_STATE)
-
-    # Check tag validity
-    for tag in self.op.tags:
-      objects.TaggableObject.ValidateTag(tag)
-
-    # check hypervisor parameter syntax (locally)
-    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
-    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
-                                      self.op.hvparams)
-    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
-    hv_type.CheckParameterSyntax(filled_hvp)
-    self.hv_full = filled_hvp
-    # check that we don't specify global parameters on an instance
-    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
-                         "instance", "cluster")
-
-    # fill and remember the beparams dict
-    self.be_full = _ComputeFullBeParams(self.op, cluster)
-
-    # build os parameters
-    if self.op.osparams_private is None:
-      self.op.osparams_private = serializer.PrivateDict()
-    if self.op.osparams_secret is None:
-      self.op.osparams_secret = serializer.PrivateDict()
-
-    self.os_full = cluster.SimpleFillOS(
-      self.op.os_type,
-      self.op.osparams,
-      os_params_private=self.op.osparams_private,
-      os_params_secret=self.op.osparams_secret
-    )
-
-    # now that hvp/bep are in final format, let's reset to defaults,
-    # if told to do so
-    if self.op.identify_defaults:
-      self._RevertToDefaults(cluster)
-
-    # NIC buildup
-    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
-                             self.proc.GetECId())
-
-    # disk checks/pre-build
-    default_vg = self.cfg.GetVGName()
-    self.disks = ComputeDisks(self.op.disks, self.op.disk_template, default_vg)
-
-    if self.op.mode == constants.INSTANCE_IMPORT:
-      disk_images = []
-      for idx in range(len(self.disks)):
-        option = "disk%d_dump" % idx
-        if export_info.has_option(constants.INISECT_INS, option):
-          # FIXME: are the old os-es, disk sizes, etc. useful?
-          export_name = export_info.get(constants.INISECT_INS, option)
-          image = utils.PathJoin(self.op.src_path, export_name)
-          disk_images.append(image)
-        else:
-          disk_images.append(False)
-
-      self.src_images = disk_images
-
-      if self.op.instance_name == self._old_instance_name:
-        for idx, nic in enumerate(self.nics):
-          if nic.mac == constants.VALUE_AUTO:
-            nic_mac_ini = "nic%d_mac" % idx
-            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
-
-    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
-
-    # ip ping checks (we use the same ip that was resolved in ExpandNames)
-    if self.op.ip_check:
-      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
-        raise errors.OpPrereqError("IP %s of instance %s already in use" %
-                                   (self.check_ip, self.op.instance_name),
-                                   errors.ECODE_NOTUNIQUE)
-
-    #### mac address generation
-    # By generating the mac address here, both the allocator and the hooks
-    # get the real final mac address rather than the 'auto' or 'generate'
-    # value.
-    # There is a race condition between the generation and the instance object
-    # creation, which means that we know the mac is valid now, but we're not
-    # sure it will be when we actually add the instance. If things go bad,
-    # adding the instance will abort because of a duplicate mac, and the
-    # creation job will fail.
-    for nic in self.nics:
-      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
-        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
-
-    #### allocator run
-
-    if self.op.iallocator is not None:
-      self._RunAllocator()
-
-    # Release all unneeded node locks
-    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
-                               self.op.src_node_uuid])
-    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
-    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
-    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
-    # Release all unneeded group locks
-    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
-                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))
-
-    assert (self.owned_locks(locking.LEVEL_NODE) ==
-            self.owned_locks(locking.LEVEL_NODE_RES)), \
-      "Node locks differ from node resource locks"
-
-    #### node related checks
-
-    # check primary node
-    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
-    assert self.pnode is not None, \
-      "Cannot retrieve locked node %s" % self.op.pnode_uuid
-    if pnode.offline:
-      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
-                                 pnode.name, errors.ECODE_STATE)
-    if pnode.drained:
-      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
-                                 pnode.name, errors.ECODE_STATE)
-    if not pnode.vm_capable:
-      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
-                                 " '%s'" % pnode.name, errors.ECODE_STATE)
-
-    self.secondaries = []
-
-    # Fill in any IPs from IP pools. This must happen here, because we need to
-    # know the nic's primary node, as specified by the iallocator
-    for idx, nic in enumerate(self.nics):
-      net_uuid = nic.network
-      if net_uuid is not None:
-        nobj = self.cfg.GetNetwork(net_uuid)
-        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
-        if netparams is None:
-          raise errors.OpPrereqError("No netparams found for network"
-                                     " %s. Probably not connected to"
-                                     " node %s's nodegroup" %
-                                     (nobj.name, self.pnode.name),
-                                     errors.ECODE_INVAL)
-        self.LogInfo("NIC/%d inherits netparams %s" %
-                     (idx, netparams.values()))
-        nic.nicparams = dict(netparams)
-        if nic.ip is not None:
-          if nic.ip.lower() == constants.NIC_IP_POOL:
-            try:
-              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
-            except errors.ReservationError:
-              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
-                                         " from the address pool" % idx,
-                                         errors.ECODE_STATE)
-            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
-          else:
-            try:
-              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
-                                 check=self.op.conflicts_check)
-            except errors.ReservationError:
-              raise errors.OpPrereqError("IP address %s already in use"
-                                         " or does not belong to network %s" %
-                                         (nic.ip, nobj.name),
-                                         errors.ECODE_NOTUNIQUE)
-
-      # net is None, ip None or given
-      elif self.op.conflicts_check:
-        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
-
-    # mirror node verification
-    if self.op.disk_template in constants.DTS_INT_MIRROR:
-      if self.op.snode_uuid == pnode.uuid:
-        raise errors.OpPrereqError("The secondary node cannot be the"
-                                   " primary node", errors.ECODE_INVAL)
-      CheckNodeOnline(self, self.op.snode_uuid)
-      CheckNodeNotDrained(self, self.op.snode_uuid)
-      CheckNodeVmCapable(self, self.op.snode_uuid)
-      self.secondaries.append(self.op.snode_uuid)
-
-      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
-      if pnode.group != snode.group:
-        self.LogWarning("The primary and secondary nodes are in two"
-                        " different node groups; the disk parameters"
-                        " from the first disk's node group will be"
-                        " used")
-
-    nodes = [pnode]
-    if self.op.disk_template in constants.DTS_INT_MIRROR:
-      nodes.append(snode)
-    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
-    excl_stor = compat.any(map(has_es, nodes))
-    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
-      raise errors.OpPrereqError("Disk template %s not supported with"
-                                 " exclusive storage" % self.op.disk_template,
-                                 errors.ECODE_STATE)
-    for disk in self.disks:
-      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
-
-    node_uuids = [pnode.uuid] + self.secondaries
-
-    if not self.adopt_disks:
-      if self.op.disk_template == constants.DT_RBD:
-        # _CheckRADOSFreeSpace() is just a placeholder.
-        # Any function that checks prerequisites can be placed here.
-        # Check if there is enough space on the RADOS cluster.
-        CheckRADOSFreeSpace()
-      elif self.op.disk_template == constants.DT_EXT:
-        # FIXME: Function that checks prereqs if needed
-        pass
-      elif self.op.disk_template in constants.DTS_LVM:
-        # Check lv size requirements, if not adopting
-        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
-        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
-      else:
-        # FIXME: add checks for other, non-adopting, non-lvm disk templates
-        pass
-
-    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
-      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
-                                disk[constants.IDISK_ADOPT])
-                     for disk in self.disks])
-      if len(all_lvs) != len(self.disks):
-        raise errors.OpPrereqError("Duplicate volume names given for adoption",
-                                   errors.ECODE_INVAL)
-      for lv_name in all_lvs:
-        try:
-          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
-          # to ReserveLV uses the same syntax
-          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
-        except errors.ReservationError:
-          raise errors.OpPrereqError("LV named %s used by another instance" %
-                                     lv_name, errors.ECODE_NOTUNIQUE)
-
-      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
-      vg_names.Raise("Cannot get VG information from node %s" % pnode.name,
-                     prereq=True)
-
-      node_lvs = self.rpc.call_lv_list([pnode.uuid],
-                                       vg_names.payload.keys())[pnode.uuid]
-      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name,
-                     prereq=True)
-      node_lvs = node_lvs.payload
-
-      delta = all_lvs.difference(node_lvs.keys())
-      if delta:
-        raise errors.OpPrereqError("Missing logical volume(s): %s" %
-                                   utils.CommaJoin(delta),
-                                   errors.ECODE_INVAL)
-      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
-      if online_lvs:
-        raise errors.OpPrereqError("Online logical volumes found, cannot"
-                                   " adopt: %s" % utils.CommaJoin(online_lvs),
-                                   errors.ECODE_STATE)
-      # update the size of disk based on what is found
-      for dsk in self.disks:
-        dsk[constants.IDISK_SIZE] = \
-          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
-                                        dsk[constants.IDISK_ADOPT])][0]))
-
-    elif self.op.disk_template == constants.DT_BLOCK:
-      # Normalize and de-duplicate device paths
-      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
-                       for disk in self.disks])
-      if len(all_disks) != len(self.disks):
-        raise errors.OpPrereqError("Duplicate disk names given for adoption",
-                                   errors.ECODE_INVAL)
-      baddisks = [d for d in all_disks
-                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
-      if baddisks:
-        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
-                                   " cannot be adopted" %
-                                   (utils.CommaJoin(baddisks),
-                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
-                                   errors.ECODE_INVAL)
-
-      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
-                                            list(all_disks))[pnode.uuid]
-      node_disks.Raise("Cannot get block device information from node %s" %
-                       pnode.name, prereq=True)
-      node_disks = node_disks.payload
-      delta = all_disks.difference(node_disks.keys())
-      if delta:
-        raise errors.OpPrereqError("Missing block device(s): %s" %
-                                   utils.CommaJoin(delta),
-                                   errors.ECODE_INVAL)
-      for dsk in self.disks:
-        dsk[constants.IDISK_SIZE] = \
-          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
-
-    # Check disk access param to be compatible with specified hypervisor
-    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
-    node_group = self.cfg.GetNodeGroup(node_info.group)
-    group_disk_params = self.cfg.GetGroupDiskParams(node_group)
-    group_access_type = group_disk_params[self.op.disk_template].get(
-      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
-    )
-    for dsk in self.disks:
-      access_type = dsk.get(constants.IDISK_ACCESS, group_access_type)
-      if not IsValidDiskAccessModeCombination(self.op.hypervisor,
-                                              self.op.disk_template,
-                                              access_type):
-        raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
-                                   " used with %s disk access param" %
-                                   (self.op.hypervisor, access_type),
-                                    errors.ECODE_STATE)
-
-    # Verify instance specs
-    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
-    ispec = {
-      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
-      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
-      constants.ISPEC_DISK_COUNT: len(self.disks),
-      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
-                                  for disk in self.disks],
-      constants.ISPEC_NIC_COUNT: len(self.nics),
-      constants.ISPEC_SPINDLE_USE: spindle_use,
-      }
-
-    group_info = self.cfg.GetNodeGroup(pnode.group)
-    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
-                                               self.op.disk_template)
-    if not self.op.ignore_ipolicy and res:
-      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
-             (pnode.group, group_info.name, utils.CommaJoin(res)))
-      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
-
-    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
-
-    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full,
-                  self.op.force_variant)
-
-    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
-
-    CheckCompressionTool(self, self.op.compress)
-
-    #TODO: _CheckExtParams (remotely)
-    # Check parameters for extstorage
-
-    # memory check on primary node
-    #TODO(dynmem): use MINMEM for checking
-    if self.op.start:
-      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
-                                self.op.hvparams)
-      CheckNodeFreeMemory(self, self.pnode.uuid,
-                          "creating instance %s" % self.op.instance_name,
-                          self.be_full[constants.BE_MAXMEM],
-                          self.op.hypervisor, hvfull)
-
-    self.dry_run_result = list(node_uuids)
-
-  def _RemoveDegradedDisks(self, feedback_fn, disk_abort, instance):
-    """Removes degraded disks and instance.
-
-    It optionally checks whether disks are degraded.  If the disks are
-    degraded, they are removed and the instance is also removed from
-    the configuration.
-
-    If L{disk_abort} is True, then the disks are considered degraded
-    and removed, and the instance is removed from the configuration.
-
-    If L{disk_abort} is False, it first checks whether the disks are
-    degraded and, if so, removes the disks and the instance from the
-    configuration.
-
-    @type feedback_fn: callable
-    @param feedback_fn: function used to send feedback back to the caller
-
-    @type disk_abort: boolean
-    @param disk_abort:
-      True if disks are degraded, False to first check if disks are
-      degraded
-    @type instance: L{objects.Instance}
-    @param instance: instance containing the disks to check
-
-    @rtype: NoneType
-    @return: None
-    @raise errors.OpExecError: if disks are degraded
-
-    """
-    if disk_abort:
-      pass
-    elif self.op.wait_for_sync:
-      disk_abort = not WaitForSync(self, instance)
-    elif instance.disk_template in constants.DTS_INT_MIRROR:
-      # make sure the disks are not degraded (still sync-ing is ok)
-      feedback_fn("* checking mirrors status")
-      disk_abort = not WaitForSync(self, instance, oneshot=True)
-    else:
-      disk_abort = False
-
-    if disk_abort:
-      RemoveDisks(self, instance)
-      for disk_uuid in instance.disks:
-        self.cfg.RemoveInstanceDisk(instance.uuid, disk_uuid)
-      self.cfg.RemoveInstance(instance.uuid)
-      raise errors.OpExecError("There are some degraded disks for"
-                               " this instance")
-
-  def RunOsScripts(self, feedback_fn, iobj):
-    """Run OS scripts
-
-    If necessary, disk sync is paused.  It handles instance create,
-    import, and remote import.
-
-    @type feedback_fn: callable
-    @param feedback_fn: function used to send feedback back to the caller
-
-    @type iobj: L{objects.Instance}
-    @param iobj: instance object
-
-    """
-    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
-      disks = self.cfg.GetInstanceDisks(iobj.uuid)
-      if self.op.mode == constants.INSTANCE_CREATE:
-        os_image = objects.GetOSImage(self.op.osparams)
-
-        if os_image is None and not self.op.no_install:
-          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
-                        not self.op.wait_for_sync)
-          if pause_sync:
-            feedback_fn("* pausing disk sync to install instance OS")
-            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
-                                                              (disks, iobj),
-                                                              True)
-            for idx, success in enumerate(result.payload):
-              if not success:
-                logging.warn("pause-sync of instance %s for disk %d failed",
-                             self.op.instance_name, idx)
-
-          feedback_fn("* running the instance OS create scripts...")
-          # FIXME: pass debug option from opcode to backend
-          os_add_result = \
-            self.rpc.call_instance_os_add(self.pnode.uuid,
-                                          (iobj, self.op.osparams_secret),
-                                          False,
-                                          self.op.debug_level)
-          if pause_sync:
-            feedback_fn("* resuming disk sync")
-            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
-                                                              (disks, iobj),
-                                                              False)
-            for idx, success in enumerate(result.payload):
-              if not success:
-                logging.warn("resume-sync of instance %s for disk %d failed",
-                             self.op.instance_name, idx)
-
-          os_add_result.Raise("Could not add os for instance %s"
-                              " on node %s" % (self.op.instance_name,
-                                               self.pnode.name))
-
-      else:
-        if self.op.mode == constants.INSTANCE_IMPORT:
-          feedback_fn("* running the instance OS import scripts...")
-
-          transfers = []
-
-          for idx, image in enumerate(self.src_images):
-            if not image:
-              continue
-
-            if iobj.os:
-              dst_io = constants.IEIO_SCRIPT
-              dst_ioargs = ((disks[idx], iobj), idx)
-            else:
-              dst_io = constants.IEIO_RAW_DISK
-              dst_ioargs = (disks[idx], iobj)
-
-            # FIXME: pass debug option from opcode to backend
-            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
-                                               constants.IEIO_FILE, (image, ),
-                                               dst_io, dst_ioargs,
-                                               None)
-            transfers.append(dt)
-
-          import_result = \
-            masterd.instance.TransferInstanceData(self, feedback_fn,
-                                                  self.op.src_node_uuid,
-                                                  self.pnode.uuid,
-                                                  self.pnode.secondary_ip,
-                                                  self.op.compress,
-                                                  iobj, transfers)
-          if not compat.all(import_result):
-            self.LogWarning("Some disks for instance %s on node %s were not"
-                            " imported successfully" % (self.op.instance_name,
-                                                        self.pnode.name))
-
-          rename_from = self._old_instance_name
-
-        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
-          feedback_fn("* preparing remote import...")
-          # The source cluster will stop the instance before attempting to make
-          # a connection. In some cases stopping an instance can take a long
-          # time, hence the shutdown timeout is added to the connection
-          # timeout.
-          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
-                             self.op.source_shutdown_timeout)
-          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
-
-          assert iobj.primary_node == self.pnode.uuid
-          disk_results = \
-            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
-                                          self.source_x509_ca,
-                                          self._cds, self.op.compress, timeouts)
-          if not compat.all(disk_results):
-            # TODO: Should the instance still be started, even if some disks
-            # failed to import (valid for local imports, too)?
-            self.LogWarning("Some disks for instance %s on node %s were not"
-                            " imported successfully" % (self.op.instance_name,
-                                                        self.pnode.name))
-
-          rename_from = self.source_instance_name
-
-        else:
-          # also checked in the prereq part
-          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
-                                       % self.op.mode)
-
-        assert iobj.name == self.op.instance_name
-
-        # Run rename script on newly imported instance
-        if iobj.os:
-          feedback_fn("Running rename script for %s" % self.op.instance_name)
-          result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
-                                                     rename_from,
-                                                     self.op.debug_level)
-          result.Warn("Failed to run rename script for %s on node %s" %
-                      (self.op.instance_name, self.pnode.name), self.LogWarning)
-
-  def GetOsInstallPackageEnvironment(self, instance, script):
-    """Returns the OS scripts environment for the helper VM
-
-    @type instance: L{objects.Instance}
-    @param instance: instance for which the OS scripts are run
-
-    @type script: string
-    @param script: script to run (e.g.,
-                   constants.OS_SCRIPT_CREATE_UNTRUSTED)
-
-    @rtype: dict of string to string
-    @return: OS scripts environment for the helper VM
-
-    """
-    env = {"OS_SCRIPT": script}
-
-    # We pass only the instance's disks, not the helper VM's disks.
-    if instance.hypervisor == constants.HT_KVM:
-      prefix = "/dev/vd"
-    elif instance.hypervisor in [constants.HT_XEN_PVM, constants.HT_XEN_HVM]:
-      prefix = "/dev/xvd"
-    else:
-      raise errors.OpExecError("Cannot run OS scripts in a virtualized"
-                               " environment for hypervisor '%s'"
-                               % instance.hypervisor)
-
-    num_disks = len(self.cfg.GetInstanceDisks(instance.uuid))
-
-    for idx, disk_label in enumerate(utils.GetDiskLabels(prefix, num_disks + 1,
-                                                         start=1)):
-      env["DISK_%d_PATH" % idx] = disk_label
-
-    return env
-
-  def UpdateInstanceOsInstallPackage(self, feedback_fn, instance, override_env):
-    """Updates the OS parameter 'os-install-package' for an instance.
-
-    The OS install package is an archive containing an OS definition
-    and a file containing the environment variables needed to run the
-    OS scripts.
-
-    The OS install package is served by the metadata daemon to the
-    instances, so the OS scripts can run inside the virtualized
-    environment.
-
-    @type feedback_fn: callable
-    @param feedback_fn: function used to send feedback back to the caller
-
-    @type instance: L{objects.Instance}
-    @param instance: instance for which the OS parameter
-                     'os-install-package' is updated
-
-    @type override_env: dict of string to string
-    @param override_env: if supplied, it overrides the environment of
-                         the export OS scripts archive
-
-    """
-    if "os-install-package" in instance.osparams:
-      feedback_fn("Using OS install package '%s'" %
-                  instance.osparams["os-install-package"])
-    else:
-      result = self.rpc.call_os_export(instance.primary_node, instance,
-                                       override_env)
-      result.Raise("Could not export OS '%s'" % instance.os)
-      instance.osparams["os-install-package"] = result.payload
-
-      feedback_fn("Created OS install package '%s'" % result.payload)
-
-  def RunOsScriptsVirtualized(self, feedback_fn, instance):
-    """Runs the OS scripts inside a safe virtualized environment.
-
-    The virtualized environment reuses the instance and temporarily
-    creates a disk onto which the image of the helper VM is dumped.
-    The temporary disk is used to boot the helper VM.  The OS scripts
-    are passed to the helper VM through the metadata daemon and the OS
-    install package.
-
-    @type feedback_fn: callable
-    @param feedback_fn: function used to send feedback back to the caller
-
-    @type instance: L{objects.Instance}
-    @param instance: instance for which the OS scripts must be run
-                     inside the virtualized environment
-
-    """
-    install_image = self.cfg.GetInstallImage()
-
-    if not install_image:
-      raise errors.OpExecError("Cannot create install instance because an"
-                               " install image has not been specified")
-
-    disk_size = DetermineImageSize(self, install_image, instance.primary_node)
-
-    env = self.GetOsInstallPackageEnvironment(
-      instance,
-      constants.OS_SCRIPT_CREATE_UNTRUSTED)
-    self.UpdateInstanceOsInstallPackage(feedback_fn, instance, env)
-    UpdateMetadata(feedback_fn, self.rpc, instance,
-                   osparams_private=self.op.osparams_private,
-                   osparams_secret=self.op.osparams_secret)
-
-    with TemporaryDisk(self,
-                       instance,
-                       [(constants.DT_PLAIN, constants.DISK_RDWR, disk_size)],
-                       feedback_fn):
-      feedback_fn("Activating instance disks")
-      StartInstanceDisks(self, instance, False)
-
-      feedback_fn("Imaging disk with install image")
-      ImageDisks(self, instance, install_image)
-
-      feedback_fn("Starting instance with install image")
-      result = self.rpc.call_instance_start(instance.primary_node,
-                                            (instance, [], []),
-                                            False, self.op.reason)
-      result.Raise("Could not start instance '%s' with the install image '%s'"
-                   % (instance.name, install_image))
-
-      # First wait for the instance to start up
-      running_check = lambda: IsInstanceRunning(self, instance, prereq=False)
-      instance_up = retry.SimpleRetry(True, running_check, 5.0,
-                                      self.op.helper_startup_timeout)
-      if not instance_up:
-        raise errors.OpExecError("Could not boot instance using install image"
-                                 " '%s'" % install_image)
-
-      feedback_fn("Instance is up, now awaiting shutdown")
-
-      # Then wait for it to finish, as detected by its shutdown
-      instance_up = retry.SimpleRetry(False, running_check, 20.0,
-                                      self.op.helper_shutdown_timeout)
-      if instance_up:
-        self.LogWarning("Installation not completed prior to timeout, shutting"
-                        " down instance forcibly")
-
-    feedback_fn("Installation complete")
-
-  def Exec(self, feedback_fn):
-    """Create and add the instance to the cluster.
-
-    """
-    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
-                self.owned_locks(locking.LEVEL_NODE)), \
-      "Node locks differ from node resource locks"
-
-    ht_kind = self.op.hypervisor
-    if ht_kind in constants.HTS_REQ_PORT:
-      network_port = self.cfg.AllocatePort()
-    else:
-      network_port = None
-
-    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
-
-    # This is ugly, but we have a chicken-and-egg problem here
-    # We can only take the group disk parameters, as the instance
-    # has no disks yet (we are generating them right here).
-    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
-    disks = GenerateDiskTemplate(self,
-                                 self.op.disk_template,
-                                 instance_uuid, self.pnode.uuid,
-                                 self.secondaries,
-                                 self.disks,
-                                 self.instance_file_storage_dir,
-                                 self.op.file_driver,
-                                 0,
-                                 feedback_fn,
-                                 self.cfg.GetGroupDiskParams(nodegroup))
-
-    if self.op.os_type is None:
-      os_type = ""
-    else:
-      os_type = self.op.os_type
-
-    iobj = objects.Instance(name=self.op.instance_name,
-                            uuid=instance_uuid,
-                            os=os_type,
-                            primary_node=self.pnode.uuid,
-                            nics=self.nics, disks=[],
-                            disk_template=self.op.disk_template,
-                            disks_active=False,
-                            admin_state=constants.ADMINST_DOWN,
-                            admin_state_source=constants.ADMIN_SOURCE,
-                            network_port=network_port,
-                            beparams=self.op.beparams,
-                            hvparams=self.op.hvparams,
-                            hypervisor=self.op.hypervisor,
-                            osparams=self.op.osparams,
-                            osparams_private=self.op.osparams_private,
-                            )
-
-    if self.op.tags:
-      for tag in self.op.tags:
-        iobj.AddTag(tag)
-
-    if self.adopt_disks:
-      if self.op.disk_template == constants.DT_PLAIN:
-        # rename LVs to the newly-generated names; we need to construct
-        # 'fake' LV disks with the old data, plus the new unique_id
-        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
-        rename_to = []
-        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
-          rename_to.append(t_dsk.logical_id)
-          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
-        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
-                                               zip(tmp_disks, rename_to))
-        result.Raise("Failed to rename adoped LVs")
-    else:
-      feedback_fn("* creating instance disks...")
-      try:
-        CreateDisks(self, iobj, disks=disks)
-      except errors.OpExecError:
-        self.LogWarning("Device creation failed")
-        self.cfg.ReleaseDRBDMinors(instance_uuid)
-        raise
-
-    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
-    self.cfg.AddInstance(iobj, self.proc.GetECId())
-
-    feedback_fn("adding disks to cluster config")
-    for disk in disks:
-      self.cfg.AddInstanceDisk(iobj.uuid, disk)
-
-    # re-read the instance from the configuration
-    iobj = self.cfg.GetInstanceInfo(iobj.uuid)
-
-    if self.op.mode == constants.INSTANCE_IMPORT:
-      # Release unused nodes
-      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
-    else:
-      # Release all nodes
-      ReleaseLocks(self, locking.LEVEL_NODE)
-
-    # Wipe disks
-    disk_abort = False
-    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
-      feedback_fn("* wiping instance disks...")
-      try:
-        WipeDisks(self, iobj)
-      except errors.OpExecError, err:
-        logging.exception("Wiping disks failed")
-        self.LogWarning("Wiping instance disks failed (%s)", err)
-        disk_abort = True
-
-    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
-
-    # Image disks
-    os_image = objects.GetOSImage(iobj.osparams)
-    disk_abort = False
-
-    if not self.adopt_disks and os_image is not None:
-      feedback_fn("* imaging instance disks...")
-      try:
-        ImageDisks(self, iobj, os_image)
-      except errors.OpExecError, err:
-        logging.exception("Imaging disks failed")
-        self.LogWarning("Imaging instance disks failed (%s)", err)
-        disk_abort = True
-
-    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
-
-    # instance disks are now active
-    iobj.disks_active = True
-
-    # Release all node resource locks
-    ReleaseLocks(self, locking.LEVEL_NODE_RES)
-
-    if iobj.os:
-      result = self.rpc.call_os_diagnose([iobj.primary_node])[iobj.primary_node]
-      result.Raise("Failed to get OS '%s'" % iobj.os)
-
-      trusted = None
-
-      for (name, _, _, _, _, _, _, os_trusted) in result.payload:
-        if name == objects.OS.GetName(iobj.os):
-          trusted = os_trusted
-          break
-
-      if trusted is None:
-        raise errors.OpPrereqError("OS '%s' is not available in node '%s'" %
-                                   (iobj.os, iobj.primary_node))
-      elif trusted:
-        self.RunOsScripts(feedback_fn, iobj)
-      else:
-        self.RunOsScriptsVirtualized(feedback_fn, iobj)
-        # Instance is modified by 'RunOsScriptsVirtualized',
-        # therefore, it must be retrieved once again from the
-        # configuration, otherwise there will be a config object
-        # version mismatch.
-        iobj = self.cfg.GetInstanceInfo(iobj.uuid)
-
-    # Update instance metadata so that it can be reached from the
-    # metadata service.
-    UpdateMetadata(feedback_fn, self.rpc, iobj,
-                   osparams_private=self.op.osparams_private,
-                   osparams_secret=self.op.osparams_secret)
-
-    assert not self.owned_locks(locking.LEVEL_NODE_RES)
-
-    if self.op.start:
-      iobj.admin_state = constants.ADMINST_UP
-      self.cfg.Update(iobj, feedback_fn)
-      logging.info("Starting instance %s on node %s", self.op.instance_name,
-                   self.pnode.name)
-      feedback_fn("* starting instance...")
-      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
-                                            False, self.op.reason)
-      result.Raise("Could not start instance")
-
-    return self.cfg.GetNodeNames(list(self.cfg.GetInstanceNodes(iobj.uuid)))
-
-  def PrepareRetry(self, feedback_fn):
-    # A temporary lack of resources can only happen if opportunistic locking
-    # is used.
-    assert self.op.opportunistic_locking
-
-    logging.info("Opportunistic locking did not suceed, falling back to"
-                 " full lock allocation")
-    feedback_fn("* falling back to full lock allocation")
-    self.op.opportunistic_locking = False
-
-
 class LUInstanceRename(LogicalUnit):
   """Rename an instance.
 
@@ -1858,7 +112,7 @@
     if self._new_name_resolved or not self.op.name_check:
       return
 
-    hostname = _CheckHostnameSane(self, self.op.new_name)
+    hostname = CheckHostnameSane(self, self.op.new_name)
     self.op.new_name = hostname.name
     if (self.op.ip_check and
         netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
@@ -1882,10 +136,12 @@
     # It should actually not happen that an instance is running with a disabled
     # disk template, but in case it does, the renaming of file-based instances
     # will fail horribly. Thus, we test it before.
-    if (instance.disk_template in constants.DTS_FILEBASED and
-        self.op.new_name != instance.name):
-      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
-                               instance.disk_template)
+    for disk in self.cfg.GetInstanceDisks(instance.uuid):
+      if (disk.dev_type in constants.DTS_FILEBASED and
+          self.op.new_name != instance.name):
+        # TODO: when disks are separate objects, this should check for disk
+        # types, not disk templates.
+        CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(), disk.dev_type)
 
     CheckNodeOnline(self, instance.primary_node)
     CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
@@ -1898,7 +154,7 @@
       CheckInstanceExistence(self, self.op.new_name)
 
   def ExpandNames(self):
-    self._ExpandAndLockInstance()
+    self._ExpandAndLockInstance(allow_forthcoming=True)
 
     # Note that this call might not resolve anything if name checks have been
     # disabled in the opcode. In this case, we might have a renaming collision
@@ -1919,9 +175,11 @@
     old_name = self.instance.name
 
     rename_file_storage = False
-    if (self.instance.disk_template in (constants.DT_FILE,
-                                        constants.DT_SHARED_FILE) and
-        self.op.new_name != self.instance.name):
+    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+    renamed_storage = [d for d in disks
+                       if (d.dev_type in constants.DTS_FILEBASED and
+                           d.dev_type != constants.DT_GLUSTER)]
+    if (renamed_storage and self.op.new_name != self.instance.name):
       disks = self.cfg.GetInstanceDisks(self.instance.uuid)
       old_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
       rename_file_storage = True
@@ -1936,6 +194,9 @@
     renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
     disks = self.cfg.GetInstanceDisks(renamed_inst.uuid)
 
+    if self.instance.forthcoming:
+      return renamed_inst.name
+
     if rename_file_storage:
       new_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
       result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
@@ -1981,7 +242,7 @@
   REQ_BGL = False
 
   def ExpandNames(self):
-    self._ExpandAndLockInstance()
+    self._ExpandAndLockInstance(allow_forthcoming=True)
     self.needed_locks[locking.LEVEL_NODE] = []
     self.needed_locks[locking.LEVEL_NODE_RES] = []
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
@@ -2033,26 +294,31 @@
     """Remove the instance.
 
     """
-    logging.info("Shutting down instance %s on node %s", self.instance.name,
-                 self.cfg.GetNodeName(self.instance.primary_node))
-
-    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
-                                             self.instance,
-                                             self.op.shutdown_timeout,
-                                             self.op.reason)
-    if self.op.ignore_failures:
-      result.Warn("Warning: can't shutdown instance", feedback_fn)
-    else:
-      result.Raise("Could not shutdown instance %s on node %s" %
-                   (self.instance.name,
-                    self.cfg.GetNodeName(self.instance.primary_node)))
-
     assert (self.owned_locks(locking.LEVEL_NODE) ==
             self.owned_locks(locking.LEVEL_NODE_RES))
     assert not (set(self.cfg.GetInstanceNodes(self.instance.uuid)) -
                 self.owned_locks(locking.LEVEL_NODE)), \
       "Not owning correct locks"
 
+    if not self.instance.forthcoming:
+      logging.info("Shutting down instance %s on node %s", self.instance.name,
+                   self.cfg.GetNodeName(self.instance.primary_node))
+
+      result = self.rpc.call_instance_shutdown(self.instance.primary_node,
+                                               self.instance,
+                                               self.op.shutdown_timeout,
+                                               self.op.reason)
+      if self.op.ignore_failures:
+        result.Warn("Warning: can't shutdown instance", feedback_fn)
+      else:
+        result.Raise("Could not shutdown instance %s on node %s" %
+                     (self.instance.name,
+                      self.cfg.GetNodeName(self.instance.primary_node)))
+    else:
+      logging.info("Instance %s on node %s is forthcoming; not shutting down",
+                   self.instance.name,
+                   self.cfg.GetNodeName(self.instance.primary_node))
+
     RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
 
 
@@ -2201,7 +467,8 @@
       CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
     except errors.OpExecError:
       self.LogWarning("Device creation failed")
-      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
+      for disk_uuid in self.instance.disks:
+        self.cfg.ReleaseDRBDMinors(disk_uuid)
       raise
 
     errs = []
@@ -2234,12 +501,15 @@
       try:
         RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
       finally:
-        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
+        for disk_uuid in self.instance.disks:
+          self.cfg.ReleaseDRBDMinors(disk_uuid)
         raise errors.OpExecError("Errors during disk copy: %s" %
                                  (",".join(errs),))
 
     self.instance.primary_node = target_node.uuid
     self.cfg.Update(self.instance, feedback_fn)
+    for disk in disks:
+      self.cfg.SetDiskNodes(disk.uuid, [target_node.uuid])
 
     self.LogInfo("Removing the disks on the original node")
     RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
@@ -2249,8 +519,8 @@
       self.LogInfo("Starting instance %s on node %s",
                    self.instance.name, target_node.name)
 
-      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
-                                          ignore_secondaries=True)
+      disks_ok, _, _ = AssembleInstanceDisks(self, self.instance,
+                                             ignore_secondaries=True)
       if not disks_ok:
         ShutdownInstanceDisks(self, self.instance)
         raise errors.OpExecError("Can't activate the instance's disks")
@@ -2302,7 +572,7 @@
                                    " or set a cluster-wide default iallocator",
                                    errors.ECODE_INVAL)
 
-    _CheckOpportunisticLocking(self.op)
+    CheckOpportunisticLocking(self.op)
 
     dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
     if dups:
@@ -2314,11 +584,7 @@
 
     """
     self.share_locks = ShareAll()
-    self.needed_locks = {
-      # iallocator will select nodes and even if no iallocator is used,
-      # collisions with LUInstanceCreate should be avoided
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
-      }
+    self.needed_locks = {}
 
     if self.op.iallocator:
       self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
@@ -2360,13 +626,13 @@
       else:
         node_whitelist = None
 
-      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op.disks,
-                                                            op.disk_template,
-                                                            default_vg),
-                                           _ComputeNics(op, cluster, None,
-                                                        self.cfg, ec_id),
-                                           _ComputeFullBeParams(op, cluster),
-                                           node_whitelist)
+      insts = [CreateInstanceAllocRequest(op, ComputeDisks(op.disks,
+                                                           op.disk_template,
+                                                           default_vg),
+                                          ComputeNics(op, cluster, None,
+                                                      self.cfg, ec_id),
+                                          ComputeFullBeParams(op, cluster),
+                                          node_whitelist)
                for op in self.op.instances]
 
       req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
@@ -2433,1883 +699,6 @@
     return ResultWithJobs(jobs, **self._ConstructPartialResult())
 
 
-class _InstNicModPrivate(object):
-  """Data structure for network interface modifications.
-
-  Used by L{LUInstanceSetParams}.
-
-  """
-  def __init__(self):
-    self.params = None
-    self.filled = None
-
-
-def _PrepareContainerMods(mods, private_fn):
-  """Prepares a list of container modifications by adding a private data field.
-
-  @type mods: list of tuples; (operation, index, parameters)
-  @param mods: List of modifications
-  @type private_fn: callable or None
-  @param private_fn: Callable for constructing a private data field for a
-    modification
-  @rtype: list
-
-  """
-  if private_fn is None:
-    fn = lambda: None
-  else:
-    fn = private_fn
-
-  return [(op, idx, params, fn()) for (op, idx, params) in mods]
-
-
-def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
-  """Checks if nodes have enough physical CPUs
-
-  This function checks if all given nodes have the needed number of
-  physical CPUs. In case any node has less CPUs or we cannot get the
-  information from the node, this function raises an OpPrereqError
-  exception.
-
-  @type lu: C{LogicalUnit}
-  @param lu: a logical unit from which we get configuration data
-  @type node_uuids: C{list}
-  @param node_uuids: the list of node UUIDs to check
-  @type requested: C{int}
-  @param requested: the minimum acceptable number of physical CPUs
-  @type hypervisor_specs: list of pairs (string, dict of strings)
-  @param hypervisor_specs: list of hypervisor specifications in
-      pairs (hypervisor_name, hvparams)
-  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
-      or we cannot check the node
-
-  """
-  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
-  for node_uuid in node_uuids:
-    info = nodeinfo[node_uuid]
-    node_name = lu.cfg.GetNodeName(node_uuid)
-    info.Raise("Cannot get current information from node %s" % node_name,
-               prereq=True, ecode=errors.ECODE_ENVIRON)
-    (_, _, (hv_info, )) = info.payload
-    num_cpus = hv_info.get("cpu_total", None)
-    if not isinstance(num_cpus, int):
-      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
-                                 " on node %s, result was '%s'" %
-                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
-    if requested > num_cpus:
-      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
-                                 "required" % (node_name, num_cpus, requested),
-                                 errors.ECODE_NORES)
-
-
-def GetItemFromContainer(identifier, kind, container):
-  """Return the item refered by the identifier.
-
-  @type identifier: string
-  @param identifier: Item index or name or UUID
-  @type kind: string
-  @param kind: One-word item description
-  @type container: list
-  @param container: Container to get the item from
-
-  """
-  # Index
-  try:
-    idx = int(identifier)
-    if idx == -1:
-      # Append
-      absidx = len(container) - 1
-    elif idx < 0:
-      raise IndexError("Not accepting negative indices other than -1")
-    elif idx > len(container):
-      raise IndexError("Got %s index %s, but there are only %s" %
-                       (kind, idx, len(container)))
-    else:
-      absidx = idx
-    return (absidx, container[idx])
-  except ValueError:
-    pass
-
-  for idx, item in enumerate(container):
-    if item.uuid == identifier or item.name == identifier:
-      return (idx, item)
-
-  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
-                             (kind, identifier), errors.ECODE_NOENT)
-
-
-def _ApplyContainerMods(kind, container, chgdesc, mods,
-                        create_fn, modify_fn, remove_fn,
-                        post_add_fn=None):
-  """Applies descriptions in C{mods} to C{container}.
-
-  @type kind: string
-  @param kind: One-word item description
-  @type container: list
-  @param container: Container to modify
-  @type chgdesc: None or list
-  @param chgdesc: List of applied changes
-  @type mods: list
-  @param mods: Modifications as returned by L{_PrepareContainerMods}
-  @type create_fn: callable
-  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
-    receives absolute item index, parameters and private data object as added
-    by L{_PrepareContainerMods}, returns tuple containing new item and changes
-    as list
-  @type modify_fn: callable
-  @param modify_fn: Callback for modifying an existing item
-    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
-    and private data object as added by L{_PrepareContainerMods}, returns
-    changes as list
-  @type remove_fn: callable
-  @param remove_fn: Callback on removing item; receives absolute item index,
-    item and private data object as added by L{_PrepareContainerMods}
-  @type post_add_fn: callable
-  @param post_add_fn: Callable for post-processing a newly created item after
-    it has been put into the container. It receives the index of the new item
-    and the new item as parameters.
-
-  """
-  for (op, identifier, params, private) in mods:
-    changes = None
-
-    if op == constants.DDM_ADD:
-      # Calculate where item will be added
-      # When adding an item, identifier can only be an index
-      try:
-        idx = int(identifier)
-      except ValueError:
-        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
-                                   " identifier for %s" % constants.DDM_ADD,
-                                   errors.ECODE_INVAL)
-      if idx == -1:
-        addidx = len(container)
-      else:
-        if idx < 0:
-          raise IndexError("Not accepting negative indices other than -1")
-        elif idx > len(container):
-          raise IndexError("Got %s index %s, but there are only %s" %
-                           (kind, idx, len(container)))
-        addidx = idx
-
-      if create_fn is None:
-        item = params
-      else:
-        (item, changes) = create_fn(addidx, params, private)
-
-      if idx == -1:
-        container.append(item)
-      else:
-        assert idx >= 0
-        assert idx <= len(container)
-        # list.insert does so before the specified index
-        container.insert(idx, item)
-
-      if post_add_fn is not None:
-        post_add_fn(addidx, item)
-
-    else:
-      # Retrieve existing item
-      (absidx, item) = GetItemFromContainer(identifier, kind, container)
-
-      if op == constants.DDM_REMOVE:
-        assert not params
-
-        changes = [("%s/%s" % (kind, absidx), "remove")]
-
-        if remove_fn is not None:
-          msg = remove_fn(absidx, item, private)
-          if msg:
-            changes.append(("%s/%s" % (kind, absidx), msg))
-
-        assert container[absidx] == item
-        del container[absidx]
-      elif op == constants.DDM_MODIFY:
-        if modify_fn is not None:
-          changes = modify_fn(absidx, item, params, private)
-      else:
-        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
-
-    assert _TApplyContModsCbChanges(changes)
-
-    if not (chgdesc is None or changes is None):
-      chgdesc.extend(changes)
-
-
-class LUInstanceSetParams(LogicalUnit):
-  """Modifies an instances's parameters.
-
-  """
-  HPATH = "instance-modify"
-  HTYPE = constants.HTYPE_INSTANCE
-  REQ_BGL = False
-
-  @staticmethod
-  def _UpgradeDiskNicMods(kind, mods, verify_fn):
-    assert ht.TList(mods)
-    assert not mods or len(mods[0]) in (2, 3)
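-    # Editor's illustrative note (not from the original source): legacy
-    # 2-tuple modifications such as [(constants.DDM_ADD, {"size": 1024})]
-    # are upgraded to [(constants.DDM_ADD, -1, {"size": 1024})], while a
-    # 2-tuple whose first element is an index or name, e.g.
-    # [("0", {"mode": "ro"})], becomes
-    # [(constants.DDM_MODIFY, "0", {"mode": "ro"})]; 3-tuple modifications
-    # are passed through unchanged.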
-
-    if mods and len(mods[0]) == 2:
-      result = []
-
-      addremove = 0
-      for op, params in mods:
-        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
-          result.append((op, -1, params))
-          addremove += 1
-
-          if addremove > 1:
-            raise errors.OpPrereqError("Only one %s add or remove operation is"
-                                       " supported at a time" % kind,
-                                       errors.ECODE_INVAL)
-        else:
-          result.append((constants.DDM_MODIFY, op, params))
-
-      assert verify_fn(result)
-    else:
-      result = mods
-
-    return result
-
-  @staticmethod
-  def _CheckMods(kind, mods, key_types, item_fn):
-    """Ensures requested disk/NIC modifications are valid.
-
-    """
-    for (op, _, params) in mods:
-      assert ht.TDict(params)
-
-      # If 'key_types' is an empty dict, we assume we have an
-      # 'ext' template and thus do not ForceDictType
-      if key_types:
-        utils.ForceDictType(params, key_types)
-
-      if op == constants.DDM_REMOVE:
-        if params:
-          raise errors.OpPrereqError("No settings should be passed when"
-                                     " removing a %s" % kind,
-                                     errors.ECODE_INVAL)
-      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
-        item_fn(op, params)
-      else:
-        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
-
-  def _VerifyDiskModification(self, op, params, excl_stor, group_access_type):
-    """Verifies a disk modification.
-
-    """
-    if op == constants.DDM_ADD:
-      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
-      if mode not in constants.DISK_ACCESS_SET:
-        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
-                                   errors.ECODE_INVAL)
-
-      size = params.get(constants.IDISK_SIZE, None)
-      if size is None:
-        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
-                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
-      size = int(size)
-
-      params[constants.IDISK_SIZE] = size
-      name = params.get(constants.IDISK_NAME, None)
-      if name is not None and name.lower() == constants.VALUE_NONE:
-        params[constants.IDISK_NAME] = None
-
-      CheckSpindlesExclusiveStorage(params, excl_stor, True)
-
-      # Check disk access param (only for specific disks)
-      if self.instance.disk_template in constants.DTS_HAVE_ACCESS:
-        access_type = params.get(constants.IDISK_ACCESS, group_access_type)
-        if not IsValidDiskAccessModeCombination(self.instance.hypervisor,
-                                                self.instance.disk_template,
-                                                access_type):
-          raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
-                                     " used with %s disk access param" %
-                                     (self.instance.hypervisor, access_type),
-                                      errors.ECODE_STATE)
-
-    elif op == constants.DDM_MODIFY:
-      if constants.IDISK_SIZE in params:
-        raise errors.OpPrereqError("Disk size change not possible, use"
-                                   " grow-disk", errors.ECODE_INVAL)
-
-      # Disk modification supports changing only the disk name and mode.
-      # Changing arbitrary parameters is allowed only for the ext disk template
-      if self.instance.disk_template != constants.DT_EXT:
-        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
-      else:
-        # We have to check that the 'access' parameter cannot be modified
-        if constants.IDISK_ACCESS in params:
-          raise errors.OpPrereqError("Disk 'access' parameter change is"
-                                     " not possible", errors.ECODE_INVAL)
-
-      name = params.get(constants.IDISK_NAME, None)
-      if name is not None and name.lower() == constants.VALUE_NONE:
-        params[constants.IDISK_NAME] = None
-
-  @staticmethod
-  def _VerifyNicModification(op, params):
-    """Verifies a network interface modification.
-
-    """
-    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
-      ip = params.get(constants.INIC_IP, None)
-      name = params.get(constants.INIC_NAME, None)
-      req_net = params.get(constants.INIC_NETWORK, None)
-      link = params.get(constants.NIC_LINK, None)
-      mode = params.get(constants.NIC_MODE, None)
-      if name is not None and name.lower() == constants.VALUE_NONE:
-        params[constants.INIC_NAME] = None
-      if req_net is not None:
-        if req_net.lower() == constants.VALUE_NONE:
-          params[constants.INIC_NETWORK] = None
-          req_net = None
-        elif link is not None or mode is not None:
-          raise errors.OpPrereqError("If network is given"
-                                     " mode or link should not",
-                                     errors.ECODE_INVAL)
-
-      if op == constants.DDM_ADD:
-        macaddr = params.get(constants.INIC_MAC, None)
-        if macaddr is None:
-          params[constants.INIC_MAC] = constants.VALUE_AUTO
-
-      if ip is not None:
-        if ip.lower() == constants.VALUE_NONE:
-          params[constants.INIC_IP] = None
-        else:
-          if ip.lower() == constants.NIC_IP_POOL:
-            if op == constants.DDM_ADD and req_net is None:
-              raise errors.OpPrereqError("If ip=pool, parameter network"
-                                         " cannot be none",
-                                         errors.ECODE_INVAL)
-          else:
-            if not netutils.IPAddress.IsValid(ip):
-              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
-                                         errors.ECODE_INVAL)
-
-      if constants.INIC_MAC in params:
-        macaddr = params[constants.INIC_MAC]
-        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
-          macaddr = utils.NormalizeAndValidateMac(macaddr)
-
-        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
-          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
-                                     " modifying an existing NIC",
-                                     errors.ECODE_INVAL)
-
-  def CheckArguments(self):
-    if not (self.op.nics or self.op.disks or self.op.disk_template or
-            self.op.hvparams or self.op.beparams or self.op.os_name or
-            self.op.osparams or self.op.offline is not None or
-            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
-            self.op.instance_communication is not None):
-      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
-
-    if self.op.hvparams:
-      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
-                           "hypervisor", "instance", "cluster")
-
-    self.op.disks = self._UpgradeDiskNicMods(
-      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
-    self.op.nics = self._UpgradeDiskNicMods(
-      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
-
-    # Check disk template modifications
-    if self.op.disk_template:
-      if self.op.disks:
-        raise errors.OpPrereqError("Disk template conversion and other disk"
-                                   " changes not supported at the same time",
-                                   errors.ECODE_INVAL)
-
-      # mirrored template node checks
-      if self.op.disk_template in constants.DTS_INT_MIRROR:
-        if not self.op.remote_node:
-          raise errors.OpPrereqError("Changing the disk template to a mirrored"
-                                     " one requires specifying a secondary"
-                                     " node", errors.ECODE_INVAL)
-      elif self.op.remote_node:
-        self.LogWarning("Changing the disk template to a non-mirrored one,"
-                        " the secondary node will be ignored")
-        # the secondary node must be cleared in order to be ignored, otherwise
-        # the operation will fail in the GenerateDiskTemplate method
-        self.op.remote_node = None
-
-      # file-based template checks
-      if self.op.disk_template in constants.DTS_FILEBASED:
-        if not self.op.file_driver:
-          self.op.file_driver = constants.FD_DEFAULT
-        elif self.op.file_driver not in constants.FILE_DRIVER:
-          raise errors.OpPrereqError("Invalid file driver name '%s'" %
-                                     self.op.file_driver, errors.ECODE_INVAL)
-
-    # Check NIC modifications
-    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
-                    self._VerifyNicModification)
-
-    if self.op.pnode:
-      (self.op.pnode_uuid, self.op.pnode) = \
-        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
-
-  def ExpandNames(self):
-    self._ExpandAndLockInstance()
-    self.needed_locks[locking.LEVEL_NODEGROUP] = []
-    # Can't even acquire node locks in shared mode as upcoming changes in
-    # Ganeti 2.6 will start to modify the node object on disk conversion
-    self.needed_locks[locking.LEVEL_NODE] = []
-    self.needed_locks[locking.LEVEL_NODE_RES] = []
-    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
-    # Lock node group to look up the ipolicy
-    self.share_locks[locking.LEVEL_NODEGROUP] = 1
-    self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
-    self.dont_collate_locks[locking.LEVEL_NODE] = True
-    self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
-
-  def DeclareLocks(self, level):
-    if level == locking.LEVEL_NODEGROUP:
-      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
-      # Acquire locks for the instance's nodegroups optimistically. Needs
-      # to be verified in CheckPrereq
-      self.needed_locks[locking.LEVEL_NODEGROUP] = \
-        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
-    elif level == locking.LEVEL_NODE:
-      self._LockInstancesNodes()
-      if self.op.disk_template and self.op.remote_node:
-        (self.op.remote_node_uuid, self.op.remote_node) = \
-          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
-                                self.op.remote_node)
-        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
-    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
-      # Copy node locks
-      self.needed_locks[locking.LEVEL_NODE_RES] = \
-        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
-
-  def BuildHooksEnv(self):
-    """Build hooks env.
-
-    This runs on the master, primary and secondaries.
-
-    """
-    args = {}
-    if constants.BE_MINMEM in self.be_new:
-      args["minmem"] = self.be_new[constants.BE_MINMEM]
-    if constants.BE_MAXMEM in self.be_new:
-      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
-    if constants.BE_VCPUS in self.be_new:
-      args["vcpus"] = self.be_new[constants.BE_VCPUS]
-    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
-    # information at all.
-
-    if self._new_nics is not None:
-      nics = []
-
-      for nic in self._new_nics:
-        n = copy.deepcopy(nic)
-        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
-        n.nicparams = nicparams
-        nics.append(NICToTuple(self, n))
-
-      args["nics"] = nics
-
-    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
-    if self.op.disk_template:
-      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
-    if self.op.runtime_mem:
-      env["RUNTIME_MEMORY"] = self.op.runtime_mem
-
-    return env
-
-  def BuildHooksNodes(self):
-    """Build hooks nodes.
-
-    """
-    nl = [self.cfg.GetMasterNode()] + \
-        list(self.cfg.GetInstanceNodes(self.instance.uuid))
-    return (nl, nl)
-
-  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
-                              old_params, cluster, pnode_uuid):
-
-    update_params_dict = dict([(key, params[key])
-                               for key in constants.NICS_PARAMETERS
-                               if key in params])
-
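-    # only the generic NIC parameters (those listed in
-    # constants.NICS_PARAMETERS) are merged with the existing nicparams below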
-    req_link = update_params_dict.get(constants.NIC_LINK, None)
-    req_mode = update_params_dict.get(constants.NIC_MODE, None)
-
-    new_net_uuid = None
-    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
-    if new_net_uuid_or_name:
-      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
-      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
-
-    if old_net_uuid:
-      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
-
-    if new_net_uuid:
-      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
-      if not netparams:
-        raise errors.OpPrereqError("No netparams found for the network"
-                                   " %s, probably not connected" %
-                                   new_net_obj.name, errors.ECODE_INVAL)
-      new_params = dict(netparams)
-    else:
-      new_params = GetUpdatedParams(old_params, update_params_dict)
-
-    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
-
-    new_filled_params = cluster.SimpleFillNIC(new_params)
-    objects.NIC.CheckParameterSyntax(new_filled_params)
-
-    new_mode = new_filled_params[constants.NIC_MODE]
-    if new_mode == constants.NIC_MODE_BRIDGED:
-      bridge = new_filled_params[constants.NIC_LINK]
-      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
-      if msg:
-        msg = "Error checking bridges on node '%s': %s" % \
-                (self.cfg.GetNodeName(pnode_uuid), msg)
-        if self.op.force:
-          self.warn.append(msg)
-        else:
-          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
-
-    elif new_mode == constants.NIC_MODE_ROUTED:
-      ip = params.get(constants.INIC_IP, old_ip)
-      if ip is None and not new_net_uuid:
-        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
-                                   " on a routed NIC if not attached to a"
-                                   " network", errors.ECODE_INVAL)
-
-    elif new_mode == constants.NIC_MODE_OVS:
-      # TODO: check OVS link
-      self.LogInfo("OVS links are currently not checked for correctness")
-
-    if constants.INIC_MAC in params:
-      mac = params[constants.INIC_MAC]
-      if mac is None:
-        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
-                                   errors.ECODE_INVAL)
-      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
-        # otherwise generate the MAC address
-        params[constants.INIC_MAC] = \
-          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
-      else:
-        # or validate/reserve the current one
-        try:
-          self.cfg.ReserveMAC(mac, self.proc.GetECId())
-        except errors.ReservationError:
-          raise errors.OpPrereqError("MAC address '%s' already in use"
-                                     " in cluster" % mac,
-                                     errors.ECODE_NOTUNIQUE)
-    elif new_net_uuid != old_net_uuid:
-
-      def get_net_prefix(net_uuid):
-        mac_prefix = None
-        if net_uuid:
-          nobj = self.cfg.GetNetwork(net_uuid)
-          mac_prefix = nobj.mac_prefix
-
-        return mac_prefix
-
-      new_prefix = get_net_prefix(new_net_uuid)
-      old_prefix = get_net_prefix(old_net_uuid)
-      if old_prefix != new_prefix:
-        params[constants.INIC_MAC] = \
-          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
-
-    # if there is a change in (ip, network) tuple
-    new_ip = params.get(constants.INIC_IP, old_ip)
-    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
-      if new_ip:
-        # if IP is pool then require a network and generate one IP
-        if new_ip.lower() == constants.NIC_IP_POOL:
-          if new_net_uuid:
-            try:
-              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
-            except errors.ReservationError:
-              raise errors.OpPrereqError("Unable to get a free IP"
-                                         " from the address pool",
-                                         errors.ECODE_STATE)
-            self.LogInfo("Chose IP %s from network %s",
-                         new_ip,
-                         new_net_obj.name)
-            params[constants.INIC_IP] = new_ip
-          else:
-            raise errors.OpPrereqError("ip=pool, but no network found",
-                                       errors.ECODE_INVAL)
-        # Reserve the new IP in the new network, if any
-        elif new_net_uuid:
-          try:
-            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
-                               check=self.op.conflicts_check)
-            self.LogInfo("Reserving IP %s in network %s",
-                         new_ip, new_net_obj.name)
-          except errors.ReservationError:
-            raise errors.OpPrereqError("IP %s not available in network %s" %
-                                       (new_ip, new_net_obj.name),
-                                       errors.ECODE_NOTUNIQUE)
-        # new network is None so check if new IP is a conflicting IP
-        elif self.op.conflicts_check:
-          _CheckForConflictingIp(self, new_ip, pnode_uuid)
-
-      # release old IP if old network is not None
-      if old_ip and old_net_uuid:
-        try:
-          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
-        except errors.AddressPoolError:
-          logging.warning("Release IP %s not contained in network %s",
-                          old_ip, old_net_obj.name)
-
-    # there are no changes in (ip, network) tuple and old network is not None
-    elif (old_net_uuid is not None and
-          (req_link is not None or req_mode is not None)):
-      raise errors.OpPrereqError("Not allowed to change link or mode of"
-                                 " a NIC that is connected to a network",
-                                 errors.ECODE_INVAL)
-
-    private.params = new_params
-    private.filled = new_filled_params
-
-  def _PreCheckDiskTemplate(self, pnode_info):
-    """CheckPrereq checks related to a new disk template."""
-    # Arguments are passed to avoid configuration lookups
-    pnode_uuid = self.instance.primary_node
-
-    if self.instance.disk_template in constants.DTS_NOT_CONVERTIBLE_FROM:
-      raise errors.OpPrereqError("Conversion from the '%s' disk template is"
-                                 " not supported" % self.instance.disk_template,
-                                 errors.ECODE_INVAL)
-
-    elif self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO:
-      raise errors.OpPrereqError("Conversion to the '%s' disk template is"
-                                 " not supported" % self.op.disk_template,
-                                 errors.ECODE_INVAL)
-
-    if (self.op.disk_template != constants.DT_EXT and
-        self.instance.disk_template == self.op.disk_template):
-      raise errors.OpPrereqError("Instance already has disk template %s" %
-                                 self.instance.disk_template,
-                                 errors.ECODE_INVAL)
-
-    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
-      enabled_dts = utils.CommaJoin(self.cluster.enabled_disk_templates)
-      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
-                                 " cluster (enabled templates: %s)" %
-                                 (self.op.disk_template, enabled_dts),
-                                  errors.ECODE_STATE)
-
-    default_vg = self.cfg.GetVGName()
-    if (not default_vg and
-        self.op.disk_template not in constants.DTS_NOT_LVM):
-      raise errors.OpPrereqError("Disk template conversions to lvm-based"
-                                 " instances are not supported by the cluster",
-                                 errors.ECODE_STATE)
-
-    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
-                       msg="cannot change disk template")
-
-    # compute new disks' information
-    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
-    self.disks_info = ComputeDisksInfo(inst_disks, self.op.disk_template,
-                                       default_vg, self.op.ext_params)
-
-    # mirror node verification
-    if self.op.disk_template in constants.DTS_INT_MIRROR:
-      if self.op.remote_node_uuid == pnode_uuid:
-        raise errors.OpPrereqError("Given new secondary node %s is the same"
-                                   " as the primary node of the instance" %
-                                   self.op.remote_node, errors.ECODE_STATE)
-      CheckNodeOnline(self, self.op.remote_node_uuid)
-      CheckNodeNotDrained(self, self.op.remote_node_uuid)
-      CheckNodeVmCapable(self, self.op.remote_node_uuid)
-
-      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
-      snode_group = self.cfg.GetNodeGroup(snode_info.group)
-      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
-                                                              snode_group)
-      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
-                             ignore=self.op.ignore_ipolicy)
-      if pnode_info.group != snode_info.group:
-        self.LogWarning("The primary and secondary nodes are in two"
-                        " different node groups; the disk parameters"
-                        " from the first disk's node group will be"
-                        " used")
-
-    # check that the template is in the primary node group's allowed templates
-    pnode_group = self.cfg.GetNodeGroup(pnode_info.group)
-    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
-                                                            pnode_group)
-    allowed_dts = ipolicy[constants.IPOLICY_DTS]
-    if self.op.disk_template not in allowed_dts:
-      raise errors.OpPrereqError("Disk template '%s' in not allowed (allowed"
-                                 " templates: %s)" % (self.op.disk_template,
-                                 utils.CommaJoin(allowed_dts)),
-                                 errors.ECODE_STATE)
-
-    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
-      # Make sure none of the nodes require exclusive storage
-      nodes = [pnode_info]
-      if self.op.disk_template in constants.DTS_INT_MIRROR:
-        assert snode_info
-        nodes.append(snode_info)
-      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
-      if compat.any(map(has_es, nodes)):
-        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
-                  " storage is enabled" % (self.instance.disk_template,
-                                           self.op.disk_template))
-        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
-
-    # node capacity checks
-    if (self.op.disk_template == constants.DT_PLAIN and
-        self.instance.disk_template == constants.DT_DRBD8):
-      # we ensure that no capacity checks will be made for conversions from
-      # the 'drbd' to the 'plain' disk template
-      pass
-    elif (self.op.disk_template == constants.DT_DRBD8 and
-          self.instance.disk_template == constants.DT_PLAIN):
-      # for conversions from the 'plain' to the 'drbd' disk template, check
-      # only the remote node's capacity
-      req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
-      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], req_sizes)
-    elif self.op.disk_template in constants.DTS_LVM:
-      # remaining lvm-based capacity checks
-      node_uuids = [pnode_uuid]
-      if self.op.remote_node_uuid:
-        node_uuids.append(self.op.remote_node_uuid)
-      req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
-      CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
-    elif self.op.disk_template == constants.DT_RBD:
-      # CheckRADOSFreeSpace() is simply a placeholder
-      CheckRADOSFreeSpace()
-    elif self.op.disk_template == constants.DT_EXT:
-      # FIXME: Capacity checks for extstorage template, if exists
-      pass
-    else:
-      # FIXME: Checks about other non lvm-based disk templates
-      pass
-
-  def _PreCheckDisks(self, ispec):
-    """CheckPrereq checks related to disk changes.
-
-    @type ispec: dict
-    @param ispec: instance specs to be updated with the new disks
-
-    """
-    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
-
-    inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
-    excl_stor = compat.any(
-      rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values()
-      )
-
-    # Get the group access type
-    node_info = self.cfg.GetNodeInfo(self.instance.primary_node)
-    node_group = self.cfg.GetNodeGroup(node_info.group)
-    group_disk_params = self.cfg.GetGroupDiskParams(node_group)
-    group_access_type = group_disk_params[self.instance.disk_template].get(
-      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
-    )
-
-    # Check disk modifications. This is done here and not in CheckArguments
-    # (as with NICs), because we need to know the instance's disk template
-    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor,
-                                                          group_access_type)
-    if self.instance.disk_template == constants.DT_EXT:
-      self._CheckMods("disk", self.op.disks, {}, ver_fn)
-    else:
-      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
-                      ver_fn)
-
-    self.diskmod = _PrepareContainerMods(self.op.disks, None)
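-    # disk modifications need no per-item private data, unlike the NIC
-    # modifications below which use _InstNicModPrivate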
-
-    # Check the validity of the `provider' parameter
-    if self.instance.disk_template == constants.DT_EXT:
-      for mod in self.diskmod:
-        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
-        if mod[0] == constants.DDM_ADD:
-          if ext_provider is None:
-            raise errors.OpPrereqError("Instance template is '%s' and parameter"
-                                       " '%s' missing, during disk add" %
-                                       (constants.DT_EXT,
-                                        constants.IDISK_PROVIDER),
-                                       errors.ECODE_NOENT)
-        elif mod[0] == constants.DDM_MODIFY:
-          if ext_provider:
-            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
-                                       " modification" %
-                                       constants.IDISK_PROVIDER,
-                                       errors.ECODE_INVAL)
-    else:
-      for mod in self.diskmod:
-        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
-        if ext_provider is not None:
-          raise errors.OpPrereqError("Parameter '%s' is only valid for"
-                                     " instances of type '%s'" %
-                                     (constants.IDISK_PROVIDER,
-                                      constants.DT_EXT),
-                                     errors.ECODE_INVAL)
-
-    if not self.op.wait_for_sync and not self.instance.disks_active:
-      for mod in self.diskmod:
-        if mod[0] == constants.DDM_ADD:
-          raise errors.OpPrereqError("Can't add a disk to an instance with"
-                                     " deactivated disks and"
-                                     " --no-wait-for-sync given.",
-                                     errors.ECODE_INVAL)
-
-    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
-      raise errors.OpPrereqError("Disk operations not supported for"
-                                 " diskless instances", errors.ECODE_INVAL)
-
-    def _PrepareDiskMod(_, disk, params, __):
-      disk.name = params.get(constants.IDISK_NAME, None)
-
-    # Verify disk changes (operating on a copy)
-    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
-    disks = copy.deepcopy(inst_disks)
-    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
-                        _PrepareDiskMod, None)
-    utils.ValidateDeviceNames("disk", disks)
-    if len(disks) > constants.MAX_DISKS:
-      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
-                                 " more" % constants.MAX_DISKS,
-                                 errors.ECODE_STATE)
-    disk_sizes = [disk.size for disk in inst_disks]
-    disk_sizes.extend(params["size"] for (op, idx, params, private) in
-                      self.diskmod if op == constants.DDM_ADD)
-    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
-    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
-
-    # either --online or --offline was passed
-    if self.op.offline is not None:
-      if self.op.offline:
-        msg = "can't change to offline without being down first"
-      else:
-        msg = "can't change to online (down) without being offline first"
-      CheckInstanceState(self, self.instance, INSTANCE_NOT_RUNNING,
-                         msg=msg)
-
-  @staticmethod
-  def _InstanceCommunicationDDM(cfg, instance_communication, instance):
-    """Create a NIC mod that adds or removes the instance
-    communication NIC to a running instance.
-
-    The NICS are dynamically created using the Dynamic Device
-    Modification (DDM).  This function produces a NIC modification
-    (mod) that inserts an additional NIC meant for instance
-    communication in or removes an existing instance communication NIC
-    from a running instance, using DDM.
-
-    @type cfg: L{config.ConfigWriter}
-    @param cfg: cluster configuration
-
-    @type instance_communication: boolean
-    @param instance_communication: whether instance communication is
-                                   enabled or disabled
-
-    @type instance: L{objects.Instance}
-    @param instance: instance to which the NIC mod will be applied
-
-    @rtype: (L{constants.DDM_ADD}, -1, parameters) or
-            (L{constants.DDM_REMOVE}, -1, parameters) or
-            L{None}
-    @return: DDM mod containing an action to add or remove the NIC, or
-             None if nothing needs to be done
-
-    """
-    nic_name = _ComputeInstanceCommunicationNIC(instance.name)
-
-    instance_communication_nic = None
-
-    for nic in instance.nics:
-      if nic.name == nic_name:
-        instance_communication_nic = nic
-        break
-
-    if instance_communication and not instance_communication_nic:
-      action = constants.DDM_ADD
-      params = {constants.INIC_NAME: nic_name,
-                constants.INIC_MAC: constants.VALUE_GENERATE,
-                constants.INIC_IP: constants.NIC_IP_POOL,
-                constants.INIC_NETWORK:
-                  cfg.GetInstanceCommunicationNetwork()}
-    elif not instance_communication and instance_communication_nic:
-      action = constants.DDM_REMOVE
-      params = None
-    else:
-      action = None
-      params = None
-
-    if action is not None:
-      return (action, -1, params)
-    else:
-      return None
-
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the requested modifications are valid.
-
-    """
-    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
-    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
-    self.cluster = self.cfg.GetClusterInfo()
-    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]
-
-    assert self.instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-
-    pnode_uuid = self.instance.primary_node
-
-    self.warn = []
-
-    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
-        not self.op.force):
-      # verify that the instance is not up
-      instance_info = self.rpc.call_instance_info(
-          pnode_uuid, self.instance.name, self.instance.hypervisor,
-          cluster_hvparams)
-      if instance_info.fail_msg:
-        self.warn.append("Can't get instance runtime information: %s" %
-                         instance_info.fail_msg)
-      elif instance_info.payload:
-        raise errors.OpPrereqError("Instance is still running on %s" %
-                                   self.cfg.GetNodeName(pnode_uuid),
-                                   errors.ECODE_STATE)
-
-    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
-    node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid))
-    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
-
-    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
-    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
-    group_info = self.cfg.GetNodeGroup(pnode_info.group)
-
-    # dictionary with instance information after the modification
-    ispec = {}
-
-    if self.op.hotplug or self.op.hotplug_if_possible:
-      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
-                                               self.instance)
-      if result.fail_msg:
-        if self.op.hotplug:
-          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
-                       prereq=True, ecode=errors.ECODE_STATE)
-        else:
-          self.LogWarning(result.fail_msg)
-          self.op.hotplug = False
-          self.LogInfo("Modification will take place without hotplugging.")
-      else:
-        self.op.hotplug = True
-
-    # Prepare NIC modifications
-    # add or remove NIC for instance communication
-    if self.op.instance_communication is not None:
-      mod = self._InstanceCommunicationDDM(self.cfg,
-                                           self.op.instance_communication,
-                                           self.instance)
-      if mod is not None:
-        self.op.nics.append(mod)
-
-    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
-
-    # disks processing
-    assert not (self.op.disk_template and self.op.disks), \
-      "Can't modify disk template and apply disk changes at the same time"
-
-    if self.op.disk_template:
-      self._PreCheckDiskTemplate(pnode_info)
-      self.instance_file_storage_dir = CalculateFileStorageDir(self)
-
-    self._PreCheckDisks(ispec)
-
-    # hvparams processing
-    if self.op.hvparams:
-      hv_type = self.instance.hypervisor
-      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
-      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
-      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
-
-      # local check
-      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
-      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
-      self.hv_proposed = self.hv_new = hv_new # the new actual values
-      self.hv_inst = i_hvdict # the new dict (without defaults)
-    else:
-      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
-                                                   self.instance.os,
-                                                   self.instance.hvparams)
-      self.hv_new = self.hv_inst = {}
-
-    # beparams processing
-    if self.op.beparams:
-      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
-                                  use_none=True)
-      objects.UpgradeBeParams(i_bedict)
-      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
-      be_new = self.cluster.SimpleFillBE(i_bedict)
-      self.be_proposed = self.be_new = be_new # the new actual values
-      self.be_inst = i_bedict # the new dict (without defaults)
-    else:
-      self.be_new = self.be_inst = {}
-      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
-    be_old = self.cluster.FillBE(self.instance)
-
-    # CPU param validation -- checking every time a parameter is
-    # changed to cover all cases where either CPU mask or vcpus have
-    # changed
-    if (constants.BE_VCPUS in self.be_proposed and
-        constants.HV_CPU_MASK in self.hv_proposed):
-      cpu_list = \
-        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
-      # Verify mask is consistent with number of vCPUs. Can skip this
-      # test if only 1 entry in the CPU mask, which means same mask
-      # is applied to all vCPUs.
-      if (len(cpu_list) > 1 and
-          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
-        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
-                                   " CPU mask [%s]" %
-                                   (self.be_proposed[constants.BE_VCPUS],
-                                    self.hv_proposed[constants.HV_CPU_MASK]),
-                                   errors.ECODE_INVAL)
-
-      # Only perform this test if a new CPU mask is given
-      if constants.HV_CPU_MASK in self.hv_new and cpu_list:
-        # Calculate the largest CPU number requested
-        max_requested_cpu = max(map(max, cpu_list))
-        # Check that all of the instance's nodes have enough physical CPUs to
-        # satisfy the requested CPU mask
-        hvspecs = [(self.instance.hypervisor,
-                    self.cfg.GetClusterInfo()
-                      .hvparams[self.instance.hypervisor])]
-        _CheckNodesPhysicalCPUs(self,
-                                self.cfg.GetInstanceNodes(self.instance.uuid),
-                                max_requested_cpu + 1,
-                                hvspecs)
-
-    # osparams processing
-    if self.op.os_name and not self.op.force:
-      instance_os = self.op.os_name
-    else:
-      instance_os = self.instance.os
-
-    if self.op.osparams or self.op.osparams_private:
-      public_parms = self.op.osparams or {}
-      private_parms = self.op.osparams_private or {}
-      dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)
-
-      if dupe_keys:
-        raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
-                                   utils.CommaJoin(dupe_keys),
-                                   errors.ECODE_INVAL)
-
-      self.os_inst = GetUpdatedParams(self.instance.osparams,
-                                      public_parms)
-      self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
-                                              private_parms)
-
-      CheckOSParams(self, True, node_uuids, instance_os,
-                    objects.FillDict(self.os_inst,
-                                     self.os_inst_private),
-                    self.op.force_variant)
-
-    else:
-      self.os_inst = {}
-      self.os_inst_private = {}
-
-    #TODO(dynmem): do the appropriate check involving MINMEM
-    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
-        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
-      mem_check_list = [pnode_uuid]
-      if be_new[constants.BE_AUTO_BALANCE]:
-        # either we changed auto_balance to yes or it was already set before
-        mem_check_list.extend(
-          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid))
-      instance_info = self.rpc.call_instance_info(
-          pnode_uuid, self.instance.name, self.instance.hypervisor,
-          cluster_hvparams)
-      hvspecs = [(self.instance.hypervisor,
-                  cluster_hvparams)]
-      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
-                                         hvspecs)
-      pninfo = nodeinfo[pnode_uuid]
-      msg = pninfo.fail_msg
-      if msg:
-        # Assume the primary node is unreachable and go ahead
-        self.warn.append("Can't get info from primary node %s: %s" %
-                         (self.cfg.GetNodeName(pnode_uuid), msg))
-      else:
-        (_, _, (pnhvinfo, )) = pninfo.payload
-        if not isinstance(pnhvinfo.get("memory_free", None), int):
-          self.warn.append("Node data from primary node %s doesn't contain"
-                           " free memory information" %
-                           self.cfg.GetNodeName(pnode_uuid))
-        elif instance_info.fail_msg:
-          self.warn.append("Can't get instance runtime information: %s" %
-                           instance_info.fail_msg)
-        else:
-          if instance_info.payload:
-            current_mem = int(instance_info.payload["memory"])
-          else:
-            # Assume instance not running
-            # (there is a slight race condition here, but it's not very
-            # probable, and we have no other way to check)
-            # TODO: Describe race condition
-            current_mem = 0
-          #TODO(dynmem): do the appropriate check involving MINMEM
-          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
-                      pnhvinfo["memory_free"])
-          if miss_mem > 0:
-            raise errors.OpPrereqError("This change will prevent the instance"
-                                       " from starting, due to %d MB of memory"
-                                       " missing on its primary node" %
-                                       miss_mem, errors.ECODE_NORES)
-
-      if be_new[constants.BE_AUTO_BALANCE]:
-        secondary_nodes = \
-          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
-        for node_uuid, nres in nodeinfo.items():
-          if node_uuid not in secondary_nodes:
-            continue
-          nres.Raise("Can't get info from secondary node %s" %
-                     self.cfg.GetNodeName(node_uuid), prereq=True,
-                     ecode=errors.ECODE_STATE)
-          (_, _, (nhvinfo, )) = nres.payload
-          if not isinstance(nhvinfo.get("memory_free", None), int):
-            raise errors.OpPrereqError("Secondary node %s didn't return free"
-                                       " memory information" %
-                                       self.cfg.GetNodeName(node_uuid),
-                                       errors.ECODE_STATE)
-          #TODO(dynmem): do the appropriate check involving MINMEM
-          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
-            raise errors.OpPrereqError("This change will prevent the instance"
-                                       " from failover to its secondary node"
-                                       " %s, due to not enough memory" %
-                                       self.cfg.GetNodeName(node_uuid),
-                                       errors.ECODE_STATE)
-
-    if self.op.runtime_mem:
-      remote_info = self.rpc.call_instance_info(
-         self.instance.primary_node, self.instance.name,
-         self.instance.hypervisor,
-         cluster_hvparams)
-      remote_info.Raise("Error checking node %s" %
-                        self.cfg.GetNodeName(self.instance.primary_node),
-                        prereq=True)
-      if not remote_info.payload: # not running already
-        raise errors.OpPrereqError("Instance %s is not running" %
-                                   self.instance.name, errors.ECODE_STATE)
-
-      current_memory = remote_info.payload["memory"]
-      if (not self.op.force and
-           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
-            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
-        raise errors.OpPrereqError("Instance %s must have memory between %d"
-                                   " and %d MB of memory unless --force is"
-                                   " given" %
-                                   (self.instance.name,
-                                    self.be_proposed[constants.BE_MINMEM],
-                                    self.be_proposed[constants.BE_MAXMEM]),
-                                   errors.ECODE_INVAL)
-
-      delta = self.op.runtime_mem - current_memory
-      if delta > 0:
-        CheckNodeFreeMemory(
-            self, self.instance.primary_node,
-            "ballooning memory for instance %s" % self.instance.name, delta,
-            self.instance.hypervisor,
-            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
-
-    # make self.cluster visible in the functions below
-    cluster = self.cluster
-
-    def _PrepareNicCreate(_, params, private):
-      self._PrepareNicModification(params, private, None, None,
-                                   {}, cluster, pnode_uuid)
-      return (None, None)
-
-    def _PrepareNicMod(_, nic, params, private):
-      self._PrepareNicModification(params, private, nic.ip, nic.network,
-                                   nic.nicparams, cluster, pnode_uuid)
-      return None
-
-    def _PrepareNicRemove(_, params, __):
-      ip = params.ip
-      net = params.network
-      if net is not None and ip is not None:
-        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
-
-    # Verify NIC changes (operating on copy)
-    nics = [nic.Copy() for nic in self.instance.nics]
-    _ApplyContainerMods("NIC", nics, None, self.nicmod,
-                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
-    if len(nics) > constants.MAX_NICS:
-      raise errors.OpPrereqError("Instance has too many network interfaces"
-                                 " (%d), cannot add more" % constants.MAX_NICS,
-                                 errors.ECODE_STATE)
-
-    # Pre-compute NIC changes (necessary to use result in hooks)
-    self._nic_chgdesc = []
-    if self.nicmod:
-      # Operate on copies as this is still in prereq
-      nics = [nic.Copy() for nic in self.instance.nics]
-      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
-                          self._CreateNewNic, self._ApplyNicMods,
-                          self._RemoveNic)
-      # Verify that NIC names are unique and valid
-      utils.ValidateDeviceNames("NIC", nics)
-      self._new_nics = nics
-      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
-    else:
-      self._new_nics = None
-      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
-
-    if not self.op.ignore_ipolicy:
-      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
-                                                              group_info)
-
-      # Fill ispec with backend parameters
-      ispec[constants.ISPEC_SPINDLE_USE] = \
-        self.be_new.get(constants.BE_SPINDLE_USE, None)
-      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
-                                                         None)
-
-      # Copy ispec to verify parameters with min/max values separately
-      if self.op.disk_template:
-        new_disk_template = self.op.disk_template
-      else:
-        new_disk_template = self.instance.disk_template
-      ispec_max = ispec.copy()
-      ispec_max[constants.ISPEC_MEM_SIZE] = \
-        self.be_new.get(constants.BE_MAXMEM, None)
-      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
-                                                     new_disk_template)
-      ispec_min = ispec.copy()
-      ispec_min[constants.ISPEC_MEM_SIZE] = \
-        self.be_new.get(constants.BE_MINMEM, None)
-      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
-                                                     new_disk_template)
-
-      if (res_max or res_min):
-        # FIXME: Improve error message by including information about whether
-        # the upper or lower limit of the parameter fails the ipolicy.
-        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
-               (group_info, group_info.name,
-                utils.CommaJoin(set(res_max + res_min))))
-        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
-
-  def _ConvertInstanceTemplate(self, feedback_fn):
-    """Converts the disk template of an instance.
-
-    This function converts the disk template of an instance. It supports
-    conversions among all the available disk templates except conversions
-    between the LVM-based disk templates, which use a separate code path.
-    Also, this method does not support conversions that include the 'diskless'
-    template and those targeting the 'blockdev' template.
-
-    @type feedback_fn: callable
-    @param feedback_fn: function used to send feedback back to the caller
-
-    @rtype: NoneType
-    @return: None
-    @raise errors.OpPrereqError: in case of failure
-
-    """
-    template_info = self.op.disk_template
-    if self.op.disk_template == constants.DT_EXT:
-      template_info = ":".join([self.op.disk_template,
-                                self.op.ext_params["provider"]])
-
-    feedback_fn("Converting disk template from '%s' to '%s'" %
-                (self.instance.disk_template, template_info))
-
-    assert not (self.instance.disk_template in
-                constants.DTS_NOT_CONVERTIBLE_FROM or
-                self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO), \
-      ("Unsupported disk template conversion from '%s' to '%s'" %
-       (self.instance.disk_template, self.op.disk_template))
-
-    pnode_uuid = self.instance.primary_node
-    snode_uuid = []
-    if self.op.remote_node_uuid:
-      snode_uuid = [self.op.remote_node_uuid]
-
-    old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
-
-    feedback_fn("Generating new '%s' disk template..." % template_info)
-    new_disks = GenerateDiskTemplate(self,
-                                     self.op.disk_template,
-                                     self.instance.uuid,
-                                     pnode_uuid,
-                                     snode_uuid,
-                                     self.disks_info,
-                                     self.instance_file_storage_dir,
-                                     self.op.file_driver,
-                                     0,
-                                     feedback_fn,
-                                     self.diskparams)
-
-    # Create the new block devices for the instance.
-    feedback_fn("Creating new empty disks of type '%s'..." % template_info)
-    try:
-      CreateDisks(self, self.instance, disk_template=self.op.disk_template,
-                  disks=new_disks)
-    except errors.OpExecError:
-      self.LogWarning("Device creation failed")
-      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
-      raise
-
-    # Transfer the data from the old to the newly created disks of the instance.
-    feedback_fn("Populating the new empty disks of type '%s'..." %
-                template_info)
-    for idx, (old, new) in enumerate(zip(old_disks, new_disks)):
-      feedback_fn(" - copying data from disk %s (%s), size %s" %
-                  (idx, self.instance.disk_template,
-                   utils.FormatUnit(new.size, "h")))
-      if self.instance.disk_template == constants.DT_DRBD8:
-        old = old.children[0]
-      result = self.rpc.call_blockdev_convert(pnode_uuid, (old, self.instance),
-                                              (new, self.instance))
-      msg = result.fail_msg
-      if msg:
-        # A disk failed to copy. Abort the conversion operation and roll back
-        # the modifications to the previous state. The instance will remain
-        # intact.
-        if self.op.disk_template == constants.DT_DRBD8:
-          new = new.children[0]
-        self.Log(" - ERROR: Could not copy disk '%s' to '%s'" %
-                 (old.logical_id[1], new.logical_id[1]))
-        try:
-          self.LogInfo("Some disks failed to copy")
-          self.LogInfo("The instance will not be affected, aborting operation")
-          self.LogInfo("Removing newly created disks of type '%s'..." %
-                       template_info)
-          RemoveDisks(self, self.instance, disk_template=self.op.disk_template,
-                      disks=new_disks)
-          self.LogInfo("Newly created disks removed successfully")
-        finally:
-          self.cfg.ReleaseDRBDMinors(self.instance.uuid)
-          result.Raise("Error while converting the instance's template")
-
-    # In case of DRBD disk, return its port to the pool
-    if self.instance.disk_template == constants.DT_DRBD8:
-      for disk in old_disks:
-        tcp_port = disk.logical_id[2]
-        self.cfg.AddTcpUdpPort(tcp_port)
-
-    # Remove old disks from the instance.
-    feedback_fn("Detaching old disks (%s) from the instance and removing"
-                " them from cluster config" % self.instance.disk_template)
-    for old_disk in old_disks:
-      self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
-
-    # The old disk_template will be needed to remove the old block devices.
-    old_disk_template = self.instance.disk_template
-
-    # Update the disk template of the instance
-    self.cfg.SetInstanceDiskTemplate(self.instance.uuid, self.op.disk_template)
-
-    # Attach the new disks to the instance.
-    feedback_fn("Adding new disks (%s) to cluster config and attaching"
-                " them to the instance" % template_info)
-    for (idx, new_disk) in enumerate(new_disks):
-      self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
-
-    # Re-read the instance from the configuration.
-    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
-
-    # Release node locks while waiting for sync and disks removal.
-    ReleaseLocks(self, locking.LEVEL_NODE)
-
-    disk_abort = not WaitForSync(self, self.instance,
-                                 oneshot=not self.op.wait_for_sync)
-    if disk_abort:
-      raise errors.OpExecError("There are some degraded disks for"
-                               " this instance, please cleanup manually")
-
-    feedback_fn("Removing old block devices of type '%s'..." %
-                old_disk_template)
-    RemoveDisks(self, self.instance, disk_template=old_disk_template,
-                disks=old_disks)
-
-    # Node resource locks will be released by the caller.
-
-  def _ConvertPlainToDrbd(self, feedback_fn):
-    """Converts an instance from plain to drbd.
-
-    """
-    feedback_fn("Converting disk template from 'plain' to 'drbd'")
-
-    pnode_uuid = self.instance.primary_node
-    snode_uuid = self.op.remote_node_uuid
-
-    assert self.instance.disk_template == constants.DT_PLAIN
-
-    old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
-    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
-                                     self.instance.uuid, pnode_uuid,
-                                     [snode_uuid], self.disks_info,
-                                     None, None, 0,
-                                     feedback_fn, self.diskparams)
-    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
-    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
-    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
-    info = GetInstanceInfoText(self.instance)
-    feedback_fn("Creating additional volumes...")
-    # first, create the missing data and meta devices
-    for disk in anno_disks:
-      # unfortunately this is... not too nice
-      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
-                           info, True, p_excl_stor)
-      for child in disk.children:
-        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
-                             s_excl_stor)
-    # at this stage, all new LVs have been created, we can rename the
-    # old ones
-    feedback_fn("Renaming original volumes...")
-    rename_list = [(o, n.children[0].logical_id)
-                   for (o, n) in zip(old_disks, new_disks)]
-    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
-    result.Raise("Failed to rename original LVs")
-
-    feedback_fn("Initializing DRBD devices...")
-    # all child devices are in place, we can now create the DRBD devices
-    try:
-      for disk in anno_disks:
-        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
-                                       (snode_uuid, s_excl_stor)]:
-          f_create = node_uuid == pnode_uuid
-          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
-                               f_create, excl_stor)
-    except errors.GenericError, e:
-      feedback_fn("Initializing of DRBD devices failed;"
-                  " renaming back original volumes...")
-      rename_back_list = [(n.children[0], o.logical_id)
-                          for (n, o) in zip(new_disks, old_disks)]
-      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
-      result.Raise("Failed to rename LVs back after error %s" % str(e))
-      raise
-
-    # Remove the old disks from the instance
-    for old_disk in old_disks:
-      self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
-
-    # Update the disk template of the instance
-    self.cfg.SetInstanceDiskTemplate(self.instance.uuid, constants.DT_DRBD8)
-
-    # Attach the new disks to the instance
-    for (idx, new_disk) in enumerate(new_disks):
-      self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
-
-    # re-read the instance from the configuration
-    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
-
-    # Release node locks while waiting for sync
-    ReleaseLocks(self, locking.LEVEL_NODE)
-
-    # disks are created, waiting for sync
-    disk_abort = not WaitForSync(self, self.instance,
-                                 oneshot=not self.op.wait_for_sync)
-    if disk_abort:
-      raise errors.OpExecError("There are some degraded disks for"
-                               " this instance, please cleanup manually")
-
-    # Node resource locks will be released by caller
-
-  def _ConvertDrbdToPlain(self, feedback_fn):
-    """Converts an instance from drbd to plain.
-
-    """
-    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
-
-    assert self.instance.disk_template == constants.DT_DRBD8
-    assert len(secondary_nodes) == 1 or not self.instance.disks
-
-    # it will not be possible to calculate the snode_uuid later
-    snode_uuid = None
-    if secondary_nodes:
-      snode_uuid = secondary_nodes[0]
-
-    feedback_fn("Converting disk template from 'drbd' to 'plain'")
-
-    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
-    old_disks = AnnotateDiskParams(self.instance, disks, self.cfg)
-    new_disks = [d.children[0] for d in disks]
-
-    # copy over size, mode and name
-    for parent, child in zip(old_disks, new_disks):
-      child.size = parent.size
-      child.mode = parent.mode
-      child.name = parent.name
-
-    # this is a DRBD disk, return its port to the pool
-    for disk in old_disks:
-      tcp_port = disk.logical_id[2]
-      self.cfg.AddTcpUdpPort(tcp_port)
-
-    # Remove the old disks from the instance
-    for old_disk in old_disks:
-      self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
-
-    # Update the disk template of the instance
-    self.cfg.SetInstanceDiskTemplate(self.instance.uuid, constants.DT_PLAIN)
-
-    # Attach the new disks to the instance
-    for (idx, new_disk) in enumerate(new_disks):
-      self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
-
-    # re-read the instance from the configuration
-    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
-
-    # Release locks in case removing disks takes a while
-    ReleaseLocks(self, locking.LEVEL_NODE)
-
-    feedback_fn("Removing volumes on the secondary node...")
-    RemoveDisks(self, self.instance, disk_template=constants.DT_DRBD8,
-                disks=old_disks, target_node_uuid=snode_uuid)
-
-    feedback_fn("Removing unneeded volumes on the primary node...")
-    meta_disks = []
-    for idx, disk in enumerate(old_disks):
-      meta_disks.append(disk.children[1])
-    RemoveDisks(self, self.instance, disk_template=constants.DT_DRBD8,
-                disks=meta_disks)
-
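Converting back from DRBD to plain is essentially an unwrapping step: each DRBD disk's first child (the data LV) is promoted to a plain disk, size/mode/name are copied over, and the TCP port stored in the DRBD logical_id goes back to the cluster's port pool. A condensed sketch, with a plain set standing in for the configuration's port pool:

    def unwrap_drbd_disks(drbd_disks, port_pool):
        """Promote each DRBD disk's data child to a plain disk.

        Copies size/mode/name onto the child and returns the freed DRBD
        TCP ports to `port_pool` (a plain set used as a stand-in here).
        """
        plain_disks = []
        for disk in drbd_disks:
            child = disk.children[0]           # the data LV becomes the new disk
            child.size = disk.size
            child.mode = disk.mode
            child.name = disk.name
            plain_disks.append(child)
            port_pool.add(disk.logical_id[2])  # third logical_id field is the TCP port
        return plain_disks
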
-  def _HotplugDevice(self, action, dev_type, device, extra, seq):
-    self.LogInfo("Trying to hotplug device...")
-    msg = "hotplug:"
-    result = self.rpc.call_hotplug_device(self.instance.primary_node,
-                                          self.instance, action, dev_type,
-                                          (device, self.instance),
-                                          extra, seq)
-    if result.fail_msg:
-      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
-      self.LogInfo("Continuing execution..")
-      msg += "failed"
-    else:
-      self.LogInfo("Hotplug done.")
-      msg += "done"
-    return msg
-
-  def _CreateNewDisk(self, idx, params, _):
-    """Creates a new disk.
-
-    """
-    # add a new disk
-    instance_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
-    if self.instance.disk_template in constants.DTS_FILEBASED:
-      (file_driver, file_path) = instance_disks[0].logical_id
-      file_path = os.path.dirname(file_path)
-    else:
-      file_driver = file_path = None
-
-    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
-    disk = \
-      GenerateDiskTemplate(self, self.instance.disk_template,
-                           self.instance.uuid, self.instance.primary_node,
-                           secondary_nodes, [params], file_path,
-                           file_driver, idx, self.Log, self.diskparams)[0]
-
-    new_disks = CreateDisks(self, self.instance, disks=[disk])
-    self.cfg.AddInstanceDisk(self.instance.uuid, disk, idx)
-
-    # re-read the instance from the configuration
-    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
-
-    if self.cluster.prealloc_wipe_disks:
-      # Wipe new disk
-      WipeOrCleanupDisks(self, self.instance,
-                         disks=[(idx, disk, 0)],
-                         cleanup=new_disks)
-
-    changes = [
-      ("disk/%d" % idx,
-       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
-      ]
-    if self.op.hotplug:
-      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
-                                               (disk, self.instance),
-                                               self.instance, True, idx)
-      if result.fail_msg:
-        changes.append(("disk/%d" % idx, "assemble:failed"))
-        self.LogWarning("Can't assemble newly created disk %d: %s",
-                        idx, result.fail_msg)
-      else:
-        _, link_name, uri = result.payload
-        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
-                                  constants.HOTPLUG_TARGET_DISK,
-                                  disk, (link_name, uri), idx)
-        changes.append(("disk/%d" % idx, msg))
-
-    return (disk, changes)
-
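Adding a disk with hotplug enabled is a two-step RPC sequence: the block device is first assembled on the primary node, and the hotplug call is only issued if assembly succeeded, using the link name and URI returned in the payload. A reduced sketch of that chain (the assemble and hotplug callables are hypothetical stand-ins for the RPCs):

    def hotplug_new_disk(assemble, hotplug, disk, idx, log):
        """Assemble a freshly created disk and hotplug it only on success.

        Returns (field, value) change descriptions in the LU's result style.
        `assemble` returns (ok, payload) and `hotplug` returns a status string.
        """
        changes = []
        ok, payload = assemble(disk, idx)
        if not ok:
            changes.append(("disk/%d" % idx, "assemble:failed"))
            log("Can't assemble newly created disk %d" % idx)
        else:
            link_name, uri = payload
            changes.append(("disk/%d" % idx, hotplug(disk, link_name, uri, idx)))
        return changes
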
-  def _PostAddDisk(self, _, disk):
-    if not WaitForSync(self, self.instance, disks=[disk],
-                       oneshot=not self.op.wait_for_sync):
-      raise errors.OpExecError("Failed to sync disks of %s" %
-                               self.instance.name)
-
-    # the disk is active at this point, so deactivate it if the instance disks
-    # are supposed to be inactive
-    if not self.instance.disks_active:
-      ShutdownInstanceDisks(self, self.instance, disks=[disk])
-
-  def _ModifyDisk(self, idx, disk, params, _):
-    """Modifies a disk.
-
-    """
-    changes = []
-    if constants.IDISK_MODE in params:
-      disk.mode = params.get(constants.IDISK_MODE)
-      changes.append(("disk.mode/%d" % idx, disk.mode))
-
-    if constants.IDISK_NAME in params:
-      disk.name = params.get(constants.IDISK_NAME)
-      changes.append(("disk.name/%d" % idx, disk.name))
-
-    # Modify arbitrary params in case instance template is ext
-    for key, value in params.iteritems():
-      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
-          self.instance.disk_template == constants.DT_EXT):
-        # stolen from GetUpdatedParams: default means reset/delete
-        if value.lower() == constants.VALUE_DEFAULT:
-          try:
-            del disk.params[key]
-          except KeyError:
-            pass
-        else:
-          disk.params[key] = value
-        changes.append(("disk.params:%s/%d" % (key, idx), value))
-
-    # Update disk object
-    self.cfg.Update(disk, self.feedback_fn)
-
-    return changes
-
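For ext-template disks arbitrary parameters are accepted, and passing the literal value "default" removes a key instead of storing it. The same reset-or-set rule in isolation (plain dicts; the constant is assumed to be the string "default"):

    VALUE_DEFAULT = "default"  # assumed to match constants.VALUE_DEFAULT

    def apply_ext_params(disk_params, updates):
        """Apply free-form parameter updates to an ext-template disk."""
        for key, value in updates.items():
            if value.lower() == VALUE_DEFAULT:
                disk_params.pop(key, None)    # "default" means reset/delete the key
            else:
                disk_params[key] = value
        return disk_params
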
-  def _RemoveDisk(self, idx, root, _):
-    """Removes a disk.
-
-    """
-    hotmsg = ""
-    if self.op.hotplug:
-      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
-                                   constants.HOTPLUG_TARGET_DISK,
-                                   root, None, idx)
-      ShutdownInstanceDisks(self, self.instance, [root])
-
-    RemoveDisks(self, self.instance, disks=[root])
-
-    # if this is a DRBD disk, return its port to the pool
-    if root.dev_type in constants.DTS_DRBD:
-      self.cfg.AddTcpUdpPort(root.logical_id[2])
-
-    # Remove disk from config
-    self.cfg.RemoveInstanceDisk(self.instance.uuid, root.uuid)
-
-    # re-read the instance from the configuration
-    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
-
-    return hotmsg
-
-  def _CreateNewNic(self, idx, params, private):
-    """Creates data structure for a new network interface.
-
-    """
-    mac = params[constants.INIC_MAC]
-    ip = params.get(constants.INIC_IP, None)
-    net = params.get(constants.INIC_NETWORK, None)
-    name = params.get(constants.INIC_NAME, None)
-    net_uuid = self.cfg.LookupNetwork(net)
-    #TODO: not private.filled?? can a nic have no nicparams??
-    nicparams = private.filled
-    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
-                       nicparams=nicparams)
-    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
-
-    changes = [
-      ("nic.%d" % idx,
-       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
-       (mac, ip, private.filled[constants.NIC_MODE],
-       private.filled[constants.NIC_LINK], net)),
-      ]
-
-    if self.op.hotplug:
-      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
-                                constants.HOTPLUG_TARGET_NIC,
-                                nobj, None, idx)
-      changes.append(("nic.%d" % idx, msg))
-
-    return (nobj, changes)
-
-  def _ApplyNicMods(self, idx, nic, params, private):
-    """Modifies a network interface.
-
-    """
-    changes = []
-
-    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
-      if key in params:
-        changes.append(("nic.%s/%d" % (key, idx), params[key]))
-        setattr(nic, key, params[key])
-
-    new_net = params.get(constants.INIC_NETWORK, nic.network)
-    new_net_uuid = self.cfg.LookupNetwork(new_net)
-    if new_net_uuid != nic.network:
-      changes.append(("nic.network/%d" % idx, new_net))
-      nic.network = new_net_uuid
-
-    if private.filled:
-      nic.nicparams = private.filled
-
-      for (key, val) in nic.nicparams.items():
-        changes.append(("nic.%s/%d" % (key, idx), val))
-
-    if self.op.hotplug:
-      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
-                                constants.HOTPLUG_TARGET_NIC,
-                                nic, None, idx)
-      changes.append(("nic/%d" % idx, msg))
-
-    return changes
-
-  def _RemoveNic(self, idx, nic, _):
-    if self.op.hotplug:
-      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
-                                 constants.HOTPLUG_TARGET_NIC,
-                                 nic, None, idx)
-
-  def Exec(self, feedback_fn):
-    """Modifies an instance.
-
-    All parameters take effect only at the next restart of the instance.
-
-    """
-    self.feedback_fn = feedback_fn
-    # Process here the warnings from CheckPrereq, as we don't have a
-    # feedback_fn there.
-    # TODO: Replace with self.LogWarning
-    for warn in self.warn:
-      feedback_fn("WARNING: %s" % warn)
-
-    assert ((self.op.disk_template is None) ^
-            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
-      "Not owning any node resource locks"
-
-    result = []
-
-    # New primary node
-    if self.op.pnode_uuid:
-      self.instance.primary_node = self.op.pnode_uuid
-
-    # runtime memory
-    if self.op.runtime_mem:
-      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
-                                                     self.instance,
-                                                     self.op.runtime_mem)
-      rpcres.Raise("Cannot modify instance runtime memory")
-      result.append(("runtime_memory", self.op.runtime_mem))
-
-    # Apply disk changes
-    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
-    _ApplyContainerMods("disk", inst_disks, result, self.diskmod,
-                        self._CreateNewDisk, self._ModifyDisk,
-                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
-
-    if self.op.disk_template:
-      if __debug__:
-        check_nodes = set(self.cfg.GetInstanceNodes(self.instance.uuid))
-        if self.op.remote_node_uuid:
-          check_nodes.add(self.op.remote_node_uuid)
-        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
-          owned = self.owned_locks(level)
-          assert not (check_nodes - owned), \
-            ("Not owning the correct locks, owning %r, expected at least %r" %
-             (owned, check_nodes))
-
-      r_shut = ShutdownInstanceDisks(self, self.instance)
-      if not r_shut:
-        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
-                                 " proceed with disk template conversion")
-      mode = (self.instance.disk_template, self.op.disk_template)
-      try:
-        if mode in self._DISK_CONVERSIONS:
-          self._DISK_CONVERSIONS[mode](self, feedback_fn)
-        else:
-          self._ConvertInstanceTemplate(feedback_fn)
-      except:
-        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
-        raise
-      result.append(("disk_template", self.op.disk_template))
-
-      assert self.instance.disk_template == self.op.disk_template, \
-        ("Expected disk template '%s', found '%s'" %
-         (self.op.disk_template, self.instance.disk_template))
-
-    # Release node and resource locks if there are any (they might already have
-    # been released during disk conversion)
-    ReleaseLocks(self, locking.LEVEL_NODE)
-    ReleaseLocks(self, locking.LEVEL_NODE_RES)
-
-    # Apply NIC changes
-    if self._new_nics is not None:
-      self.instance.nics = self._new_nics
-      result.extend(self._nic_chgdesc)
-
-    # hvparams changes
-    if self.op.hvparams:
-      self.instance.hvparams = self.hv_inst
-      for key, val in self.op.hvparams.iteritems():
-        result.append(("hv/%s" % key, val))
-
-    # beparams changes
-    if self.op.beparams:
-      self.instance.beparams = self.be_inst
-      for key, val in self.op.beparams.iteritems():
-        result.append(("be/%s" % key, val))
-
-    # OS change
-    if self.op.os_name:
-      self.instance.os = self.op.os_name
-
-    # osparams changes
-    if self.op.osparams:
-      self.instance.osparams = self.os_inst
-      for key, val in self.op.osparams.iteritems():
-        result.append(("os/%s" % key, val))
-
-    if self.op.osparams_private:
-      self.instance.osparams_private = self.os_inst_private
-      for key, val in self.op.osparams_private.iteritems():
-        # Show the Private(...) blurb.
-        result.append(("os_private/%s" % key, repr(val)))
-
-    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
-
-    if self.op.offline is None:
-      # Ignore
-      pass
-    elif self.op.offline:
-      # Mark instance as offline
-      self.instance = self.cfg.MarkInstanceOffline(self.instance.uuid)
-      result.append(("admin_state", constants.ADMINST_OFFLINE))
-    else:
-      # Mark instance as online, but stopped
-      self.instance = self.cfg.MarkInstanceDown(self.instance.uuid)
-      result.append(("admin_state", constants.ADMINST_DOWN))
-
-    UpdateMetadata(feedback_fn, self.rpc, self.instance)
-
-    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
-                self.owned_locks(locking.LEVEL_NODE)), \
-      "All node locks should have been released by now"
-
-    return result
-
-  _DISK_CONVERSIONS = {
-    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
-    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
-    }
-
-
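Only the plain<->drbd pair has dedicated converters in _DISK_CONVERSIONS; every other template change falls through to the generic _ConvertInstanceTemplate path. The dispatch shape reduced to a standalone example with placeholder converters:

    def pick_converter(conversions, generic, old_template, new_template):
        """Return the converter registered for (old, new), else the generic one."""
        return conversions.get((old_template, new_template), generic)

    # usage sketch with placeholder converters
    conversions = {
        ("plain", "drbd"): lambda: "plain->drbd fast path",
        ("drbd", "plain"): lambda: "drbd->plain fast path",
    }
    print(pick_converter(conversions, lambda: "generic conversion",
                         "plain", "file")())
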
 class LUInstanceChangeGroup(LogicalUnit):
   HPATH = "instance-change-group"
   HTYPE = constants.HTYPE_INSTANCE
@@ -4321,7 +710,6 @@
     self.needed_locks = {
       locking.LEVEL_NODEGROUP: [],
       locking.LEVEL_NODE: [],
-      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
       }
 
     self._ExpandAndLockInstance()
diff --git a/lib/cmdlib/instance_create.py b/lib/cmdlib/instance_create.py
new file mode 100644
index 0000000..cb2a6da
--- /dev/null
+++ b/lib/cmdlib/instance_create.py
@@ -0,0 +1,1652 @@
+#
+#
+
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Logical unit for creating a single instance."""
+
+import OpenSSL
+import logging
+import os
+
+
+from ganeti import compat
+from ganeti import constants
+from ganeti import errors
+from ganeti import hypervisor
+from ganeti import locking
+from ganeti.masterd import iallocator
+from ganeti import masterd
+from ganeti import netutils
+from ganeti import objects
+from ganeti import pathutils
+from ganeti import utils
+from ganeti.utils import retry
+from ganeti import serializer
+
+from ganeti.cmdlib.base import LogicalUnit
+
+from ganeti.cmdlib.common import \
+  CheckNodeOnline, \
+  CheckParamsNotGlobal, \
+  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
+  ExpandNodeUuidAndName, \
+  IsValidDiskAccessModeCombination, \
+  CheckDiskTemplateEnabled, CheckIAllocatorOrNode, CheckOSImage, \
+  IsInstanceRunning, DetermineImageSize
+from ganeti.cmdlib.instance_storage import CalculateFileStorageDir, \
+  CheckNodesFreeDiskPerVG, CheckRADOSFreeSpace, CheckSpindlesExclusiveStorage, \
+  ComputeDiskSizePerVG, CreateDisks, \
+  GenerateDiskTemplate, CommitDisks, StartInstanceDisks, \
+  WaitForSync, ComputeDisks, \
+  TemporaryDisk, ImageDisks, WipeDisks
+from ganeti.cmdlib.instance_utils import \
+  CheckNodeNotDrained, CopyLockList, \
+  ReleaseLocks, CheckNodeVmCapable, \
+  RemoveDisks, CheckNodeFreeMemory, \
+  UpdateMetadata, CheckForConflictingIp, \
+  ComputeInstanceCommunicationNIC, \
+  ComputeIPolicyInstanceSpecViolation, \
+  CheckHostnameSane, CheckOpportunisticLocking, \
+  ComputeFullBeParams, ComputeNics, GetClusterDomainSecret, \
+  CheckInstanceExistence, CreateInstanceAllocRequest, BuildInstanceHookEnv, \
+  NICListToTuple, CheckNicsBridgesExist, CheckCompressionTool
+import ganeti.masterd.instance
+
+
+class LUInstanceCreate(LogicalUnit):
+  """Create an instance.
+
+  """
+  HPATH = "instance-add"
+  HTYPE = constants.HTYPE_INSTANCE
+  REQ_BGL = False
+
+  def _CheckDiskTemplateValid(self):
+    """Checks validity of disk template.
+
+    """
+    cluster = self.cfg.GetClusterInfo()
+    if self.op.disk_template is None:
+      # FIXME: It would be better to take the default disk template from the
+      # ipolicy, but for the ipolicy we need the primary node, which we get from
+      # the iallocator, which wants the disk template as input. To solve this
+      # chicken-and-egg problem, it should be possible to specify just a node
+      # group from the iallocator and take the ipolicy from that.
+      self.op.disk_template = cluster.enabled_disk_templates[0]
+    CheckDiskTemplateEnabled(cluster, self.op.disk_template)
+
+  def _CheckDiskArguments(self):
+    """Checks validity of disk-related arguments.
+
+    """
+    # check that disk's names are unique and valid
+    utils.ValidateDeviceNames("disk", self.op.disks)
+
+    self._CheckDiskTemplateValid()
+
+    # check disks. parameter names and consistent adopt/no-adopt strategy
+    has_adopt = has_no_adopt = False
+    for disk in self.op.disks:
+      if self.op.disk_template != constants.DT_EXT:
+        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
+      if constants.IDISK_ADOPT in disk:
+        has_adopt = True
+      else:
+        has_no_adopt = True
+    if has_adopt and has_no_adopt:
+      raise errors.OpPrereqError("Either all disks are adopted or none is",
+                                 errors.ECODE_INVAL)
+    if has_adopt:
+      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
+        raise errors.OpPrereqError("Disk adoption is not supported for the"
+                                   " '%s' disk template" %
+                                   self.op.disk_template,
+                                   errors.ECODE_INVAL)
+      if self.op.iallocator is not None:
+        raise errors.OpPrereqError("Disk adoption not allowed with an"
+                                   " iallocator script", errors.ECODE_INVAL)
+      if self.op.mode == constants.INSTANCE_IMPORT:
+        raise errors.OpPrereqError("Disk adoption not allowed for"
+                                   " instance import", errors.ECODE_INVAL)
+    else:
+      if self.op.disk_template in constants.DTS_MUST_ADOPT:
+        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
+                                   " but no 'adopt' parameter given" %
+                                   self.op.disk_template,
+                                   errors.ECODE_INVAL)
+
+    self.adopt_disks = has_adopt
+
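Disk adoption is all-or-nothing: either every disk dict carries an adopt key or none does, and adoption additionally excludes the iallocator and import modes. The core consistency check as a small standalone predicate (plain string keys instead of the constants):

    def check_adoption(disks):
        """Return True if all disks are adopted, False if none are.

        Raises ValueError on a mixed set, mirroring the OpPrereqError above.
        """
        has_adopt = any("adopt" in d for d in disks)
        has_no_adopt = any("adopt" not in d for d in disks)
        if has_adopt and has_no_adopt:
            raise ValueError("Either all disks are adopted or none is")
        return has_adopt
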
+  def _CheckVLANArguments(self):
+    """ Check validity of VLANs if given
+
+    """
+    for nic in self.op.nics:
+      vlan = nic.get(constants.INIC_VLAN, None)
+      if vlan:
+        if vlan[0] == ".":
+          # vlan starting with dot means single untagged vlan,
+          # might be followed by trunk (:)
+          if not vlan[1:].isdigit():
+            vlanlist = vlan[1:].split(':')
+            for vl in vlanlist:
+              if not vl.isdigit():
+                raise errors.OpPrereqError("Specified VLAN parameter is "
+                                           "invalid : %s" % vlan,
+                                             errors.ECODE_INVAL)
+        elif vlan[0] == ":":
+          # Trunk - tagged only
+          vlanlist = vlan[1:].split(':')
+          for vl in vlanlist:
+            if not vl.isdigit():
+              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
+                                           " : %s" % vlan, errors.ECODE_INVAL)
+        elif vlan.isdigit():
+          # This is the simplest case. No dots, only single digit
+          # -> Create untagged access port, dot needs to be added
+          nic[constants.INIC_VLAN] = "." + vlan
+        else:
+          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
+                                       " : %s" % vlan, errors.ECODE_INVAL)
+
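The accepted VLAN grammar is small: a bare number means an untagged access port and gets a leading dot added, ".N[:M...]" is an untagged VLAN optionally followed by trunked ones, and ":N[:M...]" is a tagged-only trunk. A standalone normaliser following the same rules (ValueError stands in for OpPrereqError):

    def normalize_vlan(vlan):
        """Validate and normalise a non-empty VLAN specification string."""
        if vlan.isdigit():
            return "." + vlan                  # plain number -> untagged access port
        if vlan[:1] in (".", ":"):
            tags = vlan[1:].split(":")
            if all(t.isdigit() for t in tags):
                return vlan                    # already in canonical form
        raise ValueError("Specified VLAN parameter is invalid: %s" % vlan)
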
+  def CheckArguments(self):
+    """Check arguments.
+
+    """
+    if self.op.forthcoming and self.op.commit:
+      raise errors.OpPrereqError("Forthcoming generation and commiting are"
+                                 " mutually exclusive", errors.ECODE_INVAL)
+
+    # do not require name_check to ease forward/backward compatibility
+    # for tools
+    if self.op.no_install and self.op.start:
+      self.LogInfo("No-installation mode selected, disabling startup")
+      self.op.start = False
+    # validate/normalize the instance name
+    self.op.instance_name = \
+      netutils.Hostname.GetNormalizedName(self.op.instance_name)
+
+    if self.op.ip_check and not self.op.name_check:
+      # TODO: make the ip check more flexible and not depend on the name check
+      raise errors.OpPrereqError("Cannot do IP address check without a name"
+                                 " check", errors.ECODE_INVAL)
+
+    # instance name verification
+    if self.op.name_check:
+      self.hostname = CheckHostnameSane(self, self.op.instance_name)
+      self.op.instance_name = self.hostname.name
+      # used in CheckPrereq for ip ping check
+      self.check_ip = self.hostname.ip
+    else:
+      self.check_ip = None
+
+    # add NIC for instance communication
+    if self.op.instance_communication:
+      nic_name = ComputeInstanceCommunicationNIC(self.op.instance_name)
+
+      for nic in self.op.nics:
+        if nic.get(constants.INIC_NAME, None) == nic_name:
+          break
+      else:
+        self.op.nics.append({constants.INIC_NAME: nic_name,
+                             constants.INIC_MAC: constants.VALUE_GENERATE,
+                             constants.INIC_IP: constants.NIC_IP_POOL,
+                             constants.INIC_NETWORK:
+                               self.cfg.GetInstanceCommunicationNetwork()})
+
+    # timeouts for unsafe OS installs
+    if self.op.helper_startup_timeout is None:
+      self.op.helper_startup_timeout = constants.HELPER_VM_STARTUP
+
+    if self.op.helper_shutdown_timeout is None:
+      self.op.helper_shutdown_timeout = constants.HELPER_VM_SHUTDOWN
+
+    # check nics' parameter names
+    for nic in self.op.nics:
+      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
+    # check that NIC's parameters names are unique and valid
+    utils.ValidateDeviceNames("NIC", self.op.nics)
+
+    self._CheckVLANArguments()
+
+    self._CheckDiskArguments()
+    assert self.op.disk_template is not None
+
+    # file storage checks
+    if (self.op.file_driver and
+        not self.op.file_driver in constants.FILE_DRIVER):
+      raise errors.OpPrereqError("Invalid file driver name '%s'" %
+                                 self.op.file_driver, errors.ECODE_INVAL)
+
+    # set default file_driver if unset and required
+    if (not self.op.file_driver and
+        self.op.disk_template in constants.DTS_FILEBASED):
+      self.op.file_driver = constants.FD_DEFAULT
+
+    ### Node/iallocator related checks
+    CheckIAllocatorOrNode(self, "iallocator", "pnode")
+
+    if self.op.pnode is not None:
+      if self.op.disk_template in constants.DTS_INT_MIRROR:
+        if self.op.snode is None:
+          raise errors.OpPrereqError("The networked disk templates need"
+                                     " a mirror node", errors.ECODE_INVAL)
+      elif self.op.snode:
+        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
+                        " template")
+        self.op.snode = None
+
+    CheckOpportunisticLocking(self.op)
+
+    if self.op.mode == constants.INSTANCE_IMPORT:
+      # On import force_variant must be True, because if we forced it at
+      # initial install, our only chance when importing it back is that it
+      # works again!
+      self.op.force_variant = True
+
+      if self.op.no_install:
+        self.LogInfo("No-installation mode has no effect during import")
+
+      if objects.GetOSImage(self.op.osparams):
+        self.LogInfo("OS image has no effect during import")
+    elif self.op.mode == constants.INSTANCE_CREATE:
+      os_image = CheckOSImage(self.op)
+
+      if self.op.os_type is None and os_image is None:
+        raise errors.OpPrereqError("No guest OS or OS image specified",
+                                   errors.ECODE_INVAL)
+
+      if self.op.os_type is not None \
+            and self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
+        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
+                                   " installation" % self.op.os_type,
+                                   errors.ECODE_STATE)
+    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
+      if objects.GetOSImage(self.op.osparams):
+        self.LogInfo("OS image has no effect during import")
+
+      self._cds = GetClusterDomainSecret()
+
+      # Check handshake to ensure both clusters have the same domain secret
+      src_handshake = self.op.source_handshake
+      if not src_handshake:
+        raise errors.OpPrereqError("Missing source handshake",
+                                   errors.ECODE_INVAL)
+
+      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
+                                                           src_handshake)
+      if errmsg:
+        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
+                                   errors.ECODE_INVAL)
+
+      # Load and check source CA
+      self.source_x509_ca_pem = self.op.source_x509_ca
+      if not self.source_x509_ca_pem:
+        raise errors.OpPrereqError("Missing source X509 CA",
+                                   errors.ECODE_INVAL)
+
+      try:
+        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
+                                                    self._cds)
+      except OpenSSL.crypto.Error, err:
+        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
+                                   (err, ), errors.ECODE_INVAL)
+
+      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
+      if errcode is not None:
+        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
+                                   errors.ECODE_INVAL)
+
+      self.source_x509_ca = cert
+
+      src_instance_name = self.op.source_instance_name
+      if not src_instance_name:
+        raise errors.OpPrereqError("Missing source instance name",
+                                   errors.ECODE_INVAL)
+
+      self.source_instance_name = \
+        netutils.GetHostname(name=src_instance_name).name
+
+    else:
+      raise errors.OpPrereqError("Invalid instance creation mode %r" %
+                                 self.op.mode, errors.ECODE_INVAL)
+
+  def ExpandNames(self):
+    """ExpandNames for CreateInstance.
+
+    Figure out the right locks for instance creation.
+
+    """
+    self.needed_locks = {}
+
+    if self.op.commit:
+      (uuid, name) = self.cfg.ExpandInstanceName(self.op.instance_name)
+      if name is None:
+        raise errors.OpPrereqError("Instance %s unknown" %
+                                   self.op.instance_name,
+                                   errors.ECODE_INVAL)
+      self.op.instance_name = name
+      if not self.cfg.GetInstanceInfo(uuid).forthcoming:
+        raise errors.OpPrereqError("Instance %s (with uuid %s) not forthcoming"
+                                   " but --commit was passed." % (name, uuid),
+                                   errors.ECODE_STATE)
+      logging.debug("Verified that instance %s with uuid %s is forthcoming",
+                    name, uuid)
+    else:
+      # this is just a preventive check, but someone might still add this
+      # instance in the meantime; we check again in CheckPrereq
+      CheckInstanceExistence(self, self.op.instance_name)
+
+    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
+
+    if self.op.commit:
+      (uuid, _) = self.cfg.ExpandInstanceName(self.op.instance_name)
+      self.needed_locks[locking.LEVEL_NODE] = self.cfg.GetInstanceNodes(uuid)
+      logging.debug("Forthcoming instance %s resides on %s", uuid,
+                    self.needed_locks[locking.LEVEL_NODE])
+    elif self.op.iallocator:
+      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
+      # specifying a group on instance creation and then selecting nodes from
+      # that group
+      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+
+      if self.op.opportunistic_locking:
+        self.opportunistic_locks[locking.LEVEL_NODE] = True
+        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
+        if self.op.disk_template == constants.DT_DRBD8:
+          self.opportunistic_locks_count[locking.LEVEL_NODE] = 2
+          self.opportunistic_locks_count[locking.LEVEL_NODE_RES] = 2
+    else:
+      (self.op.pnode_uuid, self.op.pnode) = \
+        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
+      nodelist = [self.op.pnode_uuid]
+      if self.op.snode is not None:
+        (self.op.snode_uuid, self.op.snode) = \
+          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
+        nodelist.append(self.op.snode_uuid)
+      self.needed_locks[locking.LEVEL_NODE] = nodelist
+
+    # in case of import lock the source node too
+    if self.op.mode == constants.INSTANCE_IMPORT:
+      src_node = self.op.src_node
+      src_path = self.op.src_path
+
+      if src_path is None:
+        self.op.src_path = src_path = self.op.instance_name
+
+      if src_node is None:
+        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+        self.op.src_node = None
+        if os.path.isabs(src_path):
+          raise errors.OpPrereqError("Importing an instance from a path"
+                                     " requires a source node option",
+                                     errors.ECODE_INVAL)
+      else:
+        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
+          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
+        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
+          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
+        if not os.path.isabs(src_path):
+          self.op.src_path = \
+            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
+
+    self.needed_locks[locking.LEVEL_NODE_RES] = \
+      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+
+    # Optimistically acquire shared group locks (we're reading the
+    # configuration).  We can't just call GetInstanceNodeGroups, because the
+    # instance doesn't exist yet. Therefore we lock all node groups of all
+    # nodes we have.
+    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
+      # In the case we lock all nodes for opportunistic allocation, we have no
+      # choice but to lock all groups, because they're allocated before nodes.
+      # This is sad, but true. At least we release all those we don't need in
+      # CheckPrereq later.
+      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
+    else:
+      self.needed_locks[locking.LEVEL_NODEGROUP] = \
+        list(self.cfg.GetNodeGroupsFromNodes(
+          self.needed_locks[locking.LEVEL_NODE]))
+    self.share_locks[locking.LEVEL_NODEGROUP] = 1
+
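Node-group locks are derived from the node locks that were requested: a concrete node list locks only the groups of those nodes (shared), while ALL_SET node locking forces ALL_SET group locking because groups are acquired before nodes. A reduced sketch of that decision, with a dict standing in for the configuration's node-to-group mapping:

    ALL_SET = object()  # stand-in for locking.ALL_SET

    def groups_to_lock(node_locks, node_to_group):
        """Compute which node-group locks to request for instance creation."""
        if node_locks is ALL_SET:
            return ALL_SET                  # groups are acquired before nodes
        return sorted(set(node_to_group[n] for n in node_locks))

    # usage sketch
    print(groups_to_lock(["node1", "node2"], {"node1": "g1", "node2": "g1"}))
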
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_NODE_RES:
+      if self.op.opportunistic_locking:
+        self.needed_locks[locking.LEVEL_NODE_RES] = \
+          CopyLockList(list(self.owned_locks(locking.LEVEL_NODE)))
+
+  def _RunAllocator(self):
+    """Run the allocator based on input opcode.
+
+    """
+    if self.op.opportunistic_locking:
+      # Only consider nodes for which a lock is held
+      node_name_whitelist = self.cfg.GetNodeNames(
+        set(self.owned_locks(locking.LEVEL_NODE)) &
+        set(self.owned_locks(locking.LEVEL_NODE_RES)))
+    else:
+      node_name_whitelist = None
+
+    req = CreateInstanceAllocRequest(self.op, self.disks,
+                                     self.nics, self.be_full,
+                                     node_name_whitelist)
+    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
+
+    ial.Run(self.op.iallocator)
+
+    if not ial.success:
+      # When opportunistic locks are used only a temporary failure is generated
+      if self.op.opportunistic_locking:
+        ecode = errors.ECODE_TEMP_NORES
+        self.LogInfo("IAllocator '%s' failed on opportunistically acquired"
+                     " nodes: %s", self.op.iallocator, ial.info)
+      else:
+        ecode = errors.ECODE_NORES
+
+      raise errors.OpPrereqError("Can't compute nodes using"
+                                 " iallocator '%s': %s" %
+                                 (self.op.iallocator, ial.info),
+                                 ecode)
+
+    (self.op.pnode_uuid, self.op.pnode) = \
+      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
+    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
+                 self.op.instance_name, self.op.iallocator,
+                 utils.CommaJoin(ial.result))
+
+    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
+
+    if req.RequiredNodes() == 2:
+      (self.op.snode_uuid, self.op.snode) = \
+        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
+
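With opportunistic locking the allocator may only place the instance on nodes whose node and node-resource locks were both actually obtained, and a failed placement is reported as a temporary lack of resources rather than a hard error. The whitelist computation on its own:

    def allocator_whitelist(owned_node_locks, owned_node_res_locks, opportunistic):
        """Return the node whitelist for the allocator, or None for all nodes."""
        if not opportunistic:
            return None
        # only nodes for which both lock levels are actually held may be used
        return sorted(set(owned_node_locks) & set(owned_node_res_locks))
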
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    This runs on master, primary and secondary nodes of the instance.
+
+    """
+    env = {
+      "ADD_MODE": self.op.mode,
+      }
+    if self.op.mode == constants.INSTANCE_IMPORT:
+      env["SRC_NODE"] = self.op.src_node
+      env["SRC_PATH"] = self.op.src_path
+      env["SRC_IMAGES"] = self.src_images
+
+    env.update(BuildInstanceHookEnv(
+      name=self.op.instance_name,
+      primary_node_name=self.op.pnode,
+      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
+      status=self.op.start,
+      os_type=self.op.os_type,
+      minmem=self.be_full[constants.BE_MINMEM],
+      maxmem=self.be_full[constants.BE_MAXMEM],
+      vcpus=self.be_full[constants.BE_VCPUS],
+      nics=NICListToTuple(self, self.nics),
+      disk_template=self.op.disk_template,
+      # Note that self.disks here is not a list with objects.Disk
+      # but with dicts as returned by ComputeDisks.
+      disks=self.disks,
+      bep=self.be_full,
+      hvp=self.hv_full,
+      hypervisor_name=self.op.hypervisor,
+      tags=self.op.tags,
+      ))
+
+    return env
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
+    return nl, nl
+
+  def _ReadExportInfo(self):
+    """Reads the export information from disk.
+
+    It will override the opcode source node and path with the actual
+    information, if these two were not specified before.
+
+    @return: the export information
+
+    """
+    assert self.op.mode == constants.INSTANCE_IMPORT
+
+    if self.op.src_node_uuid is None:
+      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
+      exp_list = self.rpc.call_export_list(locked_nodes)
+      found = False
+      for node_uuid in exp_list:
+        if exp_list[node_uuid].fail_msg:
+          continue
+        if self.op.src_path in exp_list[node_uuid].payload:
+          found = True
+          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
+          self.op.src_node_uuid = node_uuid
+          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
+                                            self.op.src_path)
+          break
+      if not found:
+        raise errors.OpPrereqError("No export found for relative path %s" %
+                                   self.op.src_path, errors.ECODE_INVAL)
+
+    CheckNodeOnline(self, self.op.src_node_uuid)
+    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
+    result.Raise("No export or invalid export found in dir %s" %
+                 self.op.src_path)
+
+    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
+    if not export_info.has_section(constants.INISECT_EXP):
+      raise errors.ProgrammerError("Corrupted export config",
+                                   errors.ECODE_ENVIRON)
+
+    ei_version = export_info.get(constants.INISECT_EXP, "version")
+    if int(ei_version) != constants.EXPORT_VERSION:
+      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
+                                 (ei_version, constants.EXPORT_VERSION),
+                                 errors.ECODE_ENVIRON)
+    return export_info
+
+  def _ReadExportParams(self, einfo):
+    """Use export parameters as defaults.
+
+    In case the opcode doesn't specify (as in override) some instance
+    parameters, then try to use them from the export information, if
+    that declares them.
+
+    """
+    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
+
+    if not self.op.disks:
+      disks = []
+      # TODO: import the disk iv_name too
+      for idx in range(constants.MAX_DISKS):
+        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
+          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
+          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
+          disk = {
+            constants.IDISK_SIZE: disk_sz,
+            constants.IDISK_NAME: disk_name
+            }
+          disks.append(disk)
+      self.op.disks = disks
+      if not disks and self.op.disk_template != constants.DT_DISKLESS:
+        raise errors.OpPrereqError("No disk info specified and the export"
+                                   " is missing the disk information",
+                                   errors.ECODE_INVAL)
+
+    if not self.op.nics:
+      nics = []
+      for idx in range(constants.MAX_NICS):
+        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
+          ndict = {}
+          for name in [constants.INIC_IP,
+                       constants.INIC_MAC, constants.INIC_NAME]:
+            nic_param_name = "nic%d_%s" % (idx, name)
+            if einfo.has_option(constants.INISECT_INS, nic_param_name):
+              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
+              ndict[name] = v
+          network = einfo.get(constants.INISECT_INS,
+                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
+          # in case network is given link and mode are inherited
+          # from nodegroup's netparams and thus should not be passed here
+          if network:
+            ndict[constants.INIC_NETWORK] = network
+          else:
+            for name in list(constants.NICS_PARAMETERS):
+              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
+              ndict[name] = v
+          nics.append(ndict)
+        else:
+          break
+      self.op.nics = nics
+
+    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
+      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
+
+    if (self.op.hypervisor is None and
+        einfo.has_option(constants.INISECT_INS, "hypervisor")):
+      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
+
+    if einfo.has_section(constants.INISECT_HYP):
+      # use the export parameters but do not override the ones
+      # specified by the user
+      for name, value in einfo.items(constants.INISECT_HYP):
+        if name not in self.op.hvparams:
+          self.op.hvparams[name] = value
+
+    if einfo.has_section(constants.INISECT_BEP):
+      # use the parameters, without overriding
+      for name, value in einfo.items(constants.INISECT_BEP):
+        if name not in self.op.beparams:
+          self.op.beparams[name] = value
+        # Compatibility for the old "memory" be param
+        if name == constants.BE_MEMORY:
+          if constants.BE_MAXMEM not in self.op.beparams:
+            self.op.beparams[constants.BE_MAXMEM] = value
+          if constants.BE_MINMEM not in self.op.beparams:
+            self.op.beparams[constants.BE_MINMEM] = value
+    else:
+      # try to read the parameters old style, from the main section
+      for name in constants.BES_PARAMETERS:
+        if (name not in self.op.beparams and
+            einfo.has_option(constants.INISECT_INS, name)):
+          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
+
+    if einfo.has_section(constants.INISECT_OSP):
+      # use the parameters, without overriding
+      for name, value in einfo.items(constants.INISECT_OSP):
+        if name not in self.op.osparams:
+          self.op.osparams[name] = value
+
+    if einfo.has_section(constants.INISECT_OSP_PRIVATE):
+      # use the parameters, without overriding
+      for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
+        if name not in self.op.osparams_private:
+          self.op.osparams_private[name] = serializer.Private(value, descr=name)
+
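Export-provided parameters only fill gaps the opcode left open, and the legacy "memory" backend parameter is expanded into maxmem/minmem unless those were given explicitly. The backend-parameter merge rule as a standalone sketch (plain string keys instead of the constants):

    def merge_beparams(opcode_bep, export_bep):
        """Fill backend parameters from an export without overriding user values."""
        merged = dict(opcode_bep)
        for name, value in export_bep.items():
            if name not in merged:
                merged[name] = value
            if name == "memory":               # legacy single-value parameter
                merged.setdefault("maxmem", value)
                merged.setdefault("minmem", value)
        return merged
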
+  def _RevertToDefaults(self, cluster):
+    """Revert the instance parameters to the default values.
+
+    """
+    # hvparams
+    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
+    for name in self.op.hvparams.keys():
+      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
+        del self.op.hvparams[name]
+    # beparams
+    be_defs = cluster.SimpleFillBE({})
+    for name in self.op.beparams.keys():
+      if name in be_defs and be_defs[name] == self.op.beparams[name]:
+        del self.op.beparams[name]
+    # nic params
+    nic_defs = cluster.SimpleFillNIC({})
+    for nic in self.op.nics:
+      for name in constants.NICS_PARAMETERS:
+        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
+          del nic[name]
+    # osparams
+    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
+    for name in self.op.osparams.keys():
+      if name in os_defs and os_defs[name] == self.op.osparams[name]:
+        del self.op.osparams[name]
+
+    os_defs_ = cluster.SimpleFillOS(self.op.os_type, {},
+                                    os_params_private={})
+    for name in self.op.osparams_private.keys():
+      if name in os_defs_ and os_defs_[name] == self.op.osparams_private[name]:
+        del self.op.osparams_private[name]
+
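The identify-defaults pass strips every parameter whose value matches what the cluster would fill in anyway, so the stored instance keeps tracking cluster defaults instead of carrying frozen copies. The pruning rule by itself:

    def revert_to_defaults(params, defaults):
        """Drop entries whose value equals the cluster-filled default."""
        return dict((k, v) for (k, v) in params.items()
                    if not (k in defaults and defaults[k] == v))

    # usage sketch: "maxmem" collapses back to the default, "vcpus" is kept
    print(revert_to_defaults({"maxmem": 1024, "vcpus": 4},
                             {"maxmem": 1024, "vcpus": 1}))
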
+  def _GetNodesFromForthcomingInstance(self):
+    """Set nodes as in the forthcoming instance
+
+    """
+    (uuid, name) = self.cfg.ExpandInstanceName(self.op.instance_name)
+    inst = self.cfg.GetInstanceInfo(uuid)
+    self.op.pnode_uuid = inst.primary_node
+    self.op.pnode = self.cfg.GetNodeName(inst.primary_node)
+    sec_nodes = self.cfg.GetInstanceSecondaryNodes(uuid)
+    node_names = [self.op.pnode]
+    if sec_nodes:
+      self.op.snode_uuid = sec_nodes[0]
+      self.op.snode = self.cfg.GetNodeName(sec_nodes[0])
+      node_names.append(self.op.snode)
+    self.LogInfo("Nodes of instance %s: %s", name, node_names)
+
+  def CheckPrereq(self): # pylint: disable=R0914
+    """Check prerequisites.
+
+    """
+    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+
+    if self.op.commit:
+      # Check that the instance is still on the cluster, forthcoming, and
+      # still resides on the nodes we acquired.
+      (uuid, name) = self.cfg.ExpandInstanceName(self.op.instance_name)
+      if uuid is None:
+        raise errors.OpPrereqError("Instance %s disappeared from the cluster"
+                                   " while waiting for locks"
+                                   % (self.op.instance_name,),
+                                   errors.ECODE_STATE)
+      if not self.cfg.GetInstanceInfo(uuid).forthcoming:
+        raise errors.OpPrereqError("Instance %s (with uuid %s) is no longer"
+                                   " forthcoming" % (name, uuid),
+                                   errors.ECODE_STATE)
+      required_nodes = self.cfg.GetInstanceNodes(uuid)
+      if not owned_nodes.issuperset(required_nodes):
+        raise errors.OpPrereqError("Forthcoming instance %s nodes changed"
+                                   " since locks were acquired; retry the"
+                                   " operation" % self.op.instance_name,
+                                   errors.ECODE_STATE)
+    else:
+      CheckInstanceExistence(self, self.op.instance_name)
+
+    # Check that the optimistically acquired groups are correct wrt the
+    # acquired nodes
+    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
+    if not owned_groups.issuperset(cur_groups):
+      raise errors.OpPrereqError("New instance %s's node groups changed since"
+                                 " locks were acquired, current groups are"
+                                 " '%s', owning groups '%s'; retry the"
+                                 " operation" %
+                                 (self.op.instance_name,
+                                  utils.CommaJoin(cur_groups),
+                                  utils.CommaJoin(owned_groups)),
+                                 errors.ECODE_STATE)
+
+    self.instance_file_storage_dir = CalculateFileStorageDir(
+        self.op.disk_template, self.cfg, self.op.instance_name,
+        self.op.file_storage_dir)
+
+    if self.op.mode == constants.INSTANCE_IMPORT:
+      export_info = self._ReadExportInfo()
+      self._ReadExportParams(export_info)
+      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
+    else:
+      self._old_instance_name = None
+
+    if (not self.cfg.GetVGName() and
+        self.op.disk_template not in constants.DTS_NOT_LVM):
+      raise errors.OpPrereqError("Cluster does not support lvm-based"
+                                 " instances", errors.ECODE_STATE)
+
+    if (self.op.hypervisor is None or
+        self.op.hypervisor == constants.VALUE_AUTO):
+      self.op.hypervisor = self.cfg.GetHypervisorType()
+
+    cluster = self.cfg.GetClusterInfo()
+    enabled_hvs = cluster.enabled_hypervisors
+    if self.op.hypervisor not in enabled_hvs:
+      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
+                                 " cluster (%s)" %
+                                 (self.op.hypervisor, ",".join(enabled_hvs)),
+                                 errors.ECODE_STATE)
+
+    # Check tag validity
+    for tag in self.op.tags:
+      objects.TaggableObject.ValidateTag(tag)
+
+    # check hypervisor parameter syntax (locally)
+    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
+    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
+                                      self.op.hvparams)
+    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
+    hv_type.CheckParameterSyntax(filled_hvp)
+    self.hv_full = filled_hvp
+    # check that we don't specify global parameters on an instance
+    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
+                         "instance", "cluster")
+
+    # fill and remember the beparams dict
+    self.be_full = ComputeFullBeParams(self.op, cluster)
+
+    # build os parameters
+    if self.op.osparams_private is None:
+      self.op.osparams_private = serializer.PrivateDict()
+    if self.op.osparams_secret is None:
+      self.op.osparams_secret = serializer.PrivateDict()
+
+    self.os_full = cluster.SimpleFillOS(
+      self.op.os_type,
+      self.op.osparams,
+      os_params_private=self.op.osparams_private,
+      os_params_secret=self.op.osparams_secret
+    )
+
+    # now that hvp/bep are in final format, let's reset to defaults,
+    # if told to do so
+    if self.op.identify_defaults:
+      self._RevertToDefaults(cluster)
+
+    # NIC buildup
+    self.nics = ComputeNics(self.op, cluster, self.check_ip, self.cfg,
+                            self.proc.GetECId())
+
+    # disk checks/pre-build
+    default_vg = self.cfg.GetVGName()
+    self.disks = ComputeDisks(self.op.disks, self.op.disk_template, default_vg)
+
+    if self.op.mode == constants.INSTANCE_IMPORT:
+      disk_images = []
+      for idx in range(len(self.disks)):
+        option = "disk%d_dump" % idx
+        if export_info.has_option(constants.INISECT_INS, option):
+          # FIXME: are the old os-es, disk sizes, etc. useful?
+          export_name = export_info.get(constants.INISECT_INS, option)
+          image = utils.PathJoin(self.op.src_path, export_name)
+          disk_images.append(image)
+        else:
+          disk_images.append(False)
+
+      self.src_images = disk_images
+
+      if self.op.instance_name == self._old_instance_name:
+        for idx, nic in enumerate(self.nics):
+          if nic.mac == constants.VALUE_AUTO:
+            nic_mac_ini = "nic%d_mac" % idx
+            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
+
+    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
+
+    # ip ping checks (we use the same ip that was resolved in ExpandNames)
+    if self.op.ip_check:
+      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
+        raise errors.OpPrereqError("IP %s of instance %s already in use" %
+                                   (self.check_ip, self.op.instance_name),
+                                   errors.ECODE_NOTUNIQUE)
+
+    #### mac address generation
+    # By generating here the mac address both the allocator and the hooks get
+    # the real final mac address rather than the 'auto' or 'generate' value.
+    # There is a race condition between the generation and the instance object
+    # creation, which means that we know the mac is valid now, but we're not
+    # sure it will be when we actually add the instance. If things go bad
+    # adding the instance will abort because of a duplicate mac, and the
+    # creation job will fail.
+    for nic in self.nics:
+      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
+        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
+
+    #### allocator run
+
+    if self.op.iallocator is not None:
+      if self.op.commit:
+        self._GetNodesFromForthcomingInstance()
+      else:
+        self._RunAllocator()
+
+    # Release all unneeded node locks
+    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
+                               self.op.src_node_uuid])
+    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
+    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
+    # Release all unneeded group locks
+    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
+                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))
+
+    assert (self.owned_locks(locking.LEVEL_NODE) ==
+            self.owned_locks(locking.LEVEL_NODE_RES)), \
+      "Node locks differ from node resource locks"
+
+    #### node related checks
+
+    # check primary node
+    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
+    assert self.pnode is not None, \
+      "Cannot retrieve locked node %s" % self.op.pnode_uuid
+    if pnode.offline:
+      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
+                                 pnode.name, errors.ECODE_STATE)
+    if pnode.drained:
+      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
+                                 pnode.name, errors.ECODE_STATE)
+    if not pnode.vm_capable:
+      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
+                                 " '%s'" % pnode.name, errors.ECODE_STATE)
+
+    self.secondaries = []
+
+    # Fill in any IPs from IP pools. This must happen here, because we need to
+    # know the nic's primary node, as specified by the iallocator
+    for idx, nic in enumerate(self.nics):
+      net_uuid = nic.network
+      if net_uuid is not None:
+        nobj = self.cfg.GetNetwork(net_uuid)
+        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
+        if netparams is None:
+          raise errors.OpPrereqError("No netparams found for network"
+                                     " %s. Probably not connected to"
+                                     " node's %s nodegroup" %
+                                     (nobj.name, self.pnode.name),
+                                     errors.ECODE_INVAL)
+        self.LogInfo("NIC/%d inherits netparams %s" %
+                     (idx, netparams.values()))
+        nic.nicparams = dict(netparams)
+        if nic.ip is not None:
+          if nic.ip.lower() == constants.NIC_IP_POOL:
+            try:
+              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
+            except errors.ReservationError:
+              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
+                                         " from the address pool" % idx,
+                                         errors.ECODE_STATE)
+            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
+          else:
+            try:
+              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
+                                 check=self.op.conflicts_check)
+            except errors.ReservationError:
+              raise errors.OpPrereqError("IP address %s already in use"
+                                         " or does not belong to network %s" %
+                                         (nic.ip, nobj.name),
+                                         errors.ECODE_NOTUNIQUE)
+
+      # net is None, ip None or given
+      elif self.op.conflicts_check:
+        CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
+
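A NIC attached to a network either draws a free address from the pool (when its IP is the literal "pool") or reserves the explicitly requested address, failing if it is taken or outside the network; NICs without a network only get the conflict check. A compact sketch of the pool-or-reserve decision (generate_ip and reserve_ip are hypothetical stand-ins for the config calls):

    NIC_IP_POOL = "pool"  # assumed to match constants.NIC_IP_POOL

    def assign_nic_ip(requested_ip, generate_ip, reserve_ip):
        """Return the final NIC IP: drawn from the pool or reserved as requested.

        `generate_ip` and `reserve_ip` raise an exception when the pool is
        exhausted or the address is already taken.
        """
        if requested_ip is None:
            return None                        # NIC without an IP address
        if requested_ip.lower() == NIC_IP_POOL:
            return generate_ip()               # pick a free address from the pool
        reserve_ip(requested_ip)               # explicit address must be reservable
        return requested_ip
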
+    # mirror node verification
+    if self.op.disk_template in constants.DTS_INT_MIRROR:
+      if self.op.snode_uuid == pnode.uuid:
+        raise errors.OpPrereqError("The secondary node cannot be the"
+                                   " primary node", errors.ECODE_INVAL)
+      CheckNodeOnline(self, self.op.snode_uuid)
+      CheckNodeNotDrained(self, self.op.snode_uuid)
+      CheckNodeVmCapable(self, self.op.snode_uuid)
+      self.secondaries.append(self.op.snode_uuid)
+
+      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
+      if pnode.group != snode.group:
+        self.LogWarning("The primary and secondary nodes are in two"
+                        " different node groups; the disk parameters"
+                        " from the first disk's node group will be"
+                        " used")
+
+    nodes = [pnode]
+    if self.op.disk_template in constants.DTS_INT_MIRROR:
+      nodes.append(snode)
+    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
+    excl_stor = compat.any(map(has_es, nodes))
+    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
+      raise errors.OpPrereqError("Disk template %s not supported with"
+                                 " exclusive storage" % self.op.disk_template,
+                                 errors.ECODE_STATE)
+    for disk in self.disks:
+      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
+
+    node_uuids = [pnode.uuid] + self.secondaries
+
+    if not self.adopt_disks:
+      if self.op.disk_template == constants.DT_RBD:
+        # _CheckRADOSFreeSpace() is just a placeholder.
+        # Any function that checks prerequisites can be placed here.
+        # Check if there is enough space on the RADOS cluster.
+        CheckRADOSFreeSpace()
+      elif self.op.disk_template == constants.DT_EXT:
+        # FIXME: Function that checks prereqs if needed
+        pass
+      elif self.op.disk_template in constants.DTS_LVM:
+        # Check lv size requirements, if not adopting
+        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
+        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
+      else:
+        # FIXME: add checks for other, non-adopting, non-lvm disk templates
+        pass
+
+    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
+      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
+                                disk[constants.IDISK_ADOPT])
+                     for disk in self.disks])
+      if len(all_lvs) != len(self.disks):
+        raise errors.OpPrereqError("Duplicate volume names given for adoption",
+                                   errors.ECODE_INVAL)
+      for lv_name in all_lvs:
+        try:
+          # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
+          # to ReserveLV use the same syntax
+          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
+        except errors.ReservationError:
+          raise errors.OpPrereqError("LV named %s used by another instance" %
+                                     lv_name, errors.ECODE_NOTUNIQUE)
+
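+      # Query the primary node for its volume groups and logical volumes so
+      # that the adoption data can be checked against what actually exists.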
+      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
+      vg_names.Raise("Cannot get VG information from node %s" % pnode.name,
+                     prereq=True)
+
+      node_lvs = self.rpc.call_lv_list([pnode.uuid],
+                                       vg_names.payload.keys())[pnode.uuid]
+      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name,
+                     prereq=True)
+      node_lvs = node_lvs.payload
+
+      delta = all_lvs.difference(node_lvs.keys())
+      if delta:
+        raise errors.OpPrereqError("Missing logical volume(s): %s" %
+                                   utils.CommaJoin(delta),
+                                   errors.ECODE_INVAL)
+      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
+      if online_lvs:
+        raise errors.OpPrereqError("Online logical volumes found, cannot"
+                                   " adopt: %s" % utils.CommaJoin(online_lvs),
+                                   errors.ECODE_STATE)
+      # update the size of disk based on what is found
+      for dsk in self.disks:
+        dsk[constants.IDISK_SIZE] = \
+          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
+                                        dsk[constants.IDISK_ADOPT])][0]))
+
+    elif self.op.disk_template == constants.DT_BLOCK:
+      # Normalize and de-duplicate device paths
+      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
+                       for disk in self.disks])
+      if len(all_disks) != len(self.disks):
+        raise errors.OpPrereqError("Duplicate disk names given for adoption",
+                                   errors.ECODE_INVAL)
+      baddisks = [d for d in all_disks
+                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
+      if baddisks:
+        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
+                                   " cannot be adopted" %
+                                   (utils.CommaJoin(baddisks),
+                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
+                                   errors.ECODE_INVAL)
+
+      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
+                                            list(all_disks))[pnode.uuid]
+      node_disks.Raise("Cannot get block device information from node %s" %
+                       pnode.name, prereq=True)
+      node_disks = node_disks.payload
+      delta = all_disks.difference(node_disks.keys())
+      if delta:
+        raise errors.OpPrereqError("Missing block device(s): %s" %
+                                   utils.CommaJoin(delta),
+                                   errors.ECODE_INVAL)
+      for dsk in self.disks:
+        dsk[constants.IDISK_SIZE] = \
+          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
+
+    # Check disk access param to be compatible with specified hypervisor
+    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
+    node_group = self.cfg.GetNodeGroup(node_info.group)
+    group_disk_params = self.cfg.GetGroupDiskParams(node_group)
+    group_access_type = group_disk_params[self.op.disk_template].get(
+      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
+    )
+    for dsk in self.disks:
+      access_type = dsk.get(constants.IDISK_ACCESS, group_access_type)
+      if not IsValidDiskAccessModeCombination(self.op.hypervisor,
+                                              self.op.disk_template,
+                                              access_type):
+        raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
+                                   " used with %s disk access param" %
+                                   (self.op.hypervisor, access_type),
+                                    errors.ECODE_STATE)
+
+    # Verify instance specs
+    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
+    ispec = {
+      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
+      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
+      constants.ISPEC_DISK_COUNT: len(self.disks),
+      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
+                                  for disk in self.disks],
+      constants.ISPEC_NIC_COUNT: len(self.nics),
+      constants.ISPEC_SPINDLE_USE: spindle_use,
+      }
+
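+    # Check the requested specs against the node group's instance policy;
+    # violations are fatal unless self.op.ignore_ipolicy is set.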
+    group_info = self.cfg.GetNodeGroup(pnode.group)
+    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
+    disk_types = [self.op.disk_template] * len(self.disks)
+    res = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec, disk_types)
+    if not self.op.ignore_ipolicy and res:
+      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
+             (pnode.group, group_info.name, utils.CommaJoin(res)))
+      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+
+    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
+
+    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full,
+                  self.op.force_variant)
+
+    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
+
+    CheckCompressionTool(self, self.op.compress)
+
+    #TODO: _CheckExtParams (remotely)
+    # Check parameters for extstorage
+
+    # memory check on primary node
+    #TODO(dynmem): use MINMEM for checking
+    if self.op.start:
+      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
+                                self.op.hvparams)
+      CheckNodeFreeMemory(self, self.pnode.uuid,
+                          "creating instance %s" % self.op.instance_name,
+                          self.be_full[constants.BE_MAXMEM],
+                          self.op.hypervisor, hvfull)
+
+    self.dry_run_result = list(node_uuids)
+
+  def _RemoveDegradedDisks(self, feedback_fn, disk_abort, instance):
+    """Removes degraded disks and instance.
+
+    It optionally checks whether disks are degraded.  If the disks are
+    degraded, they are removed and the instance is also removed from
+    the configuration.
+
+    If L{disk_abort} is True, then the disks are considered degraded
+    and removed, and the instance is removed from the configuration.
+
+    If L{disk_abort} is False, then it first checks whether disks are
+    degraded and, if so, it removes the disks and the instance is
+    removed from the configuration.
+
+    @type feedback_fn: callable
+    @param feedback_fn: function used to send feedback back to the caller
+
+    @type disk_abort: boolean
+    @param disk_abort:
+      True if disks are degraded, False to first check if disks are
+      degraded
+    @type instance: L{objects.Instance}
+    @param instance: instance containing the disks to check
+
+    @rtype: NoneType
+    @return: None
+    @raise errors.OpExecError: if disks are degraded
+
+    """
+    disk_info = self.cfg.GetInstanceDisks(instance.uuid)
+    if disk_abort:
+      pass
+    elif self.op.wait_for_sync:
+      disk_abort = not WaitForSync(self, instance)
+    elif utils.AnyDiskOfType(disk_info, constants.DTS_INT_MIRROR):
+      # make sure the disks are not degraded (still sync-ing is ok)
+      feedback_fn("* checking mirrors status")
+      disk_abort = not WaitForSync(self, instance, oneshot=True)
+    else:
+      disk_abort = False
+
+    if disk_abort:
+      RemoveDisks(self, instance)
+      for disk_uuid in instance.disks:
+        self.cfg.RemoveInstanceDisk(instance.uuid, disk_uuid)
+      self.cfg.RemoveInstance(instance.uuid)
+      raise errors.OpExecError("There are some degraded disks for"
+                               " this instance")
+
+  def RunOsScripts(self, feedback_fn, iobj):
+    """Run OS scripts
+
+    If necessary, disks are paused.  It handles instance create,
+    import, and remote import.
+
+    @type feedback_fn: callable
+    @param feedback_fn: function used to send feedback back to the caller
+
+    @type iobj: L{objects.Instance}
+    @param iobj: instance object
+
+    """
+    if iobj.disks and not self.adopt_disks:
+      disks = self.cfg.GetInstanceDisks(iobj.uuid)
+      if self.op.mode == constants.INSTANCE_CREATE:
+        os_image = objects.GetOSImage(self.op.osparams)
+
+        if os_image is None and not self.op.no_install:
+          pause_sync = (not self.op.wait_for_sync and
+                        utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR))
+          if pause_sync:
+            feedback_fn("* pausing disk sync to install instance OS")
+            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
+                                                              (disks, iobj),
+                                                              True)
+            for idx, success in enumerate(result.payload):
+              if not success:
+                logging.warn("pause-sync of instance %s for disk %d failed",
+                             self.op.instance_name, idx)
+
+          feedback_fn("* running the instance OS create scripts...")
+          # FIXME: pass debug option from opcode to backend
+          os_add_result = \
+            self.rpc.call_instance_os_add(self.pnode.uuid,
+                                          (iobj, self.op.osparams_secret),
+                                          False,
+                                          self.op.debug_level)
+          if pause_sync:
+            feedback_fn("* resuming disk sync")
+            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
+                                                              (disks, iobj),
+                                                              False)
+            for idx, success in enumerate(result.payload):
+              if not success:
+                logging.warn("resume-sync of instance %s for disk %d failed",
+                             self.op.instance_name, idx)
+
+          os_add_result.Raise("Could not add os for instance %s"
+                              " on node %s" % (self.op.instance_name,
+                                               self.pnode.name))
+
+      else:
+        if self.op.mode == constants.INSTANCE_IMPORT:
+          feedback_fn("* running the instance OS import scripts...")
+
+          transfers = []
+
+          for idx, image in enumerate(self.src_images):
+            if not image:
+              continue
+
+            if iobj.os:
+              dst_io = constants.IEIO_SCRIPT
+              dst_ioargs = ((disks[idx], iobj), idx)
+            else:
+              dst_io = constants.IEIO_RAW_DISK
+              dst_ioargs = (disks[idx], iobj)
+
+            # FIXME: pass debug option from opcode to backend
+            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
+                                               constants.IEIO_FILE, (image, ),
+                                               dst_io, dst_ioargs,
+                                               None)
+            transfers.append(dt)
+
+          import_result = \
+            masterd.instance.TransferInstanceData(self, feedback_fn,
+                                                  self.op.src_node_uuid,
+                                                  self.pnode.uuid,
+                                                  self.pnode.secondary_ip,
+                                                  self.op.compress,
+                                                  iobj, transfers)
+          if not compat.all(import_result):
+            self.LogWarning("Some disks for instance %s on node %s were not"
+                            " imported successfully" % (self.op.instance_name,
+                                                        self.pnode.name))
+
+          rename_from = self._old_instance_name
+
+        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
+          feedback_fn("* preparing remote import...")
+          # The source cluster will stop the instance before attempting to make
+          # a connection. In some cases stopping an instance can take a long
+          # time, hence the shutdown timeout is added to the connection
+          # timeout.
+          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
+                             self.op.source_shutdown_timeout)
+          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
+
+          assert iobj.primary_node == self.pnode.uuid
+          disk_results = \
+            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
+                                          self.source_x509_ca,
+                                          self._cds, self.op.compress, timeouts)
+          if not compat.all(disk_results):
+            # TODO: Should the instance still be started, even if some disks
+            # failed to import (valid for local imports, too)?
+            self.LogWarning("Some disks for instance %s on node %s were not"
+                            " imported successfully" % (self.op.instance_name,
+                                                        self.pnode.name))
+
+          rename_from = self.source_instance_name
+
+        else:
+          # also checked in the prereq part
+          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
+                                       % self.op.mode)
+
+        assert iobj.name == self.op.instance_name
+
+        # Run rename script on newly imported instance
+        if iobj.os:
+          feedback_fn("Running rename script for %s" % self.op.instance_name)
+          result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
+                                                     rename_from,
+                                                     self.op.debug_level)
+          result.Warn("Failed to run rename script for %s on node %s" %
+                      (self.op.instance_name, self.pnode.name), self.LogWarning)
+
+  def GetOsInstallPackageEnvironment(self, instance, script):
+    """Returns the OS scripts environment for the helper VM
+
+    @type instance: L{objects.Instance}
+    @param instance: instance for which the OS scripts are run
+
+    @type script: string
+    @param script: script to run (e.g.,
+                   constants.OS_SCRIPT_CREATE_UNTRUSTED)
+
+    @rtype: dict of string to string
+    @return: OS scripts environment for the helper VM
+
+    """
+    env = {"OS_SCRIPT": script}
+
+    # We pass only the instance's disks, not the helper VM's disks.
+    if instance.hypervisor == constants.HT_KVM:
+      prefix = "/dev/vd"
+    elif instance.hypervisor in [constants.HT_XEN_PVM, constants.HT_XEN_HVM]:
+      prefix = "/dev/xvd"
+    else:
+      raise errors.OpExecError("Cannot run OS scripts in a virtualized"
+                               " environment for hypervisor '%s'"
+                               % instance.hypervisor)
+
+    num_disks = len(self.cfg.GetInstanceDisks(instance.uuid))
+
+    for idx, disk_label in enumerate(utils.GetDiskLabels(prefix, num_disks + 1,
+                                                         start=1)):
+      env["DISK_%d_PATH" % idx] = disk_label
+
+    return env
+
+  def UpdateInstanceOsInstallPackage(self, feedback_fn, instance, override_env):
+    """Updates the OS parameter 'os-install-package' for an instance.
+
+    The OS install package is an archive containing an OS definition
+    and a file containing the environment variables needed to run the
+    OS scripts.
+
+    The OS install package is served by the metadata daemon to the
+    instances, so the OS scripts can run inside the virtualized
+    environment.
+
+    @type feedback_fn: callable
+    @param feedback_fn: function used to send feedback back to the caller
+
+    @type instance: L{objects.Instance}
+    @param instance: instance for which the OS parameter
+                     'os-install-package' is updated
+
+    @type override_env: dict of string to string
+    @param override_env: if supplied, it overrides the environment of
+                         the export OS scripts archive
+
+    """
+    if "os-install-package" in instance.osparams:
+      feedback_fn("Using OS install package '%s'" %
+                  instance.osparams["os-install-package"])
+    else:
+      result = self.rpc.call_os_export(instance.primary_node, instance,
+                                       override_env)
+      result.Raise("Could not export OS '%s'" % instance.os)
+      instance.osparams["os-install-package"] = result.payload
+
+      feedback_fn("Created OS install package '%s'" % result.payload)
+
+  def RunOsScriptsVirtualized(self, feedback_fn, instance):
+    """Runs the OS scripts inside a safe virtualized environment.
+
+    The virtualized environment reuses the instance and temporarily
+    creates a disk onto which the image of the helper VM is dumped.
+    The temporary disk is used to boot the helper VM.  The OS scripts
+    are passed to the helper VM through the metadata daemon and the OS
+    install package.
+
+    @type feedback_fn: callable
+    @param feedback_fn: function used to send feedback back to the caller
+
+    @type instance: L{objects.Instance}
+    @param instance: instance for which the OS scripts must be run
+                     inside the virtualized environment
+
+    """
+    install_image = self.cfg.GetInstallImage()
+
+    if not install_image:
+      raise errors.OpExecError("Cannot create install instance because an"
+                               " install image has not been specified")
+
+    disk_size = DetermineImageSize(self, install_image, instance.primary_node)
+
+    env = self.GetOsInstallPackageEnvironment(
+      instance,
+      constants.OS_SCRIPT_CREATE_UNTRUSTED)
+    self.UpdateInstanceOsInstallPackage(feedback_fn, instance, env)
+    UpdateMetadata(feedback_fn, self.rpc, instance,
+                   osparams_private=self.op.osparams_private,
+                   osparams_secret=self.op.osparams_secret)
+
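+    # Boot the instance from a temporary disk imaged with the install image;
+    # the OS scripts then run inside that helper environment and completion
+    # is detected by the instance shutting down (or the timeout expiring).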
+    with TemporaryDisk(self,
+                       instance,
+                       [(constants.DT_PLAIN, constants.DISK_RDWR, disk_size)],
+                       feedback_fn):
+      feedback_fn("Activating instance disks")
+      StartInstanceDisks(self, instance, False)
+
+      feedback_fn("Imaging disk with install image")
+      ImageDisks(self, instance, install_image)
+
+      feedback_fn("Starting instance with install image")
+      result = self.rpc.call_instance_start(instance.primary_node,
+                                            (instance, [], []),
+                                            False, self.op.reason)
+      result.Raise("Could not start instance '%s' with the install image '%s'"
+                   % (instance.name, install_image))
+
+      # First wait for the instance to start up
+      running_check = lambda: IsInstanceRunning(self, instance, prereq=False)
+      instance_up = retry.SimpleRetry(True, running_check, 5.0,
+                                      self.op.helper_startup_timeout)
+      if not instance_up:
+        raise errors.OpExecError("Could not boot instance using install image"
+                                 " '%s'" % install_image)
+
+      feedback_fn("Instance is up, now awaiting shutdown")
+
+      # Then wait for it to be finished, as detected by its shutdown
+      instance_up = retry.SimpleRetry(False, running_check, 20.0,
+                                      self.op.helper_shutdown_timeout)
+      if instance_up:
+        self.LogWarning("Installation not completed prior to timeout, shutting"
+                        " down instance forcibly")
+
+    feedback_fn("Installation complete")
+
+  def Exec(self, feedback_fn):
+    """Create and add the instance to the cluster.
+
+    """
+    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
+                self.owned_locks(locking.LEVEL_NODE)), \
+      "Node locks differ from node resource locks"
+
+    ht_kind = self.op.hypervisor
+    if ht_kind in constants.HTS_REQ_PORT:
+      network_port = self.cfg.AllocatePort()
+    else:
+      network_port = None
+
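+    # A commit of a previously forthcoming instance reuses the existing
+    # instance UUID (and, below, its recorded disks); a fresh creation
+    # generates a new UUID and disk objects.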
+    if self.op.commit:
+      (instance_uuid, _) = self.cfg.ExpandInstanceName(self.op.instance_name)
+    else:
+      instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
+
+    # This is ugly, but we have a chicken-and-egg problem here:
+    # We can only take the group disk parameters, as the instance
+    # has no disks yet (we are generating them right here).
+    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
+
+    if self.op.commit:
+      disks = self.cfg.GetInstanceDisks(instance_uuid)
+      CommitDisks(disks)
+    else:
+      disks = GenerateDiskTemplate(self,
+                                   self.op.disk_template,
+                                   instance_uuid, self.pnode.uuid,
+                                   self.secondaries,
+                                   self.disks,
+                                   self.instance_file_storage_dir,
+                                   self.op.file_driver,
+                                   0,
+                                   feedback_fn,
+                                   self.cfg.GetGroupDiskParams(nodegroup),
+                                   forthcoming=self.op.forthcoming)
+
+    if self.op.os_type is None:
+      os_type = ""
+    else:
+      os_type = self.op.os_type
+
+    iobj = objects.Instance(name=self.op.instance_name,
+                            uuid=instance_uuid,
+                            os=os_type,
+                            primary_node=self.pnode.uuid,
+                            nics=self.nics, disks=[],
+                            disk_template=self.op.disk_template,
+                            disks_active=False,
+                            admin_state=constants.ADMINST_DOWN,
+                            admin_state_source=constants.ADMIN_SOURCE,
+                            network_port=network_port,
+                            beparams=self.op.beparams,
+                            hvparams=self.op.hvparams,
+                            hypervisor=self.op.hypervisor,
+                            osparams=self.op.osparams,
+                            osparams_private=self.op.osparams_private,
+                            forthcoming=self.op.forthcoming,
+                            )
+
+    if self.op.tags:
+      for tag in self.op.tags:
+        iobj.AddTag(tag)
+
+    if self.adopt_disks:
+      if self.op.disk_template == constants.DT_PLAIN:
+        # rename LVs to the newly-generated names; we need to construct
+        # 'fake' LV disks with the old data, plus the new unique_id
+        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
+        rename_to = []
+        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
+          rename_to.append(t_dsk.logical_id)
+          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
+        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
+                                               zip(tmp_disks, rename_to))
+        result.Raise("Failed to rename adopted LVs")
+    elif self.op.forthcoming:
+      feedback_fn("Instance is forthcoming, not creating disks")
+    else:
+      feedback_fn("* creating instance disks...")
+      try:
+        CreateDisks(self, iobj, disks=disks)
+      except errors.OpExecError:
+        self.LogWarning("Device creation failed")
+        for disk in disks:
+          self.cfg.ReleaseDRBDMinors(disk.uuid)
+        raise
+
+    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
+    self.cfg.AddInstance(iobj, self.proc.GetECId(), replace=self.op.commit)
+
+    feedback_fn("adding disks to cluster config")
+    for disk in disks:
+      self.cfg.AddInstanceDisk(iobj.uuid, disk, replace=self.op.commit)
+
+    if self.op.forthcoming:
+      feedback_fn("Instance is forthcoming; not creating the actual instance")
+      return self.cfg.GetNodeNames(list(self.cfg.GetInstanceNodes(iobj.uuid)))
+
+    # re-read the instance from the configuration
+    iobj = self.cfg.GetInstanceInfo(iobj.uuid)
+
+    if self.op.mode == constants.INSTANCE_IMPORT:
+      # Release unused nodes
+      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
+    else:
+      # Release all nodes
+      ReleaseLocks(self, locking.LEVEL_NODE)
+
+    # Wipe disks
+    disk_abort = False
+    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
+      feedback_fn("* wiping instance disks...")
+      try:
+        WipeDisks(self, iobj)
+      except errors.OpExecError, err:
+        logging.exception("Wiping disks failed")
+        self.LogWarning("Wiping instance disks failed (%s)", err)
+        disk_abort = True
+
+    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
+
+    # Image disks
+    os_image = objects.GetOSImage(iobj.osparams)
+    disk_abort = False
+
+    if not self.adopt_disks and os_image is not None:
+      feedback_fn("* imaging instance disks...")
+      try:
+        ImageDisks(self, iobj, os_image)
+      except errors.OpExecError, err:
+        logging.exception("Imaging disks failed")
+        self.LogWarning("Imaging instance disks failed (%s)", err)
+        disk_abort = True
+
+    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
+
+    # instance disks are now active
+    iobj.disks_active = True
+
+    # Release all node resource locks
+    ReleaseLocks(self, locking.LEVEL_NODE_RES)
+
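+    # Decide how to run the OS scripts: a trusted OS runs them directly on
+    # the node, an untrusted OS runs them inside a virtualized helper
+    # environment.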
+    if iobj.os:
+      result = self.rpc.call_os_diagnose([iobj.primary_node])[iobj.primary_node]
+      result.Raise("Failed to get OS '%s'" % iobj.os)
+
+      trusted = None
+
+      for (name, _, _, _, _, _, _, os_trusted) in result.payload:
+        if name == objects.OS.GetName(iobj.os):
+          trusted = os_trusted
+          break
+
+      if trusted is None:
+        raise errors.OpPrereqError("OS '%s' is not available in node '%s'" %
+                                   (iobj.os, iobj.primary_node))
+      elif trusted:
+        self.RunOsScripts(feedback_fn, iobj)
+      else:
+        self.RunOsScriptsVirtualized(feedback_fn, iobj)
+        # Instance is modified by 'RunOsScriptsVirtualized',
+        # therefore, it must be retrieved once again from the
+        # configuration, otherwise there will be a config object
+        # version mismatch.
+        iobj = self.cfg.GetInstanceInfo(iobj.uuid)
+
+    # Update instance metadata so that it can be reached from the
+    # metadata service.
+    UpdateMetadata(feedback_fn, self.rpc, iobj,
+                   osparams_private=self.op.osparams_private,
+                   osparams_secret=self.op.osparams_secret)
+
+    assert not self.owned_locks(locking.LEVEL_NODE_RES)
+
+    if self.op.start:
+      iobj.admin_state = constants.ADMINST_UP
+      self.cfg.Update(iobj, feedback_fn)
+      logging.info("Starting instance %s on node %s", self.op.instance_name,
+                   self.pnode.name)
+      feedback_fn("* starting instance...")
+      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
+                                            False, self.op.reason)
+      result.Raise("Could not start instance")
+
+    return self.cfg.GetNodeNames(list(self.cfg.GetInstanceNodes(iobj.uuid)))
+
+  def PrepareRetry(self, feedback_fn):
+    # A temporary lack of resources can only happen if opportunistic locking
+    # is used.
+    assert self.op.opportunistic_locking
+
+    logging.info("Opportunistic locking did not suceed, falling back to"
+                 " full lock allocation")
+    feedback_fn("* falling back to full lock allocation")
+    self.op.opportunistic_locking = False
diff --git a/lib/cmdlib/instance_migration.py b/lib/cmdlib/instance_migration.py
index c605323..ad6f9c3 100644
--- a/lib/cmdlib/instance_migration.py
+++ b/lib/cmdlib/instance_migration.py
@@ -69,11 +69,6 @@
   lu.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
   lu.dont_collate_locks[locking.LEVEL_NODE_RES] = True
 
-  # The node allocation lock is actually only needed for externally replicated
-  # instances (e.g. sharedfile or RBD) and if an iallocator is used.
-  lu.needed_locks[locking.LEVEL_NODE_ALLOC] = []
-  lu.dont_collate_locks[locking.LEVEL_NODE_ALLOC] = True
-
 
 def _DeclareLocksForMigration(lu, level):
   """Declares locks for L{TLMigrateInstance}.
@@ -82,26 +77,21 @@
   @param level: Lock level
 
   """
-  if level == locking.LEVEL_NODE_ALLOC:
+  if level == locking.LEVEL_NODE:
     assert lu.op.instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
 
     instance = lu.cfg.GetInstanceInfo(lu.op.instance_uuid)
 
-    # Node locks are already declared here rather than at LEVEL_NODE as we need
-    # the instance object anyway to declare the node allocation lock.
-    if instance.disk_template in constants.DTS_EXT_MIRROR:
+    disks = lu.cfg.GetInstanceDisks(instance.uuid)
+    if utils.AnyDiskOfType(disks, constants.DTS_EXT_MIRROR):
       if lu.op.target_node is None:
         lu.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
-        lu.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
       else:
         lu.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
                                                lu.op.target_node_uuid]
-      del lu.recalculate_locks[locking.LEVEL_NODE]
     else:
       lu._LockInstancesNodes() # pylint: disable=W0212
 
-  elif level == locking.LEVEL_NODE:
-    # Node locks are declared together with the node allocation lock
     assert (lu.needed_locks[locking.LEVEL_NODE] or
             lu.needed_locks[locking.LEVEL_NODE] is locking.ALL_SET)
 
@@ -135,7 +125,7 @@
     self.target_node = getattr(self.op, "target_node", None)
 
   def ExpandNames(self):
-    self._ExpandAndLockInstance()
+    self._ExpandAndLockInstance(allow_forthcoming=True)
     _ExpandNamesForMigration(self)
 
     self._migrater = \
@@ -166,7 +156,8 @@
       "FAILOVER_CLEANUP": self.op.cleanup,
       }
 
-    if instance.disk_template in constants.DTS_INT_MIRROR:
+    disks = self.cfg.GetInstanceDisks(instance.uuid)
+    if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
       secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
       env["OLD_SECONDARY"] = self.cfg.GetNodeName(secondary_nodes[0])
       env["NEW_SECONDARY"] = self.cfg.GetNodeName(source_node_uuid)
@@ -238,7 +229,8 @@
       "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
       })
 
-    if instance.disk_template in constants.DTS_INT_MIRROR:
+    disks = self.cfg.GetInstanceDisks(instance.uuid)
+    if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
       secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
       env["OLD_SECONDARY"] = self.cfg.GetNodeName(secondary_nodes[0])
       env["NEW_SECONDARY"] = self.cfg.GetNodeName(source_node_uuid)
@@ -334,20 +326,24 @@
                       " switching to failover")
       self.failover = True
 
-    if self.instance.disk_template not in constants.DTS_MIRRORED:
+    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+
+    if not utils.AllDiskOfType(disks, constants.DTS_MIRRORED):
       if self.failover:
         text = "failovers"
       else:
         text = "migrations"
+      invalid_disks = set(d.dev_type for d in disks
+                          if d.dev_type not in constants.DTS_MIRRORED)
       raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
-                                 " %s" % (self.instance.disk_template, text),
+                                 " %s" % (utils.CommaJoin(invalid_disks), text),
                                  errors.ECODE_STATE)
 
-    if self.instance.disk_template in constants.DTS_EXT_MIRROR:
+    # TODO allow heterogeneous disk types if all are mirrored in some way.
+    if utils.AllDiskOfType(disks, constants.DTS_EXT_MIRROR):
       CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
 
       if self.lu.op.iallocator:
-        assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
         self._RunAllocator()
       else:
         # We set set self.target_node_uuid as it is required by
@@ -377,15 +373,15 @@
         # in the LU
         ReleaseLocks(self.lu, locking.LEVEL_NODE,
                      keep=[self.instance.primary_node, self.target_node_uuid])
-        ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
 
-    else:
+    elif utils.AllDiskOfType(disks, constants.DTS_INT_MIRROR):
+      templates = [d.dev_type for d in disks]
       secondary_node_uuids = \
         self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
       if not secondary_node_uuids:
         raise errors.ConfigurationError("No secondary node but using"
-                                        " %s disk template" %
-                                        self.instance.disk_template)
+                                        " %s disk types" %
+                                        utils.CommaJoin(set(templates)))
       self.target_node_uuid = target_node_uuid = secondary_node_uuids[0]
       if self.lu.op.iallocator or \
         (self.lu.op.target_node_uuid and
@@ -394,11 +390,11 @@
           text = "failed over"
         else:
           text = "migrated"
-        raise errors.OpPrereqError("Instances with disk template %s cannot"
+        raise errors.OpPrereqError("Instances with disk types %s cannot"
                                    " be %s to arbitrary nodes"
                                    " (neither an iallocator nor a target"
                                    " node can be passed)" %
-                                   (self.instance.disk_template, text),
+                                   (utils.CommaJoin(set(templates)), text),
                                    errors.ECODE_INVAL)
       nodeinfo = self.cfg.GetNodeInfo(target_node_uuid)
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
@@ -407,6 +403,10 @@
       CheckTargetNodeIPolicy(self.lu, ipolicy, self.instance, nodeinfo,
                              self.cfg, ignore=self.ignore_ipolicy)
 
+    else:
+      raise errors.OpPrereqError("Instance mixes internal and external "
+                                 "mirroring. This is not currently supported.")
+
     i_be = cluster.FillBE(self.instance)
 
     # check memory requirements on the secondary node
@@ -486,8 +486,6 @@
     """Run the allocator based on input opcode.
 
     """
-    assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
-
     # FIXME: add a self.ignore_ipolicy option
     req = iallocator.IAReqRelocate(
           inst_uuid=self.instance_uuid,
@@ -532,18 +530,44 @@
           self.feedback_fn("   - progress: %.1f%%" % min_percent)
         time.sleep(2)
 
-  def _EnsureSecondary(self, node_uuid):
-    """Demote a node to secondary.
+  def _OpenInstanceDisks(self, node_uuid, exclusive):
+    """Open instance disks.
 
     """
-    self.feedback_fn("* switching node %s to secondary mode" %
-                     self.cfg.GetNodeName(node_uuid))
+    if exclusive:
+      mode = "in exclusive mode"
+    else:
+      mode = "in shared mode"
+
+    node_name = self.cfg.GetNodeName(node_uuid)
+
+    self.feedback_fn("* opening instance disks on node %s %s" %
+                     (node_name, mode))
+
+    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+    result = self.rpc.call_blockdev_open(node_uuid, self.instance.name,
+                                         (disks, self.instance), exclusive)
+    result.Raise("Cannot open instance disks on node %s" % node_name)
+
+  def _CloseInstanceDisks(self, node_uuid):
+    """Close instance disks.
+
+    """
+    node_name = self.cfg.GetNodeName(node_uuid)
+
+    self.feedback_fn("* closing instance disks on node %s" % node_name)
 
     disks = self.cfg.GetInstanceDisks(self.instance.uuid)
     result = self.rpc.call_blockdev_close(node_uuid, self.instance.name,
                                           (disks, self.instance))
-    result.Raise("Cannot change disk to secondary on node %s" %
-                 self.cfg.GetNodeName(node_uuid))
+    msg = result.fail_msg
+    if msg:
+      if result.offline or self.ignore_consistency:
+        self.lu.LogWarning("Could not close instance disks on node %s,"
+                           " proceeding anyway" % node_name)
+      else:
+        raise errors.OpExecError("Cannot close instance disks on node %s: %s" %
+                                 (node_name, msg))
 
   def _GoStandalone(self):
     """Disconnect from the network.
@@ -569,7 +593,7 @@
     disks = self.cfg.GetInstanceDisks(self.instance.uuid)
     result = self.rpc.call_drbd_attach_net(self.all_node_uuids,
                                            (disks, self.instance),
-                                           self.instance.name, multimaster)
+                                           multimaster)
     for node_uuid, nres in result.items():
       nres.Raise("Cannot change disks config on node %s" %
                  self.cfg.GetNodeName(node_uuid))
@@ -629,8 +653,11 @@
                        self.cfg.GetNodeName(self.source_node_uuid))
       demoted_node_uuid = self.target_node_uuid
 
-    if self.instance.disk_template in constants.DTS_INT_MIRROR:
-      self._EnsureSecondary(demoted_node_uuid)
+    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+
+    self._CloseInstanceDisks(demoted_node_uuid)
+
+    if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
       try:
         self._WaitUntilSync()
       except errors.OpExecError:
@@ -640,6 +667,8 @@
       self._GoStandalone()
       self._GoReconnect(False)
       self._WaitUntilSync()
+    elif utils.AnyDiskOfType(disks, constants.DTS_EXT_MIRROR):
+      self._OpenInstanceDisks(self.instance.primary_node, True)
 
     self.feedback_fn("* done")
 
@@ -647,11 +676,16 @@
     """Try to revert the disk status after a failed migration.
 
     """
-    if self.instance.disk_template in constants.DTS_EXT_MIRROR:
+
+    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+
+    self._CloseInstanceDisks(self.target_node_uuid)
+
+    if utils.AllDiskOfType(disks, constants.DTS_EXT_MIRROR):
+      self._OpenInstanceDisks(self.source_node_uuid, True)
       return
 
     try:
-      self._EnsureSecondary(self.target_node_uuid)
       self._GoStandalone()
       self._GoReconnect(False)
       self._WaitUntilSync()
@@ -761,13 +795,19 @@
 
     self.migration_info = migration_info = result.payload
 
-    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
+    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+
+    self._CloseInstanceDisks(self.target_node_uuid)
+
+    if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
       # Then switch the disks to master/master mode
-      self._EnsureSecondary(self.target_node_uuid)
       self._GoStandalone()
       self._GoReconnect(True)
       self._WaitUntilSync()
 
+    self._OpenInstanceDisks(self.source_node_uuid, False)
+    self._OpenInstanceDisks(self.target_node_uuid, False)
+
     self.feedback_fn("* preparing %s to accept the instance" %
                      self.cfg.GetNodeName(self.target_node_uuid))
     result = self.rpc.call_accept_instance(self.target_node_uuid,
@@ -843,6 +883,7 @@
 
     self.cfg.SetInstancePrimaryNode(self.instance.uuid, self.target_node_uuid)
     self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
+    disks = self.cfg.GetInstanceDisks(self.instance_uuid)
 
     result = self.rpc.call_instance_finalize_migration_dst(
                self.target_node_uuid, self.instance, migration_info, True)
@@ -853,20 +894,26 @@
       raise errors.OpExecError("Could not finalize instance migration: %s" %
                                msg)
 
-    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
-      self._EnsureSecondary(self.source_node_uuid)
+    self._CloseInstanceDisks(self.source_node_uuid)
+
+    if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
       self._WaitUntilSync()
       self._GoStandalone()
       self._GoReconnect(False)
       self._WaitUntilSync()
+    elif utils.AnyDiskOfType(disks, constants.DTS_EXT_MIRROR):
+      self._OpenInstanceDisks(self.target_node_uuid, True)
 
     # If the instance's disk template is `rbd' or `ext' and there was a
     # successful migration, unmap the device from the source node.
-    if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
-      inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
-      disks = ExpandCheckDisks(inst_disks, inst_disks)
-      self.feedback_fn("* unmapping instance's disks from %s" %
-                       self.cfg.GetNodeName(self.source_node_uuid))
+    unmap_types = (constants.DT_RBD, constants.DT_EXT)
+
+    if utils.AnyDiskOfType(disks, unmap_types):
+      unmap_disks = [d for d in disks if d.dev_type in unmap_types]
+      disks = ExpandCheckDisks(unmap_disks, unmap_disks)
+      self.feedback_fn("* unmapping instance's disks %s from %s" %
+                       (utils.CommaJoin(d.name for d in unmap_disks),
+                        self.cfg.GetNodeName(self.source_node_uuid)))
       for disk in disks:
         result = self.rpc.call_blockdev_shutdown(self.source_node_uuid,
                                                  (disk, self.instance))
@@ -889,6 +936,13 @@
     starting it on the secondary.
 
     """
+    if self.instance.forthcoming:
+      self.feedback_fn("Instance is forthcoming, just updating the"
+                       " configuration")
+      self.cfg.SetInstancePrimaryNode(self.instance.uuid,
+                                      self.target_node_uuid)
+      return
+
     primary_node = self.cfg.GetNodeInfo(self.instance.primary_node)
 
     source_node_uuid = self.instance.primary_node
@@ -934,6 +988,10 @@
                                  (self.instance.name,
                                   self.cfg.GetNodeName(source_node_uuid), msg))
 
+    disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
+    if disk_template in constants.DTS_EXT_MIRROR:
+      self._CloseInstanceDisks(source_node_uuid)
+
     self.feedback_fn("* deactivating the instance's disks on source node")
     if not ShutdownInstanceDisks(self.lu, self.instance, ignore_primary=True):
       raise errors.OpExecError("Can't shut down the instance's disks")
@@ -948,8 +1006,8 @@
       logging.info("Starting instance %s on node %s", self.instance.name,
                    self.cfg.GetNodeName(self.target_node_uuid))
 
-      disks_ok, _ = AssembleInstanceDisks(self.lu, self.instance,
-                                          ignore_secondaries=True)
+      disks_ok, _, _ = AssembleInstanceDisks(self.lu, self.instance,
+                                             ignore_secondaries=True)
       if not disks_ok:
         ShutdownInstanceDisks(self.lu, self.instance)
         raise errors.OpExecError("Can't activate the instance's disks")
@@ -975,7 +1033,10 @@
     self.source_node_uuid = self.instance.primary_node
 
     # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
-    if self.instance.disk_template in constants.DTS_INT_MIRROR:
+    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+
+    # TODO allow mixed disks
+    if utils.AllDiskOfType(disks, constants.DTS_INT_MIRROR):
       secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
       self.target_node_uuid = secondary_nodes[0]
       # Otherwise self.target_node has been populated either
diff --git a/lib/cmdlib/instance_operation.py b/lib/cmdlib/instance_operation.py
index 585a704..048b1e4 100644
--- a/lib/cmdlib/instance_operation.py
+++ b/lib/cmdlib/instance_operation.py
@@ -352,7 +352,7 @@
     CheckNodeOnline(self, instance.primary_node, "Instance primary node"
                     " offline, cannot reinstall")
 
-    if instance.disk_template == constants.DT_DISKLESS:
+    if not instance.disks:
       raise errors.OpPrereqError("Instance '%s' has no disks" %
                                  self.op.instance_name,
                                  errors.ECODE_INVAL)
diff --git a/lib/cmdlib/instance_query.py b/lib/cmdlib/instance_query.py
index fbf4362..5aec4c1 100644
--- a/lib/cmdlib/instance_query.py
+++ b/lib/cmdlib/instance_query.py
@@ -35,6 +35,7 @@
 from ganeti import compat
 from ganeti import constants
 from ganeti import locking
+from ganeti import utils
 from ganeti.cmdlib.base import NoHooksLU
 from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
   CheckInstancesNodeGroups, AnnotateDiskParams
@@ -272,9 +273,10 @@
       group2name_fn = lambda uuid: groups[uuid].name
       node_uuid2name_fn = lambda uuid: nodes[uuid].name
 
-      disks = map(compat.partial(self._ComputeDiskStatus, instance,
-                                 node_uuid2name_fn),
-                  self.cfg.GetInstanceDisks(instance.uuid))
+      disk_objects = self.cfg.GetInstanceDisks(instance.uuid)
+      output_disks = map(compat.partial(self._ComputeDiskStatus, instance,
+                                        node_uuid2name_fn),
+                         disk_objects)
 
       secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
       snodes_group_uuids = [nodes[snode_uuid].group
@@ -293,8 +295,8 @@
         "os": instance.os,
         # this happens to be the same format used for hooks
         "nics": NICListToTuple(self, instance.nics),
-        "disk_template": instance.disk_template,
-        "disks": disks,
+        "disk_template": utils.GetDiskTemplate(disk_objects),
+        "disks": output_disks,
         "hypervisor": instance.hypervisor,
         "network_port": instance.network_port,
         "hv_instance": instance.hvparams,
diff --git a/lib/cmdlib/instance_set_params.py b/lib/cmdlib/instance_set_params.py
new file mode 100644
index 0000000..a35e95c
--- /dev/null
+++ b/lib/cmdlib/instance_set_params.py
@@ -0,0 +1,1968 @@
+#
+#
+
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Logical unit setting parameters of a single instance."""
+
+import copy
+import logging
+import os
+
+from ganeti import compat
+from ganeti import constants
+from ganeti import errors
+from ganeti import ht
+from ganeti import hypervisor
+from ganeti import locking
+from ganeti import netutils
+from ganeti import objects
+from ganeti import utils
+import ganeti.rpc.node as rpc
+
+from ganeti.cmdlib.base import LogicalUnit
+
+from ganeti.cmdlib.common import INSTANCE_DOWN, \
+  INSTANCE_NOT_RUNNING, CheckNodeOnline, \
+  CheckParamsNotGlobal, \
+  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
+  GetUpdatedParams, CheckInstanceState, ExpandNodeUuidAndName, \
+  IsValidDiskAccessModeCombination, AnnotateDiskParams
+from ganeti.cmdlib.instance_storage import CalculateFileStorageDir, \
+  CheckDiskExtProvider, CheckNodesFreeDiskPerVG, CheckRADOSFreeSpace, \
+  CheckSpindlesExclusiveStorage, ComputeDiskSizePerVG, ComputeDisksInfo, \
+  CreateDisks, CreateSingleBlockDev, GenerateDiskTemplate, \
+  IsExclusiveStorageEnabledNodeUuid, ShutdownInstanceDisks, \
+  WaitForSync, WipeOrCleanupDisks, AssembleInstanceDisks
+from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
+  NICToTuple, CheckNodeNotDrained, CopyLockList, \
+  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
+  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
+  UpdateMetadata, CheckForConflictingIp, \
+  PrepareContainerMods, ComputeInstanceCommunicationNIC, \
+  ApplyContainerMods, ComputeIPolicyInstanceSpecViolation, \
+  CheckNodesPhysicalCPUs
+import ganeti.masterd.instance
+
+
+class InstNicModPrivate(object):
+  """Data structure for network interface modifications.
+
+  Used by L{LUInstanceSetParams}.
+
+  """
+  def __init__(self):
+    self.params = None
+    self.filled = None
+
+
+class LUInstanceSetParams(LogicalUnit):
+  """Modifies an instances's parameters.
+
+  """
+  HPATH = "instance-modify"
+  HTYPE = constants.HTYPE_INSTANCE
+  REQ_BGL = False
+
+  def GenericGetDiskInfo(self, uuid=None, name=None):
+    """Find a disk object using the provided params.
+
+    Accept arguments as keywords and use the GetDiskInfo/GetDiskInfoByName
+    config functions to retrieve the disk info based on these arguments.
+
+    In case of an error, the appropriate exception is raised.
+
+    """
+    if uuid:
+      disk = self.cfg.GetDiskInfo(uuid)
+      if disk is None:
+        raise errors.OpPrereqError("No disk was found with this UUID: %s" %
+                                   uuid, errors.ECODE_INVAL)
+    elif name:
+      disk = self.cfg.GetDiskInfoByName(name)
+      if disk is None:
+        raise errors.OpPrereqError("No disk was found with this name: %s" %
+                                   name, errors.ECODE_INVAL)
+    else:
+      raise errors.ProgrammerError("No disk UUID or name was given")
+
+    return disk
+
+  @staticmethod
+  def _UpgradeDiskNicMods(kind, mods, verify_fn):
+    assert ht.TList(mods)
+    assert not mods or len(mods[0]) in (2, 3)
+
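+    # Convert legacy 2-element modifications into the 3-element
+    # (op, identifier, params) form used internally: add/attach/remove/detach
+    # entries get the index -1, everything else is treated as a modify of
+    # the named item.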
+    if mods and len(mods[0]) == 2:
+      result = []
+
+      addremove = 0
+      for op, params in mods:
+        if op in (constants.DDM_ADD, constants.DDM_ATTACH,
+                  constants.DDM_REMOVE, constants.DDM_DETACH):
+          result.append((op, -1, params))
+          addremove += 1
+
+          if addremove > 1:
+            raise errors.OpPrereqError("Only one %s add/attach/remove/detach "
+                                       "operation is supported at a time" %
+                                       kind, errors.ECODE_INVAL)
+        else:
+          result.append((constants.DDM_MODIFY, op, params))
+
+      assert verify_fn(result)
+    else:
+      result = mods
+    return result
+
+  @staticmethod
+  def _CheckMods(kind, mods, key_types, item_fn):
+    """Ensures requested disk/NIC modifications are valid.
+
+    Note that the 'attach' action needs a way to refer to the UUID of the disk,
+    since the disk name is not unique cluster-wide. However, the UUID of the
+    disk is not settable but rather generated by Ganeti automatically;
+    therefore it cannot be passed as an IDISK parameter. For this reason, this
+    function will override the checks to accept uuid parameters solely for the
+    attach action.
+
+    """
+    # Create a key_types copy with the 'uuid' as a valid key type.
+    key_types_attach = key_types.copy()
+    key_types_attach['uuid'] = 'string'
+
+    for (op, _, params) in mods:
+      assert ht.TDict(params)
+
+      # If 'key_types' is an empty dict, we assume we have an
+      # 'ext' template and thus do not ForceDictType
+      if key_types:
+        utils.ForceDictType(params, (key_types if op != constants.DDM_ATTACH
+                                     else key_types_attach))
+
+      if op in (constants.DDM_REMOVE, constants.DDM_DETACH):
+        if params:
+          raise errors.OpPrereqError("No settings should be passed when"
+                                     " removing or detaching a %s" % kind,
+                                     errors.ECODE_INVAL)
+      elif op in (constants.DDM_ADD, constants.DDM_ATTACH,
+                  constants.DDM_MODIFY):
+        item_fn(op, params)
+      else:
+        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
+
+  def _VerifyDiskModification(self, op, params, excl_stor, group_access_types):
+    """Verifies a disk modification.
+
+    """
+    disk_type = params.get(
+        constants.IDISK_TYPE,
+        self.cfg.GetInstanceDiskTemplate(self.instance.uuid))
+
+    if op == constants.DDM_ADD:
+      params[constants.IDISK_TYPE] = disk_type
+
+      if disk_type == constants.DT_DISKLESS:
+        raise errors.OpPrereqError(
+            "Must specify disk type on diskless instance", errors.ECODE_INVAL)
+
+      if disk_type != constants.DT_EXT:
+        utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
+
+      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
+      if mode not in constants.DISK_ACCESS_SET:
+        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
+                                   errors.ECODE_INVAL)
+
+      size = params.get(constants.IDISK_SIZE, None)
+      if size is None:
+        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
+                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
+      size = int(size)
+
+      params[constants.IDISK_SIZE] = size
+      name = params.get(constants.IDISK_NAME, None)
+      if name is not None and name.lower() == constants.VALUE_NONE:
+        params[constants.IDISK_NAME] = None
+
+    # This check is necessary both when adding and attaching disks
+    if op in (constants.DDM_ADD, constants.DDM_ATTACH):
+      CheckSpindlesExclusiveStorage(params, excl_stor, True)
+      CheckDiskExtProvider(params, disk_type)
+
+      # Make sure we do not add syncing disks to instances with inactive disks
+      if not self.op.wait_for_sync and not self.instance.disks_active:
+        raise errors.OpPrereqError("Can't %s a disk to an instance with"
+                                   " deactivated disks and --no-wait-for-sync"
+                                   " given" % op, errors.ECODE_INVAL)
+
+      # Check disk access param (only for specific disks)
+      if disk_type in constants.DTS_HAVE_ACCESS:
+        access_type = params.get(constants.IDISK_ACCESS,
+                                 group_access_types[disk_type])
+        if not IsValidDiskAccessModeCombination(self.instance.hypervisor,
+                                                disk_type, access_type):
+          raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
+                                     " used with %s disk access param" %
+                                     (self.instance.hypervisor, access_type),
+                                     errors.ECODE_STATE)
+
+    if op == constants.DDM_ATTACH:
+      if len(params) != 1 or ('uuid' not in params and
+                              constants.IDISK_NAME not in params):
+        raise errors.OpPrereqError("Only one argument is permitted in %s op,"
+                                   " either %s or uuid" % (constants.DDM_ATTACH,
+                                                           constants.IDISK_NAME,
+                                                           ),
+                                   errors.ECODE_INVAL)
+      self._CheckAttachDisk(params)
+
+    elif op == constants.DDM_MODIFY:
+      if constants.IDISK_SIZE in params:
+        raise errors.OpPrereqError("Disk size change not possible, use"
+                                   " grow-disk", errors.ECODE_INVAL)
+
+      disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)
+
+      # Disk modification supports changing only the disk name and mode.
+      # Changing arbitrary parameters is allowed only for the ext disk
+      # template.
+      if not utils.AllDiskOfType(disk_info, [constants.DT_EXT]):
+        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
+      else:
+        # We have to check that the 'access' and 'disk_provider' parameters
+        # cannot be modified
+        for param in [constants.IDISK_ACCESS, constants.IDISK_PROVIDER]:
+          if param in params:
+            raise errors.OpPrereqError("Disk '%s' parameter change is"
+                                       " not possible" % param,
+                                       errors.ECODE_INVAL)
+
+      name = params.get(constants.IDISK_NAME, None)
+      if name is not None and name.lower() == constants.VALUE_NONE:
+        params[constants.IDISK_NAME] = None
+
+  @staticmethod
+  def _VerifyNicModification(op, params):
+    """Verifies a network interface modification.
+
+    """
+    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
+      ip = params.get(constants.INIC_IP, None)
+      name = params.get(constants.INIC_NAME, None)
+      req_net = params.get(constants.INIC_NETWORK, None)
+      link = params.get(constants.NIC_LINK, None)
+      mode = params.get(constants.NIC_MODE, None)
+      if name is not None and name.lower() == constants.VALUE_NONE:
+        params[constants.INIC_NAME] = None
+      if req_net is not None:
+        if req_net.lower() == constants.VALUE_NONE:
+          params[constants.INIC_NETWORK] = None
+          req_net = None
+        elif link is not None or mode is not None:
+          raise errors.OpPrereqError("If network is given"
+                                     " mode or link should not",
+                                     errors.ECODE_INVAL)
+
+      if op == constants.DDM_ADD:
+        macaddr = params.get(constants.INIC_MAC, None)
+        if macaddr is None:
+          params[constants.INIC_MAC] = constants.VALUE_AUTO
+
+      if ip is not None:
+        if ip.lower() == constants.VALUE_NONE:
+          params[constants.INIC_IP] = None
+        else:
+          if ip.lower() == constants.NIC_IP_POOL:
+            if op == constants.DDM_ADD and req_net is None:
+              raise errors.OpPrereqError("If ip=pool, parameter network"
+                                         " cannot be none",
+                                         errors.ECODE_INVAL)
+          else:
+            if not netutils.IPAddress.IsValid(ip):
+              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
+                                         errors.ECODE_INVAL)
+
+      if constants.INIC_MAC in params:
+        macaddr = params[constants.INIC_MAC]
+        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
+          macaddr = utils.NormalizeAndValidateMac(macaddr)
+
+        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
+          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
+                                     " modifying an existing NIC",
+                                     errors.ECODE_INVAL)
+
+  def _LookupDiskIndex(self, idx):
+    """Looks up uuid or name of disk if necessary."""
+    try:
+      return int(idx)
+    except ValueError:
+      pass
+    for i, d in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)):
+      if d.name == idx or d.uuid == idx:
+        return i
+    raise errors.OpPrereqError("Lookup of disk %r failed" % idx)
+
+  def _LookupDiskMods(self):
+    """Looks up uuid or name of disk if necessary."""
+    return [(op, self._LookupDiskIndex(idx), params)
+            for op, idx, params in self.op.disks]
+
+  def CheckArguments(self):
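+    """Check the static validity of the requested modifications.
+
+    """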
+    if not (self.op.nics or self.op.disks or self.op.disk_template or
+            self.op.hvparams or self.op.beparams or self.op.os_name or
+            self.op.osparams or self.op.offline is not None or
+            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
+            self.op.instance_communication is not None):
+      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
+
+    if self.op.hvparams:
+      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
+                           "hypervisor", "instance", "cluster")
+
+    self.op.disks = self._UpgradeDiskNicMods(
+      "disk", self.op.disks,
+      ht.TSetParamsMods(ht.TIDiskParams))
+    self.op.nics = self._UpgradeDiskNicMods(
+      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
+
+    # Check disk template modifications
+    if self.op.disk_template:
+      if self.op.disks:
+        raise errors.OpPrereqError("Disk template conversion and other disk"
+                                   " changes not supported at the same time",
+                                   errors.ECODE_INVAL)
+
+      # mirrored template node checks
+      if self.op.disk_template in constants.DTS_INT_MIRROR:
+        if not self.op.remote_node:
+          raise errors.OpPrereqError("Changing the disk template to a mirrored"
+                                     " one requires specifying a secondary"
+                                     " node", errors.ECODE_INVAL)
+      elif self.op.remote_node:
+        self.LogWarning("Changing the disk template to a non-mirrored one,"
+                        " the secondary node will be ignored")
+        # the secondary node must be cleared in order to be ignored, otherwise
+        # the operation will fail, in the GenerateDiskTemplate method
+        self.op.remote_node = None
+
+      # file-based template checks
+      if self.op.disk_template in constants.DTS_FILEBASED:
+        self._FillFileDriver()
+
+    # Check NIC modifications
+    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
+                    self._VerifyNicModification)
+
+    if self.op.pnode:
+      (self.op.pnode_uuid, self.op.pnode) = \
+        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
+
+  def _CheckAttachDisk(self, params):
+    """Check if disk can be attached to an instance.
+
+    Check if the disk and instance have the same template. Also, check if the
+    disk nodes are visible from the instance.
+    """
+    uuid = params.get("uuid", None)
+    name = params.get(constants.IDISK_NAME, None)
+
+    disk = self.GenericGetDiskInfo(uuid, name)
+    instance_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
+    if (disk.dev_type != instance_template and
+        instance_template != constants.DT_DISKLESS):
+      raise errors.OpPrereqError("Instance has '%s' template while disk has"
+                                 " '%s' template" %
+                                 (instance_template, disk.dev_type),
+                                 errors.ECODE_INVAL)
+
+    instance_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
+    # Make sure we do not attach disks to instances on wrong nodes. If the
+    # instance is diskless, it is associated only with the primary node,
+    # whereas the disk can be associated with two nodes in the case of DRBD;
+    # hence the subset check here.
+    if disk.nodes and not set(instance_nodes).issubset(set(disk.nodes)):
+      raise errors.OpPrereqError("Disk nodes are %s while the instance's nodes"
+                                 " are %s" %
+                                 (disk.nodes, instance_nodes),
+                                 errors.ECODE_INVAL)
+    # Make sure a DRBD disk has the same primary node as the instance where it
+    # will be attached to.
+    disk_primary = disk.GetPrimaryNode(self.instance.primary_node)
+    if self.instance.primary_node != disk_primary:
+      raise errors.OpExecError("The disks' primary node is %s whereas the "
+                               "instance's primary node is %s."
+                               % (disk_primary, self.instance.primary_node))
+
+  def ExpandNames(self):
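+    """Expand the instance name and declare the required locks.
+
+    """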
+    self._ExpandAndLockInstance()
+    self.needed_locks[locking.LEVEL_NODEGROUP] = []
+    # Can't even acquire node locks in shared mode as upcoming changes in
+    # Ganeti 2.6 will start to modify the node object on disk conversion
+    self.needed_locks[locking.LEVEL_NODE] = []
+    self.needed_locks[locking.LEVEL_NODE_RES] = []
+    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+    # Lock the node group to look up the ipolicy
+    self.share_locks[locking.LEVEL_NODEGROUP] = 1
+    self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
+    self.dont_collate_locks[locking.LEVEL_NODE] = True
+    self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
+
+  def DeclareLocks(self, level):
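+    """Compute the lock set for the given locking level.
+
+    """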
+    if level == locking.LEVEL_NODEGROUP:
+      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
+      # Acquire locks for the instance's nodegroups optimistically. Needs
+      # to be verified in CheckPrereq
+      self.needed_locks[locking.LEVEL_NODEGROUP] = \
+        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
+    elif level == locking.LEVEL_NODE:
+      self._LockInstancesNodes()
+      if self.op.disk_template and self.op.remote_node:
+        (self.op.remote_node_uuid, self.op.remote_node) = \
+          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
+                                self.op.remote_node)
+        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
+    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
+      # Copy node locks
+      self.needed_locks[locking.LEVEL_NODE_RES] = \
+        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    This runs on the master, primary and secondaries.
+
+    """
+    args = {}
+    if constants.BE_MINMEM in self.be_new:
+      args["minmem"] = self.be_new[constants.BE_MINMEM]
+    if constants.BE_MAXMEM in self.be_new:
+      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
+    if constants.BE_VCPUS in self.be_new:
+      args["vcpus"] = self.be_new[constants.BE_VCPUS]
+    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
+    # information at all.
+
+    if self._new_nics is not None:
+      nics = []
+
+      for nic in self._new_nics:
+        n = copy.deepcopy(nic)
+        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
+        n.nicparams = nicparams
+        nics.append(NICToTuple(self, n))
+
+      args["nics"] = nics
+
+    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
+    if self.op.disk_template:
+      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
+    if self.op.runtime_mem:
+      env["RUNTIME_MEMORY"] = self.op.runtime_mem
+
+    return env
+
+  def BuildHooksNodes(self):
+    """Build hooks nodes.
+
+    """
+    nl = [self.cfg.GetMasterNode()] + \
+        list(self.cfg.GetInstanceNodes(self.instance.uuid))
+    return (nl, nl)
+
+  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
+                              old_params, cluster, pnode_uuid):
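+    """Validate a single NIC modification and compute the new parameters.
+
+    The resulting parameter dicts are stored in C{private.params} and
+    C{private.filled}.
+
+    """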
+
+    update_params_dict = dict([(key, params[key])
+                               for key in constants.NICS_PARAMETERS
+                               if key in params])
+
+    req_link = update_params_dict.get(constants.NIC_LINK, None)
+    req_mode = update_params_dict.get(constants.NIC_MODE, None)
+
+    new_net_uuid = None
+    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
+    if new_net_uuid_or_name:
+      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
+      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
+
+    if old_net_uuid:
+      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
+
+    if new_net_uuid:
+      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
+      if not netparams:
+        raise errors.OpPrereqError("No netparams found for the network"
+                                   " %s, probably not connected" %
+                                   new_net_obj.name, errors.ECODE_INVAL)
+      new_params = dict(netparams)
+    else:
+      new_params = GetUpdatedParams(old_params, update_params_dict)
+
+    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
+
+    new_filled_params = cluster.SimpleFillNIC(new_params)
+    objects.NIC.CheckParameterSyntax(new_filled_params)
+
+    new_mode = new_filled_params[constants.NIC_MODE]
+    if new_mode == constants.NIC_MODE_BRIDGED:
+      bridge = new_filled_params[constants.NIC_LINK]
+      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
+      if msg:
+        msg = "Error checking bridges on node '%s': %s" % \
+                (self.cfg.GetNodeName(pnode_uuid), msg)
+        if self.op.force:
+          self.warn.append(msg)
+        else:
+          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
+
+    elif new_mode == constants.NIC_MODE_ROUTED:
+      ip = params.get(constants.INIC_IP, old_ip)
+      if ip is None and not new_net_uuid:
+        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
+                                   " on a routed NIC if not attached to a"
+                                   " network", errors.ECODE_INVAL)
+
+    elif new_mode == constants.NIC_MODE_OVS:
+      # TODO: check OVS link
+      self.LogInfo("OVS links are currently not checked for correctness")
+
+    if constants.INIC_MAC in params:
+      mac = params[constants.INIC_MAC]
+      if mac is None:
+        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
+                                   errors.ECODE_INVAL)
+      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
+        # otherwise generate the MAC address
+        params[constants.INIC_MAC] = \
+          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
+      else:
+        # or validate/reserve the current one
+        try:
+          self.cfg.ReserveMAC(mac, self.proc.GetECId())
+        except errors.ReservationError:
+          raise errors.OpPrereqError("MAC address '%s' already in use"
+                                     " in cluster" % mac,
+                                     errors.ECODE_NOTUNIQUE)
+    elif new_net_uuid != old_net_uuid:
+
+      def get_net_prefix(net_uuid):
+        mac_prefix = None
+        if net_uuid:
+          nobj = self.cfg.GetNetwork(net_uuid)
+          mac_prefix = nobj.mac_prefix
+
+        return mac_prefix
+
+      new_prefix = get_net_prefix(new_net_uuid)
+      old_prefix = get_net_prefix(old_net_uuid)
+      if old_prefix != new_prefix:
+        params[constants.INIC_MAC] = \
+          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
+
+    # if there is a change in (ip, network) tuple
+    new_ip = params.get(constants.INIC_IP, old_ip)
+    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
+      if new_ip:
+        # if IP is pool then require a network and generate one IP
+        if new_ip.lower() == constants.NIC_IP_POOL:
+          if new_net_uuid:
+            try:
+              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
+            except errors.ReservationError:
+              raise errors.OpPrereqError("Unable to get a free IP"
+                                         " from the address pool",
+                                         errors.ECODE_STATE)
+            self.LogInfo("Chose IP %s from network %s",
+                         new_ip,
+                         new_net_obj.name)
+            params[constants.INIC_IP] = new_ip
+          else:
+            raise errors.OpPrereqError("ip=pool, but no network found",
+                                       errors.ECODE_INVAL)
+        # Reserve the new IP in the new network, if any
+        elif new_net_uuid:
+          try:
+            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
+                               check=self.op.conflicts_check)
+            self.LogInfo("Reserving IP %s in network %s",
+                         new_ip, new_net_obj.name)
+          except errors.ReservationError:
+            raise errors.OpPrereqError("IP %s not available in network %s" %
+                                       (new_ip, new_net_obj.name),
+                                       errors.ECODE_NOTUNIQUE)
+        # new network is None so check if new IP is a conflicting IP
+        elif self.op.conflicts_check:
+          CheckForConflictingIp(self, new_ip, pnode_uuid)
+
+      # release old IP if old network is not None
+      if old_ip and old_net_uuid:
+        try:
+          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
+        except errors.AddressPoolError:
+          logging.warning("Release IP %s not contained in network %s",
+                          old_ip, old_net_obj.name)
+
+    # there are no changes in (ip, network) tuple and old network is not None
+    elif (old_net_uuid is not None and
+          (req_link is not None or req_mode is not None)):
+      raise errors.OpPrereqError("Not allowed to change link or mode of"
+                                 " a NIC that is connected to a network",
+                                 errors.ECODE_INVAL)
+
+    private.params = new_params
+    private.filled = new_filled_params
+
+  def _PreCheckDiskTemplate(self, pnode_info):
+    """CheckPrereq checks related to a new disk template."""
+    # Arguments are passed to avoid configuration lookups
+    pnode_uuid = self.instance.primary_node
+
+    # TODO make sure heterogeneous disk types can be converted.
+    disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
+    if disk_template == constants.DT_MIXED:
+      raise errors.OpPrereqError(
+          "Conversion from mixed is not yet supported.")
+
+    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+    if utils.AnyDiskOfType(inst_disks, constants.DTS_NOT_CONVERTIBLE_FROM):
+      raise errors.OpPrereqError(
+          "Conversion from the '%s' disk template is not supported"
+          % self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
+          errors.ECODE_INVAL)
+
+    elif self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO:
+      raise errors.OpPrereqError("Conversion to the '%s' disk template is"
+                                 " not supported" % self.op.disk_template,
+                                 errors.ECODE_INVAL)
+
+    if (self.op.disk_template != constants.DT_EXT and
+        utils.AllDiskOfType(inst_disks, [self.op.disk_template])):
+      raise errors.OpPrereqError("Instance already has disk template %s" %
+                                 self.op.disk_template, errors.ECODE_INVAL)
+
+    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
+      enabled_dts = utils.CommaJoin(self.cluster.enabled_disk_templates)
+      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
+                                 " cluster (enabled templates: %s)" %
+                                 (self.op.disk_template, enabled_dts),
+                                 errors.ECODE_STATE)
+
+    default_vg = self.cfg.GetVGName()
+    if (not default_vg and
+        self.op.disk_template not in constants.DTS_NOT_LVM):
+      raise errors.OpPrereqError("Disk template conversions to lvm-based"
+                                 " instances are not supported by the cluster",
+                                 errors.ECODE_STATE)
+
+    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
+                       msg="cannot change disk template")
+
+    # compute new disks' information
+    self.disks_info = ComputeDisksInfo(inst_disks, self.op.disk_template,
+                                       default_vg, self.op.ext_params)
+
+    # mirror node verification
+    if self.op.disk_template in constants.DTS_INT_MIRROR:
+      if self.op.remote_node_uuid == pnode_uuid:
+        raise errors.OpPrereqError("Given new secondary node %s is the same"
+                                   " as the primary node of the instance" %
+                                   self.op.remote_node, errors.ECODE_STATE)
+      CheckNodeOnline(self, self.op.remote_node_uuid)
+      CheckNodeNotDrained(self, self.op.remote_node_uuid)
+      CheckNodeVmCapable(self, self.op.remote_node_uuid)
+
+      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
+      snode_group = self.cfg.GetNodeGroup(snode_info.group)
+      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
+                                                              snode_group)
+      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
+                             ignore=self.op.ignore_ipolicy)
+      if pnode_info.group != snode_info.group:
+        self.LogWarning("The primary and secondary nodes are in two"
+                        " different node groups; the disk parameters"
+                        " from the first disk's node group will be"
+                        " used")
+
+    # check that the template is in the primary node group's allowed templates
+    pnode_group = self.cfg.GetNodeGroup(pnode_info.group)
+    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
+                                                            pnode_group)
+    allowed_dts = ipolicy[constants.IPOLICY_DTS]
+    if self.op.disk_template not in allowed_dts:
+      raise errors.OpPrereqError("Disk template '%s' in not allowed (allowed"
+                                 " templates: %s)" % (self.op.disk_template,
+                                 utils.CommaJoin(allowed_dts)),
+                                 errors.ECODE_STATE)
+
+    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
+      # Make sure none of the nodes require exclusive storage
+      nodes = [pnode_info]
+      if self.op.disk_template in constants.DTS_INT_MIRROR:
+        assert snode_info
+        nodes.append(snode_info)
+      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
+      if compat.any(map(has_es, nodes)):
+        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
+                  " storage is enabled" % (
+                      self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
+                      self.op.disk_template))
+        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
+
+    # TODO remove setting the disk template after DiskSetParams exists.
+    # node capacity checks
+    if (self.op.disk_template == constants.DT_PLAIN and
+        utils.AllDiskOfType(inst_disks, [constants.DT_DRBD8])):
+      # we ensure that no capacity checks will be made for conversions from
+      # the 'drbd' to the 'plain' disk template
+      pass
+    elif (self.op.disk_template == constants.DT_DRBD8 and
+          utils.AllDiskOfType(inst_disks, [constants.DT_PLAIN])):
+      # for conversions from the 'plain' to the 'drbd' disk template, check
+      # only the remote node's capacity
+      req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
+      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], req_sizes)
+    elif self.op.disk_template in constants.DTS_LVM:
+      # rest lvm-based capacity checks
+      node_uuids = [pnode_uuid]
+      if self.op.remote_node_uuid:
+        node_uuids.append(self.op.remote_node_uuid)
+      req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
+      CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
+    elif self.op.disk_template == constants.DT_RBD:
+      # CheckRADOSFreeSpace() is simply a placeholder
+      CheckRADOSFreeSpace()
+    elif self.op.disk_template == constants.DT_EXT:
+      # FIXME: Capacity checks for extstorage template, if exists
+      pass
+    else:
+      # FIXME: Checks about other non lvm-based disk templates
+      pass
+
+  def _PreCheckDisks(self, ispec):
+    """CheckPrereq checks related to disk changes.
+
+    @type ispec: dict
+    @param ispec: instance specs to be updated with the new disks
+
+    """
+    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
+
+    inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
+    excl_stor = compat.any(
+      rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values()
+      )
+
+    # Get the group access type
+    node_info = self.cfg.GetNodeInfo(self.instance.primary_node)
+    node_group = self.cfg.GetNodeGroup(node_info.group)
+    group_disk_params = self.cfg.GetGroupDiskParams(node_group)
+
+    group_access_types = dict(
+        (dt, group_disk_params[dt].get(
+            constants.RBD_ACCESS, constants.DISK_KERNELSPACE))
+        for dt in constants.DISK_TEMPLATES)
+
+    # Check disk modifications. This is done here and not in CheckArguments
+    # (as with NICs), because we need to know the instance's disk template
+    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor,
+                                                          group_access_types)
+    # Don't enforce param types here in case an ext disk is added; the check
+    # happens inside _VerifyDiskModification.
+    self._CheckMods("disk", self.op.disks, {}, ver_fn)
+
+    self.diskmod = PrepareContainerMods(self.op.disks, None)
+
+    def _PrepareDiskMod(_, disk, params, __):
+      disk.name = params.get(constants.IDISK_NAME, None)
+
+    # Verify disk changes (operating on a copy)
+    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+    disks = copy.deepcopy(inst_disks)
+    ApplyContainerMods("disk", disks, None, self.diskmod, None, None,
+                       _PrepareDiskMod, None, None)
+    utils.ValidateDeviceNames("disk", disks)
+    if len(disks) > constants.MAX_DISKS:
+      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
+                                 " more" % constants.MAX_DISKS,
+                                 errors.ECODE_STATE)
+    disk_sizes = [disk.size for disk in inst_disks]
+    disk_sizes.extend(params["size"] for (op, idx, params, private) in
+                      self.diskmod if op == constants.DDM_ADD)
+    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
+    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
+
+    # either --online or --offline was passed
+    if self.op.offline is not None:
+      if self.op.offline:
+        msg = "can't change to offline without being down first"
+      else:
+        msg = "can't change to online (down) without being offline first"
+      CheckInstanceState(self, self.instance, INSTANCE_NOT_RUNNING,
+                         msg=msg)
+
+  @staticmethod
+  def _InstanceCommunicationDDM(cfg, instance_communication, instance):
+    """Create a NIC mod that adds or removes the instance
+    communication NIC to a running instance.
+
+    The NICS are dynamically created using the Dynamic Device
+    Modification (DDM).  This function produces a NIC modification
+    (mod) that inserts an additional NIC meant for instance
+    communication in or removes an existing instance communication NIC
+    from a running instance, using DDM.
+
+    @type cfg: L{config.ConfigWriter}
+    @param cfg: cluster configuration
+
+    @type instance_communication: boolean
+    @param instance_communication: whether instance communication is
+                                   enabled or disabled
+
+    @type instance: L{objects.Instance}
+    @param instance: instance to which the NIC mod will be applied
+
+    @rtype: (L{constants.DDM_ADD}, -1, parameters) or
+            (L{constants.DDM_REMOVE}, -1, parameters) or
+            L{None}
+    @return: DDM mod containing an action to add or remove the NIC, or
+             None if nothing needs to be done
+
+    """
+    nic_name = ComputeInstanceCommunicationNIC(instance.name)
+
+    instance_communication_nic = None
+
+    for nic in instance.nics:
+      if nic.name == nic_name:
+        instance_communication_nic = nic
+        break
+
+    if instance_communication and not instance_communication_nic:
+      action = constants.DDM_ADD
+      params = {constants.INIC_NAME: nic_name,
+                constants.INIC_MAC: constants.VALUE_GENERATE,
+                constants.INIC_IP: constants.NIC_IP_POOL,
+                constants.INIC_NETWORK:
+                  cfg.GetInstanceCommunicationNetwork()}
+    elif not instance_communication and instance_communication_nic:
+      action = constants.DDM_REMOVE
+      params = None
+    else:
+      action = None
+      params = None
+
+    if action is not None:
+      return (action, -1, params)
+    else:
+      return None
+
+  def _GetInstanceInfo(self, cluster_hvparams):
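+    """Return runtime information about the instance from its primary node.
+
+    """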
+    pnode_uuid = self.instance.primary_node
+    instance_info = self.rpc.call_instance_info(
+        pnode_uuid, self.instance.name, self.instance.hypervisor,
+        cluster_hvparams)
+    return instance_info
+
+  def _CheckHotplug(self):
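+    """Check whether hotplug is supported and adjust C{self.op.hotplug}.
+
+    """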
+    if self.op.hotplug or self.op.hotplug_if_possible:
+      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
+                                               self.instance)
+      if result.fail_msg:
+        if self.op.hotplug:
+          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
+                       prereq=True, ecode=errors.ECODE_STATE)
+        else:
+          self.LogWarning(result.fail_msg)
+          self.op.hotplug = False
+          self.LogInfo("Modification will take place without hotplugging.")
+      else:
+        self.op.hotplug = True
+
+  def _PrepareNicCommunication(self):
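+    """Queue the instance communication NIC change and prepare the NIC mods.
+
+    """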
+    # add or remove NIC for instance communication
+    if self.op.instance_communication is not None:
+      mod = self._InstanceCommunicationDDM(self.cfg,
+                                           self.op.instance_communication,
+                                           self.instance)
+      if mod is not None:
+        self.op.nics.append(mod)
+
+    self.nicmod = PrepareContainerMods(self.op.nics, InstNicModPrivate)
+
+  def _ProcessHVParams(self, node_uuids):
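+    """Compute and validate the new hypervisor parameters.
+
+    """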
+    if self.op.hvparams:
+      hv_type = self.instance.hypervisor
+      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
+      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
+      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
+
+      # local check
+      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
+      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
+      self.hv_proposed = self.hv_new = hv_new # the new actual values
+      self.hv_inst = i_hvdict # the new dict (without defaults)
+    else:
+      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
+                                                   self.instance.os,
+                                                   self.instance.hvparams)
+      self.hv_new = self.hv_inst = {}
+
+  def _ProcessBeParams(self):
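+    """Compute the new backend parameters.
+
+    @return: the currently filled backend parameters of the instance
+
+    """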
+    if self.op.beparams:
+      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
+                                  use_none=True)
+      objects.UpgradeBeParams(i_bedict)
+      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
+      be_new = self.cluster.SimpleFillBE(i_bedict)
+      self.be_proposed = self.be_new = be_new # the new actual values
+      self.be_inst = i_bedict # the new dict (without defaults)
+    else:
+      self.be_new = self.be_inst = {}
+      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
+    return self.cluster.FillBE(self.instance)
+
+  def _ValidateCpuParams(self):
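+    """Check the CPU mask against the vCPU count and the physical CPUs.
+
+    """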
+    # CPU param validation -- checking every time a parameter is
+    # changed to cover all cases where either CPU mask or vcpus have
+    # changed
+    if (constants.BE_VCPUS in self.be_proposed and
+        constants.HV_CPU_MASK in self.hv_proposed):
+      cpu_list = \
+        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
+      # Verify mask is consistent with number of vCPUs. Can skip this
+      # test if only 1 entry in the CPU mask, which means same mask
+      # is applied to all vCPUs.
+      if (len(cpu_list) > 1 and
+          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
+        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
+                                   " CPU mask [%s]" %
+                                   (self.be_proposed[constants.BE_VCPUS],
+                                    self.hv_proposed[constants.HV_CPU_MASK]),
+                                   errors.ECODE_INVAL)
+
+      # Only perform this test if a new CPU mask is given
+      if constants.HV_CPU_MASK in self.hv_new and cpu_list:
+        # Calculate the largest CPU number requested
+        max_requested_cpu = max(map(max, cpu_list))
+        # Check that all of the instance's nodes have enough physical CPUs to
+        # satisfy the requested CPU mask
+        hvspecs = [(self.instance.hypervisor,
+                    self.cfg.GetClusterInfo()
+                      .hvparams[self.instance.hypervisor])]
+        CheckNodesPhysicalCPUs(self,
+                               self.cfg.GetInstanceNodes(self.instance.uuid),
+                               max_requested_cpu + 1,
+                               hvspecs)
+
+  def _ProcessOsParams(self, node_uuids):
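+    """Compute and validate the new OS parameters.
+
+    """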
+    # osparams processing
+    instance_os = (self.op.os_name
+                   if self.op.os_name and not self.op.force
+                   else self.instance.os)
+
+    if self.op.osparams or self.op.osparams_private:
+      public_parms = self.op.osparams or {}
+      private_parms = self.op.osparams_private or {}
+      dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)
+
+      if dupe_keys:
+        raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
+                                   utils.CommaJoin(dupe_keys))
+
+      self.os_inst = GetUpdatedParams(self.instance.osparams,
+                                      public_parms)
+      self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
+                                              private_parms)
+
+      CheckOSParams(self, True, node_uuids, instance_os,
+                    objects.FillDict(self.os_inst,
+                                     self.os_inst_private),
+                    self.op.force_variant)
+
+    else:
+      self.os_inst = {}
+      self.os_inst_private = {}
+
+  def _ProcessMem(self, cluster_hvparams, be_old, pnode_uuid):
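+    """Check the memory-related constraints of the requested changes.
+
+    """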
+    #TODO(dynmem): do the appropriate check involving MINMEM
+    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
+        self.be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
+      mem_check_list = [pnode_uuid]
+      if self.be_new[constants.BE_AUTO_BALANCE]:
+        # either we changed auto_balance to yes or it was from before
+        mem_check_list.extend(
+          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid))
+      instance_info = self._GetInstanceInfo(cluster_hvparams)
+      hvspecs = [(self.instance.hypervisor,
+                  cluster_hvparams)]
+      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
+                                         hvspecs)
+      pninfo = nodeinfo[pnode_uuid]
+      msg = pninfo.fail_msg
+      if msg:
+        # Assume the primary node is unreachable and go ahead
+        self.warn.append("Can't get info from primary node %s: %s" %
+                         (self.cfg.GetNodeName(pnode_uuid), msg))
+      else:
+        (_, _, (pnhvinfo, )) = pninfo.payload
+        if not isinstance(pnhvinfo.get("memory_free", None), int):
+          self.warn.append("Node data from primary node %s doesn't contain"
+                           " free memory information" %
+                           self.cfg.GetNodeName(pnode_uuid))
+        elif instance_info.fail_msg:
+          self.warn.append("Can't get instance runtime information: %s" %
+                           instance_info.fail_msg)
+        else:
+          if instance_info.payload:
+            current_mem = int(instance_info.payload["memory"])
+          else:
+            # Assume instance not running
+            # (there is a slight race condition here, but it's not very
+            # probable, and we have no other way to check)
+            # TODO: Describe race condition
+            current_mem = 0
+          #TODO(dynmem): do the appropriate check involving MINMEM
+          miss_mem = (self.be_new[constants.BE_MAXMEM] - current_mem -
+                      pnhvinfo["memory_free"])
+          if miss_mem > 0:
+            raise errors.OpPrereqError("This change will prevent the instance"
+                                       " from starting, due to %d MB of memory"
+                                       " missing on its primary node" %
+                                       miss_mem, errors.ECODE_NORES)
+
+      if self.be_new[constants.BE_AUTO_BALANCE]:
+        secondary_nodes = \
+          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
+        for node_uuid, nres in nodeinfo.items():
+          if node_uuid not in secondary_nodes:
+            continue
+          nres.Raise("Can't get info from secondary node %s" %
+                     self.cfg.GetNodeName(node_uuid), prereq=True,
+                     ecode=errors.ECODE_STATE)
+          (_, _, (nhvinfo, )) = nres.payload
+          if not isinstance(nhvinfo.get("memory_free", None), int):
+            raise errors.OpPrereqError("Secondary node %s didn't return free"
+                                       " memory information" %
+                                       self.cfg.GetNodeName(node_uuid),
+                                       errors.ECODE_STATE)
+          #TODO(dynmem): do the appropriate check involving MINMEM
+          elif self.be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
+            raise errors.OpPrereqError("This change will prevent the instance"
+                                       " from failover to its secondary node"
+                                       " %s, due to not enough memory" %
+                                       self.cfg.GetNodeName(node_uuid),
+                                       errors.ECODE_STATE)
+
+    if self.op.runtime_mem:
+      remote_info = self.rpc.call_instance_info(
+         self.instance.primary_node, self.instance.name,
+         self.instance.hypervisor,
+         cluster_hvparams)
+      remote_info.Raise("Error checking node %s" %
+                        self.cfg.GetNodeName(self.instance.primary_node),
+                        prereq=True)
+      if not remote_info.payload: # not running already
+        raise errors.OpPrereqError("Instance %s is not running" %
+                                   self.instance.name, errors.ECODE_STATE)
+
+      current_memory = remote_info.payload["memory"]
+      if (not self.op.force and
+           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
+            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
+        raise errors.OpPrereqError("Instance %s must have memory between %d"
+                                   " and %d MB of memory unless --force is"
+                                   " given" %
+                                   (self.instance.name,
+                                    self.be_proposed[constants.BE_MINMEM],
+                                    self.be_proposed[constants.BE_MAXMEM]),
+                                   errors.ECODE_INVAL)
+
+      delta = self.op.runtime_mem - current_memory
+      if delta > 0:
+        CheckNodeFreeMemory(
+            self, self.instance.primary_node,
+            "ballooning memory for instance %s" % self.instance.name, delta,
+            self.instance.hypervisor,
+            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks the requested modifications against the instance's current
+    configuration and the cluster state.
+
+    """
+    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
+    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
+    self.cluster = self.cfg.GetClusterInfo()
+    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]
+
+    self.op.disks = self._LookupDiskMods()
+
+    assert self.instance is not None, \
+      "Cannot retrieve locked instance %s" % self.op.instance_name
+
+    self.warn = []
+
+    if (self.op.pnode_uuid is not None and
+        self.op.pnode_uuid != self.instance.primary_node and
+        not self.op.force):
+      instance_info = self._GetInstanceInfo(cluster_hvparams)
+
+      if instance_info.fail_msg:
+        self.warn.append("Can't get instance runtime information: %s" %
+                         instance_info.fail_msg)
+      elif instance_info.payload:
+        raise errors.OpPrereqError(
+            "Instance is still running on %s" %
+            self.cfg.GetNodeName(self.instance.primary_node),
+            errors.ECODE_STATE)
+    pnode_uuid = self.instance.primary_node
+    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
+
+    node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid))
+    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
+
+    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
+    group_info = self.cfg.GetNodeGroup(pnode_info.group)
+
+    # dictionary with instance information after the modification
+    ispec = {}
+
+    self._CheckHotplug()
+
+    self._PrepareNicCommunication()
+
+    # disks processing
+    assert not (self.op.disk_template and self.op.disks), \
+      "Can't modify disk template and apply disk changes at the same time"
+
+    if self.op.disk_template:
+      self._PreCheckDiskTemplate(pnode_info)
+
+    self._PreCheckDisks(ispec)
+
+    self._ProcessHVParams(node_uuids)
+    be_old = self._ProcessBeParams()
+
+    self._ValidateCpuParams()
+    self._ProcessOsParams(node_uuids)
+    self._ProcessMem(cluster_hvparams, be_old, pnode_uuid)
+
+    # make self.cluster visible in the functions below
+    cluster = self.cluster
+
+    def _PrepareNicCreate(_, params, private):
+      self._PrepareNicModification(params, private, None, None,
+                                   {}, cluster, pnode_uuid)
+      return (None, None)
+
+    def _PrepareNicAttach(_, __, ___):
+      raise errors.OpPrereqError("Attach operation is not supported for NICs",
+                                 errors.ECODE_INVAL)
+
+    def _PrepareNicMod(_, nic, params, private):
+      self._PrepareNicModification(params, private, nic.ip, nic.network,
+                                   nic.nicparams, cluster, pnode_uuid)
+      return None
+
+    def _PrepareNicRemove(_, params, __):
+      ip = params.ip
+      net = params.network
+      if net is not None and ip is not None:
+        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
+
+    def _PrepareNicDetach(_, __, ___):
+      raise errors.OpPrereqError("Detach operation is not supported for NICs",
+                                 errors.ECODE_INVAL)
+
+    # Verify NIC changes (operating on copy)
+    nics = [nic.Copy() for nic in self.instance.nics]
+    ApplyContainerMods("NIC", nics, None, self.nicmod, _PrepareNicCreate,
+                       _PrepareNicAttach, _PrepareNicMod, _PrepareNicRemove,
+                       _PrepareNicDetach)
+    if len(nics) > constants.MAX_NICS:
+      raise errors.OpPrereqError("Instance has too many network interfaces"
+                                 " (%d), cannot add more" % constants.MAX_NICS,
+                                 errors.ECODE_STATE)
+
+    # Pre-compute NIC changes (necessary to use result in hooks)
+    self._nic_chgdesc = []
+    if self.nicmod:
+      # Operate on copies as this is still in prereq
+      nics = [nic.Copy() for nic in self.instance.nics]
+      ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
+                         self._CreateNewNic, None, self._ApplyNicMods,
+                         self._RemoveNic, None)
+      # Verify that NIC names are unique and valid
+      utils.ValidateDeviceNames("NIC", nics)
+      self._new_nics = nics
+      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
+    else:
+      self._new_nics = None
+      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
+
+    if not self.op.ignore_ipolicy:
+      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
+                                                              group_info)
+
+      # Fill ispec with backend parameters
+      ispec[constants.ISPEC_SPINDLE_USE] = \
+        self.be_new.get(constants.BE_SPINDLE_USE, None)
+      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
+                                                         None)
+
+      # Copy ispec to verify parameters with min/max values separately
+      if self.op.disk_template:
+        count = ispec[constants.ISPEC_DISK_COUNT]
+        new_disk_types = [self.op.disk_template] * count
+      else:
+        old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+        add_disk_count = ispec[constants.ISPEC_DISK_COUNT] - len(old_disks)
+        dev_type = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
+        if dev_type == constants.DT_DISKLESS and add_disk_count != 0:
+          raise errors.ProgrammerError(
+              "Conversion from diskless instance not possible and should have"
+              " been caught")
+
+        new_disk_types = ([d.dev_type for d in old_disks] +
+                          [dev_type] * add_disk_count)
+      ispec_max = ispec.copy()
+      ispec_max[constants.ISPEC_MEM_SIZE] = \
+        self.be_new.get(constants.BE_MAXMEM, None)
+      res_max = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
+                                                    new_disk_types)
+      ispec_min = ispec.copy()
+      ispec_min[constants.ISPEC_MEM_SIZE] = \
+        self.be_new.get(constants.BE_MINMEM, None)
+      res_min = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
+                                                    new_disk_types)
+
+      if (res_max or res_min):
+        # FIXME: Improve error message by including information about whether
+        # the upper or lower limit of the parameter fails the ipolicy.
+        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
+               (group_info, group_info.name,
+                utils.CommaJoin(set(res_max + res_min))))
+        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+
+  def _ConvertInstanceDisks(self, feedback_fn):
+    """Converts the disks of an instance to another type.
+
+    This function converts the disks of an instance. It supports
+    conversions among all the available disk types except conversions
+    between the LVM-based disk types, that use their separate code path.
+    Also, this method does not support conversions that include the 'diskless'
+    template and those targeting the 'blockdev' template.
+
+    @type feedback_fn: callable
+    @param feedback_fn: function used to send feedback back to the caller
+
+    @rtype: NoneType
+    @return: None
+    @raise errors.OpPrereqError: in case of failure
+
+    """
+    template_info = self.op.disk_template
+    if self.op.disk_template == constants.DT_EXT:
+      template_info = ":".join([self.op.disk_template,
+                                self.op.ext_params["provider"]])
+
+    old_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
+    feedback_fn("Converting disk template from '%s' to '%s'" %
+                (old_template, template_info))
+
+    assert not (old_template in constants.DTS_NOT_CONVERTIBLE_FROM or
+                self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO), \
+      ("Unsupported disk template conversion from '%s' to '%s'" %
+       (old_template, self.op.disk_template))
+
+    pnode_uuid = self.instance.primary_node
+    snode_uuid = []
+    if self.op.remote_node_uuid:
+      snode_uuid = [self.op.remote_node_uuid]
+
+    old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+
+    feedback_fn("Generating new '%s' disk template..." % template_info)
+    file_storage_dir = CalculateFileStorageDir(
+        self.op.disk_template, self.cfg, self.instance.name,
+        file_storage_dir=self.op.file_storage_dir)
+    new_disks = GenerateDiskTemplate(self,
+                                     self.op.disk_template,
+                                     self.instance.uuid,
+                                     pnode_uuid,
+                                     snode_uuid,
+                                     self.disks_info,
+                                     file_storage_dir,
+                                     self.op.file_driver,
+                                     0,
+                                     feedback_fn,
+                                     self.diskparams)
+
+    # Create the new block devices for the instance.
+    feedback_fn("Creating new empty disks of type '%s'..." % template_info)
+    try:
+      CreateDisks(self, self.instance, disk_template=self.op.disk_template,
+                  disks=new_disks)
+    except errors.OpExecError:
+      self.LogWarning("Device creation failed")
+      for disk in new_disks:
+        self.cfg.ReleaseDRBDMinors(disk.uuid)
+      raise
+
+    # Transfer the data from the old to the newly created disks of the instance.
+    feedback_fn("Populating the new empty disks of type '%s'..." %
+                template_info)
+    for idx, (old, new) in enumerate(zip(old_disks, new_disks)):
+      feedback_fn(" - copying data from disk %s (%s), size %s" %
+                  (idx, old.dev_type,
+                   utils.FormatUnit(new.size, "h")))
+      if old.dev_type == constants.DT_DRBD8:
+        old = old.children[0]
+      result = self.rpc.call_blockdev_convert(pnode_uuid, (old, self.instance),
+                                              (new, self.instance))
+      msg = result.fail_msg
+      if msg:
+        # A disk failed to copy. Abort the conversion operation and roll
+        # back the modifications to the previous state. The instance will
+        # remain intact.
+        if self.op.disk_template == constants.DT_DRBD8:
+          new = new.children[0]
+        self.Log(" - ERROR: Could not copy disk '%s' to '%s'" %
+                 (old.logical_id[1], new.logical_id[1]))
+        try:
+          self.LogInfo("Some disks failed to copy")
+          self.LogInfo("The instance will not be affected, aborting operation")
+          self.LogInfo("Removing newly created disks of type '%s'..." %
+                       template_info)
+          RemoveDisks(self, self.instance, disks=new_disks)
+          self.LogInfo("Newly created disks removed successfully")
+        finally:
+          for disk in new_disks:
+            self.cfg.ReleaseDRBDMinors(disk.uuid)
+          result.Raise("Error while converting the instance's template")
+
+    # In case of DRBD disk, return its port to the pool
+    for disk in old_disks:
+      if disk.dev_type == constants.DT_DRBD8:
+        tcp_port = disk.logical_id[2]
+        self.cfg.AddTcpUdpPort(tcp_port)
+
+    # Remove old disks from the instance.
+    feedback_fn("Detaching old disks (%s) from the instance and removing"
+                " them from cluster config" % old_template)
+    for old_disk in old_disks:
+      self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
+
+    # Attach the new disks to the instance.
+    feedback_fn("Adding new disks (%s) to cluster config and attaching"
+                " them to the instance" % template_info)
+    for (idx, new_disk) in enumerate(new_disks):
+      self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
+
+    # Re-read the instance from the configuration.
+    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
+
+    # Release node locks while waiting for sync and disks removal.
+    ReleaseLocks(self, locking.LEVEL_NODE)
+
+    disk_abort = not WaitForSync(self, self.instance,
+                                 oneshot=not self.op.wait_for_sync)
+    if disk_abort:
+      raise errors.OpExecError("There are some degraded disks for"
+                               " this instance, please clean up manually")
+
+    feedback_fn("Removing old block devices of type '%s'..." % old_template)
+    RemoveDisks(self, self.instance, disks=old_disks)
+
+    # Node resource locks will be released by the caller.
+
+  def _ConvertPlainToDrbd(self, feedback_fn):
+    """Converts an instance from plain to drbd.
+
+    """
+    feedback_fn("Converting disk template from 'plain' to 'drbd'")
+
+    pnode_uuid = self.instance.primary_node
+    snode_uuid = self.op.remote_node_uuid
+    old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+
+    assert utils.AnyDiskOfType(old_disks, [constants.DT_PLAIN])
+
+    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
+                                     self.instance.uuid, pnode_uuid,
+                                     [snode_uuid], self.disks_info,
+                                     None, None, 0,
+                                     feedback_fn, self.diskparams)
+    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
+    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
+    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
+    info = GetInstanceInfoText(self.instance)
+    feedback_fn("Creating additional volumes...")
+    # first, create the missing data and meta devices
+    for disk in anno_disks:
+      # unfortunately this is... not too nice
+      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
+                           info, True, p_excl_stor)
+      for child in disk.children:
+        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
+                             s_excl_stor)
+    # at this stage, all new LVs have been created, we can rename the
+    # old ones
+    feedback_fn("Renaming original volumes...")
+    rename_list = [(o, n.children[0].logical_id)
+                   for (o, n) in zip(old_disks, new_disks)]
+    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
+    result.Raise("Failed to rename original LVs")
+
+    feedback_fn("Initializing DRBD devices...")
+    # all child devices are in place, we can now create the DRBD devices
+    try:
+      for disk in anno_disks:
+        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
+                                       (snode_uuid, s_excl_stor)]:
+          f_create = node_uuid == pnode_uuid
+          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
+                               f_create, excl_stor)
+    except errors.GenericError, e:
+      feedback_fn("Initializing of DRBD devices failed;"
+                  " renaming back original volumes...")
+      rename_back_list = [(n.children[0], o.logical_id)
+                          for (n, o) in zip(new_disks, old_disks)]
+      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
+      result.Raise("Failed to rename LVs back after error %s" % str(e))
+      raise
+
+    # Remove the old disks from the instance
+    for old_disk in old_disks:
+      self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
+
+    # Attach the new disks to the instance
+    for (idx, new_disk) in enumerate(new_disks):
+      self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
+
+    # re-read the instance from the configuration
+    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
+
+    # Release node locks while waiting for sync
+    ReleaseLocks(self, locking.LEVEL_NODE)
+
+    # disks are created, waiting for sync
+    disk_abort = not WaitForSync(self, self.instance,
+                                 oneshot=not self.op.wait_for_sync)
+    if disk_abort:
+      raise errors.OpExecError("There are some degraded disks for"
+                               " this instance, please cleanup manually")
+
+    # Node resource locks will be released by caller
+
+  def _ConvertDrbdToPlain(self, feedback_fn):
+    """Converts an instance from drbd to plain.
+
+    """
+    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
+    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+    assert len(secondary_nodes) == 1
+    assert utils.AnyDiskOfType(disks, [constants.DT_DRBD8])
+
+    feedback_fn("Converting disk template from 'drbd' to 'plain'")
+
+    old_disks = AnnotateDiskParams(self.instance, disks, self.cfg)
+    new_disks = [d.children[0] for d in disks]
+
+    # copy over size, mode and name and set the correct nodes
+    for parent, child in zip(old_disks, new_disks):
+      child.size = parent.size
+      child.mode = parent.mode
+      child.name = parent.name
+      child.nodes = [self.instance.primary_node]
+
+    # this is a DRBD disk, return its port to the pool
+    for disk in old_disks:
+      tcp_port = disk.logical_id[2]
+      self.cfg.AddTcpUdpPort(tcp_port)
+
+    # Remove the old disks from the instance
+    for old_disk in old_disks:
+      self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
+
+    # Attach the new disks to the instance
+    for (idx, new_disk) in enumerate(new_disks):
+      self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
+
+    # re-read the instance from the configuration
+    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
+
+    # Release locks in case removing disks takes a while
+    ReleaseLocks(self, locking.LEVEL_NODE)
+
+    feedback_fn("Removing volumes on the secondary node...")
+    RemoveDisks(self, self.instance, disks=old_disks,
+                target_node_uuid=secondary_nodes[0])
+
+    feedback_fn("Removing unneeded volumes on the primary node...")
+    meta_disks = []
+    for idx, disk in enumerate(old_disks):
+      meta_disks.append(disk.children[1])
+    RemoveDisks(self, self.instance, disks=meta_disks)
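
The conversion keeps the data LVs in place: each DRBD device's first child
becomes the new plain disk, inheriting size, mode and name, while the meta
LVs are removed and the DRBD TCP port is returned to the pool. A minimal
sketch of the attribute copy only, with hypothetical stand-in objects
instead of objects.Disk::

  class FakeDisk(object):
    def __init__(self, size=0, mode="rw", name=None, children=None):
      self.size, self.mode, self.name = size, mode, name
      self.children = children or []
      self.nodes = []

  data_lv, meta_lv = FakeDisk(size=1024), FakeDisk(size=128)
  drbd = FakeDisk(size=1024, mode="rw", name="disk0",
                  children=[data_lv, meta_lv])

  new_disk = drbd.children[0]             # promote the data LV
  new_disk.size = drbd.size
  new_disk.mode = drbd.mode
  new_disk.name = drbd.name
  new_disk.nodes = ["primary-node-uuid"]  # plain disks live on the primary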
+
+  def _HotplugDevice(self, action, dev_type, device, extra, seq):
+    self.LogInfo("Trying to hotplug device...")
+    msg = "hotplug:"
+    result = self.rpc.call_hotplug_device(self.instance.primary_node,
+                                          self.instance, action, dev_type,
+                                          (device, self.instance),
+                                          extra, seq)
+    if result.fail_msg:
+      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
+      self.LogInfo("Continuing execution..")
+      msg += "failed"
+    else:
+      self.LogInfo("Hotplug done.")
+      msg += "done"
+    return msg
+
+  def _FillFileDriver(self):
+    if not self.op.file_driver:
+      self.op.file_driver = constants.FD_DEFAULT
+    elif self.op.file_driver not in constants.FILE_DRIVER:
+      raise errors.OpPrereqError("Invalid file driver name '%s'" %
+                                 self.op.file_driver, errors.ECODE_INVAL)
+
+  def _GenerateDiskTemplateWrapper(self, idx, disk_type, params):
+    file_path = CalculateFileStorageDir(
+        disk_type, self.cfg, self.instance.name,
+        file_storage_dir=self.op.file_storage_dir)
+
+    self._FillFileDriver()
+
+    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
+    return \
+      GenerateDiskTemplate(self, disk_type, self.instance.uuid,
+                           self.instance.primary_node, secondary_nodes,
+                           [params], file_path, self.op.file_driver, idx,
+                           self.Log, self.diskparams)[0]
+
+  def _CreateNewDisk(self, idx, params, _):
+    """Creates a new disk.
+
+    """
+    # add a new disk
+    disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
+    disk = self._GenerateDiskTemplateWrapper(idx, disk_template,
+                                             params)
+    new_disks = CreateDisks(self, self.instance, disks=[disk])
+    self.cfg.AddInstanceDisk(self.instance.uuid, disk, idx)
+
+    # re-read the instance from the configuration
+    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
+
+    if self.cluster.prealloc_wipe_disks:
+      # Wipe new disk
+      WipeOrCleanupDisks(self, self.instance,
+                         disks=[(idx, disk, 0)],
+                         cleanup=new_disks)
+
+    changes = [
+      ("disk/%d" % idx,
+       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
+      ]
+    if self.op.hotplug:
+      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
+                                               (disk, self.instance),
+                                               self.instance, True, idx)
+      if result.fail_msg:
+        changes.append(("disk/%d" % idx, "assemble:failed"))
+        self.LogWarning("Can't assemble newly created disk %d: %s",
+                        idx, result.fail_msg)
+      else:
+        _, link_name, uri = result.payload
+        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
+                                  constants.HOTPLUG_TARGET_DISK,
+                                  disk, (link_name, uri), idx)
+        changes.append(("disk/%d" % idx, msg))
+
+    return (disk, changes)
+
+  def _PostAddDisk(self, _, disk):
+    if not WaitForSync(self, self.instance, disks=[disk],
+                       oneshot=not self.op.wait_for_sync):
+      raise errors.OpExecError("Failed to sync disks of %s" %
+                               self.instance.name)
+
+    # the disk is active at this point, so deactivate it if the instance disks
+    # are supposed to be inactive
+    if not self.instance.disks_active:
+      ShutdownInstanceDisks(self, self.instance, disks=[disk])
+
+  def _AttachDisk(self, idx, params, _):
+    """Attaches an existing disk to an instance.
+
+    """
+    uuid = params.get("uuid", None)
+    name = params.get(constants.IDISK_NAME, None)
+
+    disk = self.GenericGetDiskInfo(uuid, name)
+
+    # Rename disk before attaching (if disk is filebased)
+    if disk.dev_type in constants.DTS_INSTANCE_DEPENDENT_PATH:
+      # Add disk size/mode, else GenerateDiskTemplate will not work.
+      params[constants.IDISK_SIZE] = disk.size
+      params[constants.IDISK_MODE] = str(disk.mode)
+      dummy_disk = self._GenerateDiskTemplateWrapper(idx, disk.dev_type, params)
+      new_logical_id = dummy_disk.logical_id
+      result = self.rpc.call_blockdev_rename(self.instance.primary_node,
+                                             [(disk, new_logical_id)])
+      result.Raise("Failed before attach")
+      self.cfg.SetDiskLogicalID(disk.uuid, new_logical_id)
+      disk.logical_id = new_logical_id
+
+    # Attach disk to instance
+    self.cfg.AttachInstanceDisk(self.instance.uuid, disk.uuid, idx)
+
+    # re-read the instance from the configuration
+    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
+
+    changes = [
+      ("disk/%d" % idx,
+       "attach:size=%s,mode=%s" % (disk.size, disk.mode)),
+      ]
+
+    disks_ok, _, payloads = AssembleInstanceDisks(self, self.instance,
+                                                  disks=[disk])
+    if not disks_ok:
+      changes.append(("disk/%d" % idx, "assemble:failed"))
+      return disk, changes
+
+    if self.op.hotplug:
+      _, link_name, uri = payloads[0]
+      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
+                                constants.HOTPLUG_TARGET_DISK,
+                                disk, (link_name, uri), idx)
+      changes.append(("disk/%d" % idx, msg))
+
+    return (disk, changes)
+
+  def _ModifyDisk(self, idx, disk, params, _):
+    """Modifies a disk.
+
+    """
+    changes = []
+    if constants.IDISK_MODE in params:
+      disk.mode = params.get(constants.IDISK_MODE)
+      changes.append(("disk.mode/%d" % idx, disk.mode))
+
+    if constants.IDISK_NAME in params:
+      disk.name = params.get(constants.IDISK_NAME)
+      changes.append(("disk.name/%d" % idx, disk.name))
+
+    # Modify arbitrary params in case instance template is ext
+
+    for key, value in params.iteritems():
+      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
+          disk.dev_type == constants.DT_EXT):
+        # stolen from GetUpdatedParams: default means reset/delete
+        if value.lower() == constants.VALUE_DEFAULT:
+          try:
+            del disk.params[key]
+          except KeyError:
+            pass
+        else:
+          disk.params[key] = value
+        changes.append(("disk.params:%s/%d" % (key, idx), value))
+
+    # Update disk object
+    self.cfg.Update(disk, self.feedback_fn)
+
+    return changes
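
For 'ext' disks, any key outside MODIFIABLE_IDISK_PARAMS follows the same
rule as GetUpdatedParams: the literal value "default" deletes the key,
anything else overwrites it. A standalone sketch of that reset-or-set rule
on plain dicts (assuming constants.VALUE_DEFAULT == "default")::

  def apply_ext_params(current, updates):
    # "default" resets (deletes) a key; other values overwrite it
    result = dict(current)
    for key, value in updates.items():
      if str(value).lower() == "default":
        result.pop(key, None)
      else:
        result[key] = value
    return result

  params = {"access": "kernelspace", "foo": "1"}
  assert apply_ext_params(params, {"foo": "default", "bar": "2"}) == \
      {"access": "kernelspace", "bar": "2"}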
+
+  def _RemoveDisk(self, idx, root, _):
+    """Removes a disk.
+
+    """
+    hotmsg = ""
+    if self.op.hotplug:
+      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
+                                   constants.HOTPLUG_TARGET_DISK,
+                                   root, None, idx)
+      ShutdownInstanceDisks(self, self.instance, [root])
+
+    RemoveDisks(self, self.instance, disks=[root])
+
+    # if this is a DRBD disk, return its port to the pool
+    if root.dev_type in constants.DTS_DRBD:
+      self.cfg.AddTcpUdpPort(root.logical_id[2])
+
+    # Remove disk from config
+    self.cfg.RemoveInstanceDisk(self.instance.uuid, root.uuid)
+
+    # re-read the instance from the configuration
+    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
+
+    return hotmsg
+
+  def _DetachDisk(self, idx, root, _):
+    """Detaches a disk from an instance.
+
+    """
+    hotmsg = ""
+    if self.op.hotplug:
+      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
+                                   constants.HOTPLUG_TARGET_DISK,
+                                   root, None, idx)
+
+    # Always shutdown the disk before detaching.
+    ShutdownInstanceDisks(self, self.instance, [root])
+
+    # Rename detached disk.
+    #
+    # Transform logical_id from:
+    #   <file_storage_dir>/<instance_name>/<disk_name>
+    # to
+    #   <file_storage_dir>/<disk_name>
+    if root.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):
+      file_driver = root.logical_id[0]
+      instance_path, disk_name = os.path.split(root.logical_id[1])
+      new_path = os.path.join(os.path.dirname(instance_path), disk_name)
+      new_logical_id = (file_driver, new_path)
+      result = self.rpc.call_blockdev_rename(self.instance.primary_node,
+                                             [(root, new_logical_id)])
+      result.Raise("Failed before detach")
+      # Update logical_id
+      self.cfg.SetDiskLogicalID(root.uuid, new_logical_id)
+
+    # Remove disk from config
+    self.cfg.DetachInstanceDisk(self.instance.uuid, root.uuid)
+
+    # re-read the instance from the configuration
+    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
+
+    return hotmsg
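
The rename above only rewrites the path component of the logical id; the
file driver is kept as-is. A minimal standalone sketch of the same
transformation, assuming a hypothetical (file_driver, path) tuple and no
Ganeti objects::

  import os.path

  def flatten_file_logical_id(logical_id):
    # path is assumed to look like
    # <file_storage_dir>/<instance_name>/<disk_name>
    file_driver, old_path = logical_id
    instance_path, disk_name = os.path.split(old_path)
    new_path = os.path.join(os.path.dirname(instance_path), disk_name)
    return (file_driver, new_path)

  # ("loop", "/srv/ganeti/file-storage/inst1/disk0")
  #   -> ("loop", "/srv/ganeti/file-storage/disk0")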
+
+  def _CreateNewNic(self, idx, params, private):
+    """Creates data structure for a new network interface.
+
+    """
+    mac = params[constants.INIC_MAC]
+    ip = params.get(constants.INIC_IP, None)
+    net = params.get(constants.INIC_NETWORK, None)
+    name = params.get(constants.INIC_NAME, None)
+    net_uuid = self.cfg.LookupNetwork(net)
+    #TODO: not private.filled?? can a nic have no nicparams??
+    nicparams = private.filled
+    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
+                       nicparams=nicparams)
+    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
+
+    changes = [
+      ("nic.%d" % idx,
+       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
+       (mac, ip, private.filled[constants.NIC_MODE],
+       private.filled[constants.NIC_LINK], net)),
+      ]
+
+    if self.op.hotplug:
+      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
+                                constants.HOTPLUG_TARGET_NIC,
+                                nobj, None, idx)
+      changes.append(("nic.%d" % idx, msg))
+
+    return (nobj, changes)
+
+  def _ApplyNicMods(self, idx, nic, params, private):
+    """Modifies a network interface.
+
+    """
+    changes = []
+
+    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
+      if key in params:
+        changes.append(("nic.%s/%d" % (key, idx), params[key]))
+        setattr(nic, key, params[key])
+
+    new_net = params.get(constants.INIC_NETWORK, nic.network)
+    new_net_uuid = self.cfg.LookupNetwork(new_net)
+    if new_net_uuid != nic.network:
+      changes.append(("nic.network/%d" % idx, new_net))
+      nic.network = new_net_uuid
+
+    if private.filled:
+      nic.nicparams = private.filled
+
+      for (key, val) in nic.nicparams.items():
+        changes.append(("nic.%s/%d" % (key, idx), val))
+
+    if self.op.hotplug:
+      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
+                                constants.HOTPLUG_TARGET_NIC,
+                                nic, None, idx)
+      changes.append(("nic/%d" % idx, msg))
+
+    return changes
+
+  def _RemoveNic(self, idx, nic, _):
+    if self.op.hotplug:
+      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
+                                 constants.HOTPLUG_TARGET_NIC,
+                                 nic, None, idx)
+
+  def Exec(self, feedback_fn):
+    """Modifies an instance.
+
+    All parameters take effect only at the next restart of the instance.
+
+    """
+    self.feedback_fn = feedback_fn
+    # Process here the warnings from CheckPrereq, as we don't have a
+    # feedback_fn there.
+    # TODO: Replace with self.LogWarning
+    for warn in self.warn:
+      feedback_fn("WARNING: %s" % warn)
+
+    assert ((self.op.disk_template is None) ^
+            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
+      "Not owning any node resource locks"
+
+    result = []
+
+    # New primary node
+    if self.op.pnode_uuid:
+      self.instance.primary_node = self.op.pnode_uuid
+
+    # runtime memory
+    if self.op.runtime_mem:
+      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
+                                                     self.instance,
+                                                     self.op.runtime_mem)
+      rpcres.Raise("Cannot modify instance runtime memory")
+      result.append(("runtime_memory", self.op.runtime_mem))
+
+    # Apply disk changes
+    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+    ApplyContainerMods("disk", inst_disks, result, self.diskmod,
+                       self._CreateNewDisk, self._AttachDisk, self._ModifyDisk,
+                       self._RemoveDisk, self._DetachDisk,
+                       post_add_fn=self._PostAddDisk)
+
+    if self.op.disk_template:
+      if __debug__:
+        check_nodes = set(self.cfg.GetInstanceNodes(self.instance.uuid))
+        if self.op.remote_node_uuid:
+          check_nodes.add(self.op.remote_node_uuid)
+        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
+          owned = self.owned_locks(level)
+          assert not (check_nodes - owned), \
+            ("Not owning the correct locks, owning %r, expected at least %r" %
+             (owned, check_nodes))
+
+      r_shut = ShutdownInstanceDisks(self, self.instance)
+      if not r_shut:
+        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
+                                 " proceed with disk template conversion")
+      #TODO make heterogeneous conversions work
+      mode = (self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
+              self.op.disk_template)
+      try:
+        if mode in self._DISK_CONVERSIONS:
+          self._DISK_CONVERSIONS[mode](self, feedback_fn)
+        else:
+          self._ConvertInstanceDisks(feedback_fn)
+      except:
+        for disk in inst_disks:
+          self.cfg.ReleaseDRBDMinors(disk.uuid)
+        raise
+      result.append(("disk_template", self.op.disk_template))
+
+      disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)
+      assert utils.AllDiskOfType(disk_info, [self.op.disk_template]), \
+        ("Expected disk template '%s', found '%s'" %
+         (self.op.disk_template,
+          self.cfg.GetInstanceDiskTemplate(self.instance.uuid)))
+
+    # Release node and resource locks if there are any (they might already have
+    # been released during disk conversion)
+    ReleaseLocks(self, locking.LEVEL_NODE)
+    ReleaseLocks(self, locking.LEVEL_NODE_RES)
+
+    # Apply NIC changes
+    if self._new_nics is not None:
+      self.instance.nics = self._new_nics
+      result.extend(self._nic_chgdesc)
+
+    # hvparams changes
+    if self.op.hvparams:
+      self.instance.hvparams = self.hv_inst
+      for key, val in self.op.hvparams.iteritems():
+        result.append(("hv/%s" % key, val))
+
+    # beparams changes
+    if self.op.beparams:
+      self.instance.beparams = self.be_inst
+      for key, val in self.op.beparams.iteritems():
+        result.append(("be/%s" % key, val))
+
+    # OS change
+    if self.op.os_name:
+      self.instance.os = self.op.os_name
+
+    # osparams changes
+    if self.op.osparams:
+      self.instance.osparams = self.os_inst
+      for key, val in self.op.osparams.iteritems():
+        result.append(("os/%s" % key, val))
+
+    if self.op.osparams_private:
+      self.instance.osparams_private = self.os_inst_private
+      for key, val in self.op.osparams_private.iteritems():
+        # Show the Private(...) blurb.
+        result.append(("os_private/%s" % key, repr(val)))
+
+    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
+
+    if self.op.offline is None:
+      # Ignore
+      pass
+    elif self.op.offline:
+      # Mark instance as offline
+      self.instance = self.cfg.MarkInstanceOffline(self.instance.uuid)
+      result.append(("admin_state", constants.ADMINST_OFFLINE))
+    else:
+      # Mark instance as online, but stopped
+      self.instance = self.cfg.MarkInstanceDown(self.instance.uuid)
+      result.append(("admin_state", constants.ADMINST_DOWN))
+
+    UpdateMetadata(feedback_fn, self.rpc, self.instance)
+
+    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
+                self.owned_locks(locking.LEVEL_NODE)), \
+      "All node locks should have been released by now"
+
+    return result
+
+  _DISK_CONVERSIONS = {
+    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
+    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
+    }
diff --git a/lib/cmdlib/instance_storage.py b/lib/cmdlib/instance_storage.py
index 6e247ae..d92a9e8 100644
--- a/lib/cmdlib/instance_storage.py
+++ b/lib/cmdlib/instance_storage.py
@@ -267,7 +267,11 @@
     all_node_uuids = [pnode_uuid]
 
   if disk_template is None:
-    disk_template = instance.disk_template
+    disk_template = utils.GetDiskTemplate(disks)
+    if disk_template == constants.DT_MIXED:
+      raise errors.OpExecError("Creating disk for '%s' instances "
+                               "only possible with explicit disk template."
+                               % (constants.DT_MIXED,))
 
   CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), disk_template)
 
@@ -361,12 +365,7 @@
       raise errors.OpPrereqError("Invalid disk size '%s'" % size,
                                  errors.ECODE_INVAL)
 
-    ext_provider = disk.get(constants.IDISK_PROVIDER, None)
-    if ext_provider and disk_template != constants.DT_EXT:
-      raise errors.OpPrereqError("The '%s' option is only valid for the %s"
-                                 " disk template, not %s" %
-                                 (constants.IDISK_PROVIDER, constants.DT_EXT,
-                                  disk_template), errors.ECODE_INVAL)
+    CheckDiskExtProvider(disk, disk_template)
 
     data_vg = disk.get(constants.IDISK_VG, default_vg)
     name = disk.get(constants.IDISK_NAME, None)
@@ -377,6 +376,7 @@
       constants.IDISK_MODE: mode,
       constants.IDISK_VG: data_vg,
       constants.IDISK_NAME: name,
+      constants.IDISK_TYPE: disk_template,
       }
 
     for key in [
@@ -395,14 +395,10 @@
     # For extstorage, demand the `provider' option and add any
     # additional parameters (ext-params) to the dict
     if disk_template == constants.DT_EXT:
-      if ext_provider:
-        new_disk[constants.IDISK_PROVIDER] = ext_provider
-        for key in disk:
-          if key not in constants.IDISK_PARAMS:
-            new_disk[key] = disk[key]
-      else:
-        raise errors.OpPrereqError("Missing provider for template '%s'" %
-                                   constants.DT_EXT, errors.ECODE_INVAL)
+      new_disk[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
+      for key in disk:
+        if key not in constants.IDISK_PARAMS:
+          new_disk[key] = disk[key]
 
     new_disks.append(new_disk)
 
@@ -480,11 +476,20 @@
   return new_disks
 
 
-def CalculateFileStorageDir(lu):
+def CalculateFileStorageDir(disk_type, cfg, instance_name,
+                            file_storage_dir=None):
   """Calculate final instance file storage dir.
 
-  @type lu: L{LogicalUnit}
-  @param lu: the logical unit on whose behalf we execute
+  @type disk_type: disk template
+  @param disk_type: L{constants.DT_FILE}, L{constants.DT_SHARED_FILE}, or
+                    L{constants.DT_GLUSTER}
+
+  @type cfg: ConfigWriter
+  @param cfg: the configuration that is to be used.
+  @type file_storage_dir: path
+  @param file_storage_dir: the path below the configured base.
+  @type instance_name: string
+  @param instance_name: name of the instance this disk is for.
 
   @rtype: string
   @return: The file storage directory for the instance
@@ -492,33 +497,32 @@
   """
   # file storage dir calculation/check
   instance_file_storage_dir = None
-  if lu.op.disk_template in constants.DTS_FILEBASED:
+  if disk_type in constants.DTS_FILEBASED:
     # build the full file storage dir path
     joinargs = []
 
     cfg_storage = None
-    if lu.op.disk_template == constants.DT_FILE:
-      cfg_storage = lu.cfg.GetFileStorageDir()
-    elif lu.op.disk_template == constants.DT_SHARED_FILE:
-      cfg_storage = lu.cfg.GetSharedFileStorageDir()
-    elif lu.op.disk_template == constants.DT_GLUSTER:
-      cfg_storage = lu.cfg.GetGlusterStorageDir()
+    if disk_type == constants.DT_FILE:
+      cfg_storage = cfg.GetFileStorageDir()
+    elif disk_type == constants.DT_SHARED_FILE:
+      cfg_storage = cfg.GetSharedFileStorageDir()
+    elif disk_type == constants.DT_GLUSTER:
+      cfg_storage = cfg.GetGlusterStorageDir()
 
     if not cfg_storage:
       raise errors.OpPrereqError(
         "Cluster file storage dir for {tpl} storage type not defined".format(
-          tpl=repr(lu.op.disk_template)
+          tpl=repr(disk_type)
         ),
-        errors.ECODE_STATE
-    )
+        errors.ECODE_STATE)
 
     joinargs.append(cfg_storage)
 
-    if lu.op.file_storage_dir is not None:
-      joinargs.append(lu.op.file_storage_dir)
+    if file_storage_dir is not None:
+      joinargs.append(file_storage_dir)
 
-    if lu.op.disk_template != constants.DT_GLUSTER:
-      joinargs.append(lu.op.instance_name)
+    if disk_type != constants.DT_GLUSTER:
+      joinargs.append(instance_name)
 
     if len(joinargs) > 1:
       # pylint: disable=W0142
@@ -538,7 +542,7 @@
 
 
 def _GenerateDRBD8Branch(lu, primary_uuid, secondary_uuid, size, vgnames, names,
-                         iv_name, p_minor, s_minor):
+                         iv_name, forthcoming=False):
   """Generate a drbd8 device complete with its children.
 
   """
@@ -548,27 +552,35 @@
 
   dev_data = objects.Disk(dev_type=constants.DT_PLAIN, size=size,
                           logical_id=(vgnames[0], names[0]),
-                          params={})
+                          nodes=[primary_uuid, secondary_uuid],
+                          params={}, forthcoming=forthcoming)
   dev_data.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
   dev_meta = objects.Disk(dev_type=constants.DT_PLAIN,
                           size=constants.DRBD_META_SIZE,
                           logical_id=(vgnames[1], names[1]),
-                          params={})
+                          nodes=[primary_uuid, secondary_uuid],
+                          params={}, forthcoming=forthcoming)
   dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
+
+  drbd_uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
+  minors = lu.cfg.AllocateDRBDMinor([primary_uuid, secondary_uuid], drbd_uuid)
+  assert len(minors) == 2
   drbd_dev = objects.Disk(dev_type=constants.DT_DRBD8, size=size,
                           logical_id=(primary_uuid, secondary_uuid, port,
-                                      p_minor, s_minor,
+                                      minors[0], minors[1],
                                       shared_secret),
                           children=[dev_data, dev_meta],
-                          iv_name=iv_name, params={})
-  drbd_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
+                          nodes=[primary_uuid, secondary_uuid],
+                          iv_name=iv_name, params={},
+                          forthcoming=forthcoming)
+  drbd_dev.uuid = drbd_uuid
   return drbd_dev
 
 
 def GenerateDiskTemplate(
   lu, template_name, instance_uuid, primary_node_uuid, secondary_node_uuids,
   disk_info, file_storage_dir, file_driver, base_index,
-  feedback_fn, full_disk_params):
+  feedback_fn, full_disk_params, forthcoming=False):
   """Generate the entire disk layout for a given template type.
 
   """
@@ -584,8 +596,6 @@
     if len(secondary_node_uuids) != 1:
       raise errors.ProgrammerError("Wrong template configuration")
     remote_node_uuid = secondary_node_uuids[0]
-    minors = lu.cfg.AllocateDRBDMinor(
-      [primary_node_uuid, remote_node_uuid] * len(disk_info), instance_uuid)
 
     (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
                                                        full_disk_params)
@@ -605,9 +615,10 @@
                                       [data_vg, meta_vg],
                                       names[idx * 2:idx * 2 + 2],
                                       "disk/%d" % disk_index,
-                                      minors[idx * 2], minors[idx * 2 + 1])
+                                      forthcoming=forthcoming)
       disk_dev.mode = disk[constants.IDISK_MODE]
       disk_dev.name = disk.get(constants.IDISK_NAME, None)
+      disk_dev.dev_type = template_name
       disks.append(disk_dev)
   else:
     if secondary_node_uuids:
@@ -620,6 +631,7 @@
       names = _GenerateUniqueNames(lu, ["%s.disk%s" %
                                         (name_prefix, base_index + i)
                                         for i in range(disk_count)])
+    disk_nodes = []
 
     if template_name == constants.DT_PLAIN:
 
@@ -627,6 +639,8 @@
         vg = disk.get(constants.IDISK_VG, vgname)
         return (vg, names[idx])
 
+      disk_nodes = [primary_node_uuid]
+
     elif template_name == constants.DT_GLUSTER:
       logical_id_fn = lambda _1, disk_index, _2: \
         (file_driver, "ganeti/%s.%d" % (instance_uuid,
@@ -637,6 +651,9 @@
         lambda _, disk_index, disk: (file_driver,
                                      "%s/%s" % (file_storage_dir,
                                                 names[idx]))
+      if template_name == constants.DT_FILE:
+        disk_nodes = [primary_node_uuid]
+
     elif template_name == constants.DT_BLOCK:
       logical_id_fn = \
         lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
@@ -676,8 +693,9 @@
                               logical_id=logical_id_fn(idx, disk_index, disk),
                               iv_name="disk/%d" % disk_index,
                               mode=disk[constants.IDISK_MODE],
-                              params=params,
-                              spindles=disk.get(constants.IDISK_SPINDLES))
+                              params=params, nodes=disk_nodes,
+                              spindles=disk.get(constants.IDISK_SPINDLES),
+                              forthcoming=forthcoming)
       disk_dev.name = disk.get(constants.IDISK_NAME, None)
       disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
       disks.append(disk_dev)
@@ -685,6 +703,15 @@
   return disks
 
 
+def CommitDisks(disks):
+  """Recursively remove the forthcoming flag
+
+  """
+  for disk in disks:
+    disk.forthcoming = False
+    CommitDisks(disk.children)
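
CommitDisks clears the flag depth-first, so nested children (e.g. the data
and meta LVs below a DRBD device) are committed together with their parent.
A self-contained sketch of the same recursion, with a hypothetical minimal
disk object instead of objects.Disk::

  class FakeDisk(object):
    def __init__(self, children=None):
      self.forthcoming = True
      self.children = children or []

  def commit_disks(disks):
    for disk in disks:
      disk.forthcoming = False
      commit_disks(disk.children)

  drbd = FakeDisk(children=[FakeDisk(), FakeDisk()])
  commit_disks([drbd])
  assert not drbd.forthcoming
  assert not any(child.forthcoming for child in drbd.children)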
+
+
 def CheckSpindlesExclusiveStorage(diskdict, es_flag, required):
   """Check the presence of the spindle options with exclusive_storage.
 
@@ -709,6 +736,29 @@
                                errors.ECODE_INVAL)
 
 
+def CheckDiskExtProvider(diskdict, disk_template):
+  """Check that the given disk should or should not have the provider param.
+
+  @type diskdict: dict
+  @param diskdict: disk parameters
+  @type disk_template: string
+  @param disk_template: the desired template of this disk
+  @raise errors.OpPrereqError: when the parameter is used in the wrong way
+
+  """
+  ext_provider = diskdict.get(constants.IDISK_PROVIDER, None)
+
+  if ext_provider and disk_template != constants.DT_EXT:
+    raise errors.OpPrereqError("The '%s' option is only valid for the %s"
+                               " disk template, not %s" %
+                               (constants.IDISK_PROVIDER, constants.DT_EXT,
+                                disk_template), errors.ECODE_INVAL)
+
+  if ext_provider is None and disk_template == constants.DT_EXT:
+    raise errors.OpPrereqError("Missing provider for template '%s'" %
+                               constants.DT_EXT, errors.ECODE_INVAL)
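
The check is symmetric: a provider entry is rejected for every template
except 'ext', and required for 'ext'. Illustrative calls only (the disk
dicts are made up; they assume constants.IDISK_PROVIDER == "provider",
constants.DT_PLAIN == "plain" and constants.DT_EXT == "ext")::

  CheckDiskExtProvider({"size": 1024, "provider": "pvdr1"}, "plain")
  # raises OpPrereqError: provider is only valid for the ext template

  CheckDiskExtProvider({"size": 1024}, "ext")
  # raises OpPrereqError: missing provider for template 'ext'

  CheckDiskExtProvider({"size": 1024, "provider": "pvdr1"}, "ext")  # ok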
+
+
 class LUInstanceRecreateDisks(LogicalUnit):
   """Recreate an instance's missing disks.
 
@@ -733,6 +783,7 @@
     constants.IDISK_PROVIDER,
     constants.IDISK_NAME,
     constants.IDISK_ACCESS,
+    constants.IDISK_TYPE,
     ]))
 
   def _RunAllocator(self):
@@ -758,12 +809,13 @@
     # they should be already be marked as drained or offline, and hence
     # skipped by the allocator. If instance disks have been lost for other
     # reasons, then recreating the disks on the same nodes should be fine.
-    disk_template = self.instance.disk_template
     spindle_use = be_full[constants.BE_SPINDLE_USE]
+    disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
     disks = [{
       constants.IDISK_SIZE: d.size,
       constants.IDISK_MODE: d.mode,
       constants.IDISK_SPINDLES: d.spindles,
+      constants.IDISK_TYPE: d.dev_type
       } for d in self.cfg.GetInstanceDisks(self.instance.uuid)]
     req = iallocator.IAReqInstanceAlloc(name=self.op.instance_name,
                                         disk_template=disk_template,
@@ -831,7 +883,6 @@
       if self.op.iallocator:
         # iallocator will select a new node in the same group
         self.needed_locks[locking.LEVEL_NODEGROUP] = []
-        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
 
     self.needed_locks[locking.LEVEL_NODE_RES] = []
 
@@ -867,7 +918,6 @@
           self.needed_locks[locking.LEVEL_NODE].extend(
             self.cfg.GetNodeGroup(group_uuid).members)
 
-        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
       elif not self.op.nodes:
         self._LockInstancesNodes(primary_only=False)
     elif level == locking.LEVEL_NODE_RES:
@@ -908,17 +958,18 @@
                                    (instance.name, len(inst_nodes),
                                     len(self.op.node_uuids)),
                                    errors.ECODE_INVAL)
-      assert instance.disk_template != constants.DT_DRBD8 or \
-             len(self.op.node_uuids) == 2
-      assert instance.disk_template != constants.DT_PLAIN or \
-             len(self.op.node_uuids) == 1
+      disks = self.cfg.GetInstanceDisks(instance.uuid)
+      assert (not utils.AnyDiskOfType(disks, [constants.DT_DRBD8]) or
+              len(self.op.node_uuids) == 2)
+      assert (not utils.AnyDiskOfType(disks, [constants.DT_PLAIN]) or
+              len(self.op.node_uuids) == 1)
       primary_node = self.op.node_uuids[0]
     else:
       primary_node = instance.primary_node
     if not self.op.iallocator:
       CheckNodeOnline(self, primary_node)
 
-    if instance.disk_template == constants.DT_DISKLESS:
+    if not instance.disks:
       raise errors.OpPrereqError("Instance '%s' has no disks" %
                                  self.op.instance_name, errors.ECODE_INVAL)
 
@@ -960,7 +1011,6 @@
       # Release unneeded node and node resource locks
       ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.node_uuids)
       ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.node_uuids)
-      ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
 
     if self.op.node_uuids:
       node_uuids = self.op.node_uuids
@@ -999,7 +1049,7 @@
                                          # have changed
         (_, _, old_port, _, _, old_secret) = disk.logical_id
         new_minors = self.cfg.AllocateDRBDMinor(self.op.node_uuids,
-                                                self.instance.uuid)
+                                                disk.uuid)
         new_id = (self.op.node_uuids[0], self.op.node_uuids[1], old_port,
                   new_minors[0], new_minors[1], old_secret)
         assert len(disk.logical_id) == len(new_id)
@@ -1023,12 +1073,12 @@
 
     # change primary node, if needed
     if self.op.node_uuids:
-      self.instance.primary_node = self.op.node_uuids[0]
       self.LogWarning("Changing the instance's nodes, you will have to"
                       " remove any disks left on the older nodes manually")
-
-    if self.op.node_uuids:
+      self.instance.primary_node = self.op.node_uuids[0]
       self.cfg.Update(self.instance, feedback_fn)
+      for disk in inst_disks:
+        self.cfg.SetDiskNodes(disk.uuid, self.op.node_uuids)
 
     # All touched nodes must be locked
     mylocks = self.owned_locks(locking.LEVEL_NODE)
@@ -1086,7 +1136,8 @@
   lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
       space_info, constants.ST_LVM_VG)
   if not lvm_vg_info:
-    raise errors.OpPrereqError("Can't retrieve storage information for LVM")
+    raise errors.OpPrereqError("Can't retrieve storage information for LVM",
+                               errors.ECODE_ENVIRON)
   vg_free = lvm_vg_info.get("storage_free", None)
   if not isinstance(vg_free, int):
     raise errors.OpPrereqError("Can't compute free disk space on node"
@@ -1390,7 +1441,8 @@
   if not inst_disks or disks is not None and not disks:
     return True
 
-  disks = ExpandCheckDisks(inst_disks, disks)
+  disks = [d for d in ExpandCheckDisks(inst_disks, disks)
+           if d.dev_type in constants.DTS_INT_MIRROR]
 
   if not oneshot:
     lu.LogInfo("Waiting for instance %s to sync disks", instance.name)
@@ -1528,11 +1580,13 @@
       when the size is wrong
   @return: False if the operation failed, otherwise a list of
       (host, instance_visible_name, node_visible_name)
-      with the mapping from node devices to instance devices
+      with the mapping from node devices to instance devices, as well as the
+      payloads of the RPC calls
 
   """
   device_info = []
   disks_ok = True
+  payloads = []
 
   if disks is None:
     # only mark instance disks as active if all disks are affected
@@ -1585,6 +1639,7 @@
         node_disk.UnsetSize()
       result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
                                              instance, True, idx)
+      payloads.append(result.payload)
       msg = result.fail_msg
       if msg:
         lu.LogWarning("Could not prepare block device %s on node %s"
@@ -1600,7 +1655,7 @@
   if not disks_ok:
     lu.cfg.MarkInstanceDisksInactive(instance.uuid)
 
-  return disks_ok, device_info
+  return disks_ok, device_info, payloads
 
 
 def StartInstanceDisks(lu, instance, force):
@@ -1610,8 +1665,8 @@
   instance configuration, if needed.
 
   """
-  disks_ok, _ = AssembleInstanceDisks(lu, instance,
-                                      ignore_secondaries=force)
+  disks_ok, _, _ = AssembleInstanceDisks(lu, instance,
+                                         ignore_secondaries=force)
   if not disks_ok:
     ShutdownInstanceDisks(lu, instance)
     if force is not None and not force:
@@ -1682,12 +1737,13 @@
       CheckNodeOnline(self, node_uuid)
     self.node_es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids)
 
-    if self.instance.disk_template not in constants.DTS_GROWABLE:
-      raise errors.OpPrereqError("Instance's disk layout does not support"
-                                 " growing", errors.ECODE_INVAL)
-
     self.disk = self.cfg.GetDiskInfo(self.instance.FindDisk(self.op.disk))
 
+    if self.disk.dev_type not in constants.DTS_GROWABLE:
+      raise errors.OpPrereqError(
+          "Instance's disk layout %s does not support"
+          " growing" % self.disk.dev_type, errors.ECODE_INVAL)
+
     if self.op.absolute:
       self.target = self.op.amount
       self.delta = self.target - self.disk.size
@@ -1710,8 +1766,8 @@
     self._CheckIPolicy(self.target)
 
   def _CheckDiskSpace(self, node_uuids, req_vgspace):
-    template = self.instance.disk_template
-    if (template not in (constants.DTS_NO_FREE_SPACE_CHECK) and
+    template = self.disk.dev_type
+    if (template not in constants.DTS_NO_FREE_SPACE_CHECK and
         not any(self.node_es_flags.values())):
       # TODO: check the free disk space for file, when that feature will be
       # supported
@@ -1728,12 +1784,12 @@
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                             group_info)
 
+    disks = self.cfg.GetInstanceDisks(self.op.instance_uuid)
     disk_sizes = [disk.size if disk.uuid != self.disk.uuid else target_size
-                  for disk in self.cfg.GetInstanceDisks(self.op.instance_uuid)]
+                  for disk in disks]
 
     # The ipolicy checker below ignores None, so we only give it the disk size
-    res = ComputeIPolicyDiskSizesViolation(ipolicy, disk_sizes,
-                                           self.instance.disk_template)
+    res = ComputeIPolicyDiskSizesViolation(ipolicy, disk_sizes, disks)
     if res:
       msg = ("Growing disk %s violates policy: %s" %
              (self.op.disk,
@@ -1753,7 +1809,8 @@
 
     wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks
 
-    disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[self.disk])
+    disks_ok, _, _ = AssembleInstanceDisks(self, self.instance,
+                                           disks=[self.disk])
     if not disks_ok:
       raise errors.OpExecError("Cannot activate block device to grow")
 
@@ -1876,7 +1933,7 @@
                                  " secondary node", errors.ECODE_INVAL)
 
   def ExpandNames(self):
-    self._ExpandAndLockInstance()
+    self._ExpandAndLockInstance(allow_forthcoming=True)
 
     assert locking.LEVEL_NODE not in self.needed_locks
     assert locking.LEVEL_NODE_RES not in self.needed_locks
@@ -1903,7 +1960,6 @@
       if self.op.iallocator is not None:
         # iallocator will select a new node in the same group
         self.needed_locks[locking.LEVEL_NODEGROUP] = []
-        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
 
     self.needed_locks[locking.LEVEL_NODE_RES] = []
 
@@ -1935,7 +1991,6 @@
       if self.op.iallocator is not None:
         assert self.op.remote_node_uuid is None
         assert not self.needed_locks[locking.LEVEL_NODE]
-        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
 
         # Lock member nodes of all locked groups
         self.needed_locks[locking.LEVEL_NODE] = \
@@ -2021,9 +2076,9 @@
     """Activate the disks.
 
     """
-    disks_ok, disks_info = \
-              AssembleInstanceDisks(self, self.instance,
-                                    ignore_size=self.op.ignore_size)
+    disks_ok, disks_info, _ = AssembleInstanceDisks(
+      self, self.instance, ignore_size=self.op.ignore_size)
+
     if not disks_ok:
       raise errors.OpExecError("Cannot activate block devices")
 
@@ -2244,10 +2299,6 @@
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.instance_name
 
-    if self.instance.disk_template != constants.DT_DRBD8:
-      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
-                                 " instances", errors.ECODE_INVAL)
-
     secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
     if len(secondary_nodes) != 1:
       raise errors.OpPrereqError("The instance has a strange layout,"
@@ -2353,6 +2404,12 @@
       if not self.disks:
         self.disks = range(len(self.instance.disks))
 
+    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+    if (not disks or
+        not utils.AllDiskOfType(disks, [constants.DT_DRBD8])):
+      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
+                                 " instances", errors.ECODE_INVAL)
+
     # TODO: This is ugly, but right now we can't distinguish between internal
     # submitted opcode and external one. We should fix that.
     if self.remote_node_info:
@@ -2376,7 +2433,6 @@
     # Release unneeded node and node resource locks
     ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
     ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
-    ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
 
     # Release any owned node group
     ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
@@ -2424,7 +2480,9 @@
     activate_disks = not self.instance.disks_active
 
     # Activate the instance disks if we're replacing them on a down instance
-    if activate_disks:
+    # that is real (forthcoming instances currently only have forthcoming
+    # disks).
+    if activate_disks and not self.instance.forthcoming:
       StartInstanceDisks(self.lu, self.instance, True)
       # Re-read the instance object modified by the previous call
       self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
@@ -2440,11 +2498,11 @@
     finally:
       # Deactivate the instance disks if we're replacing them on a
       # down instance
-      if activate_disks:
+      if activate_disks and not self.instance.forthcoming:
         _SafeShutdownInstanceDisks(self.lu, self.instance,
                                    req_states=INSTANCE_NOT_RUNNING)
 
-    assert not self.lu.owned_locks(locking.LEVEL_NODE)
+    self.lu.AssertReleasedLocks(locking.LEVEL_NODE)
 
     if __debug__:
       # Verify owned locks
@@ -2612,6 +2670,10 @@
     """
     steps_total = 6
 
+    if self.instance.forthcoming:
+      feedback_fn("Instance forthcoming, not touching disks")
+      return
+
     # Step: check device activation
     self.lu.LogStep(1, steps_total, "Check device existence")
     self._CheckDisksExistence([self.other_node_uuid, self.target_node_uuid])
@@ -2734,6 +2796,24 @@
       self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
       self._RemoveOldStorage(self.target_node_uuid, iv_names)
 
+  def _UpdateDisksSecondary(self, iv_names, feedback_fn):
+    """Update the configuration of disks to have a new secondary.
+
+    @param iv_names: dict of triples for all volumes of the instance.
+        The first component has to be the device and the third the logical
+        id.
+    @param feedback_fn: function used to send feedback back to the caller of
+        the OpCode
+    """
+    self.lu.LogInfo("Updating instance configuration")
+    for dev, _, new_logical_id in iv_names.itervalues():
+      dev.logical_id = new_logical_id
+      self.cfg.Update(dev, feedback_fn)
+      self.cfg.SetDiskNodes(dev.uuid, [self.instance.primary_node,
+                                       self.new_node_uuid])
+
+    self.cfg.Update(self.instance, feedback_fn)
+
   def _ExecDrbd8Secondary(self, feedback_fn):
     """Replace the secondary node for DRBD 8.
 
@@ -2753,6 +2833,30 @@
     Failures are not very well handled.
 
     """
+    if self.instance.forthcoming:
+      feedback_fn("Instance fortcoming, will only update the configuration")
+      inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
+      minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid
+                                           for _ in inst_disks],
+                                          self.instance.uuid)
+      logging.debug("Allocated minors %r", minors)
+      iv_names = {}
+      for idx, (dev, new_minor) in enumerate(zip(inst_disks, minors)):
+        (o_node1, _, o_port, o_minor1, o_minor2, o_secret) = \
+            dev.logical_id
+        if self.instance.primary_node == o_node1:
+          p_minor = o_minor1
+        else:
+          p_minor = o_minor2
+        new_net_id = (self.instance.primary_node, self.new_node_uuid, o_port,
+                      p_minor, new_minor, o_secret)
+        iv_names[idx] = (dev, dev.children, new_net_id)
+        logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
+                      new_net_id)
+      self._UpdateDisksSecondary(iv_names, feedback_fn)
+      ReleaseLocks(self.lu, locking.LEVEL_NODE)
+      return
+
     steps_total = 6
 
     pnode = self.instance.primary_node
@@ -2788,9 +2892,10 @@
     # after this, we must manually remove the drbd minors on both the
     # error and the success paths
     self.lu.LogStep(4, steps_total, "Changing drbd configuration")
-    minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid
-                                         for _ in inst_disks],
-                                        self.instance.uuid)
+    minors = []
+    for disk in inst_disks:
+      minor = self.cfg.AllocateDRBDMinor([self.new_node_uuid], disk.uuid)
+      minors.append(minor[0])
     logging.debug("Allocated minors %r", minors)
 
     iv_names = {}
@@ -2829,10 +2934,12 @@
                              GetInstanceInfoText(self.instance), False,
                              excl_stor)
       except errors.GenericError:
-        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
+        for disk in inst_disks:
+          self.cfg.ReleaseDRBDMinors(disk.uuid)
         raise
 
     # We have new devices, shutdown the drbd on the old secondary
+
     for idx, dev in enumerate(inst_disks):
       self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
       msg = self.rpc.call_blockdev_shutdown(self.target_node_uuid,
@@ -2850,18 +2957,14 @@
     msg = result.fail_msg
     if msg:
       # detaches didn't succeed (unlikely)
-      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
+      for disk in inst_disks:
+        self.cfg.ReleaseDRBDMinors(disk.uuid)
       raise errors.OpExecError("Can't detach the disks from the network on"
                                " old node: %s" % (msg,))
 
     # if we managed to detach at least one, we update all the disks of
     # the instance to point to the new secondary
-    self.lu.LogInfo("Updating instance configuration")
-    for dev, _, new_logical_id in iv_names.itervalues():
-      dev.logical_id = new_logical_id
-      self.cfg.Update(dev, feedback_fn)
-
-    self.cfg.Update(self.instance, feedback_fn)
+    self._UpdateDisksSecondary(iv_names, feedback_fn)
 
     # Release all node locks (the configuration has been updated)
     ReleaseLocks(self.lu, locking.LEVEL_NODE)
@@ -2873,7 +2976,6 @@
     result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                             self.new_node_uuid],
                                            (inst_disks, self.instance),
-                                           self.instance.name,
                                            False)
     for to_node, to_result in result.items():
       msg = to_result.fail_msg
diff --git a/lib/cmdlib/instance_utils.py b/lib/cmdlib/instance_utils.py
index 175b53e..b703c1e 100644
--- a/lib/cmdlib/instance_utils.py
+++ b/lib/cmdlib/instance_utils.py
@@ -35,13 +35,26 @@
 
 from ganeti import constants
 from ganeti import errors
+from ganeti import ht
 from ganeti import locking
+from ganeti.masterd import iallocator
 from ganeti import network
+from ganeti import netutils
 from ganeti import objects
 from ganeti import pathutils
 from ganeti import utils
 from ganeti.cmdlib.common import AnnotateDiskParams, \
-  ComputeIPolicyInstanceViolation, CheckDiskTemplateEnabled
+  ComputeIPolicyInstanceViolation, CheckDiskTemplateEnabled, \
+  ComputeIPolicySpecViolation
+
+
+#: Type description for changes as returned by L{ApplyContainerMods}'s
+#: callbacks
+_TApplyContModsCbChanges = \
+  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
+    ht.TNonEmptyString,
+    ht.TAny,
+    ])))
 
 
 def BuildInstanceHookEnv(name, primary_node_name, secondary_node_names, os_type,
@@ -180,6 +193,8 @@
   if disks is None:
     disks = lu.cfg.GetInstanceDisks(instance.uuid)
 
+  disk_template = utils.GetDiskTemplate(disks)
+
   args = {
     "name": instance.name,
     "primary_node_name": lu.cfg.GetNodeName(instance.primary_node),
@@ -190,7 +205,7 @@
     "minmem": bep[constants.BE_MINMEM],
     "vcpus": bep[constants.BE_VCPUS],
     "nics": NICListToTuple(lu, instance.nics),
-    "disk_template": instance.disk_template,
+    "disk_template": disk_template,
     "disks": disks,
     "bep": bep,
     "hvp": hvp,
@@ -256,7 +271,35 @@
   lu.cfg.RemoveInstance(instance.uuid)
 
 
-def RemoveDisks(lu, instance, disk_template=None, disks=None,
+def _StoragePathsRemoved(removed, disks):
+  """Returns an iterable of all storage paths to be removed.
+
+  A storage path is removed if no disks are contained in it anymore.
+
+  @type removed: list of L{objects.Disk}
+  @param removed: The disks that are being removed
+  @type disks: list of L{objects.Disk}
+  @param disks: All disks attached to the instance
+
+  @rtype: list of file paths
+  @returns: the storage directories that need to be removed
+
+  """
+  remaining_storage_dirs = set()
+  for disk in disks:
+    if (disk not in removed and
+        disk.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE)):
+      remaining_storage_dirs.add(os.path.dirname(disk.logical_id[1]))
+
+  deleted_storage_dirs = set()
+  for disk in removed:
+    if disk.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):
+      deleted_storage_dirs.add(os.path.dirname(disk.logical_id[1]))
+
+  return deleted_storage_dirs - remaining_storage_dirs
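
The helper works purely on path prefixes: a directory is reported only if
no file-based disk outside the removal set still lives in it. A standalone
sketch with hypothetical stand-in disks (all file-based, so the dev_type
filter is omitted)::

  import os.path

  class FakeFileDisk(object):
    def __init__(self, path):
      self.logical_id = ("loop", path)

  def storage_paths_removed(removed, disks):
    remaining = set(os.path.dirname(d.logical_id[1])
                    for d in disks if d not in removed)
    deleted = set(os.path.dirname(d.logical_id[1]) for d in removed)
    return deleted - remaining

  a = FakeFileDisk("/srv/ganeti/file-storage/inst1/disk0")
  b = FakeFileDisk("/srv/ganeti/file-storage/inst1/disk1")
  # removing one of two disks keeps the directory alive
  assert storage_paths_removed([a], [a, b]) == set()
  # removing both makes the directory eligible for cleanup
  assert (storage_paths_removed([a, b], [a, b]) ==
          set(["/srv/ganeti/file-storage/inst1"]))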
+
+
+def RemoveDisks(lu, instance, disks=None,
                 target_node_uuid=None, ignore_failures=False):
   """Remove all or a subset of disks for an instance.
 
@@ -273,8 +316,6 @@
   @param lu: the logical unit on whose behalf we execute
   @type instance: L{objects.Instance}
   @param instance: the instance whose disks we should remove
-  @type disk_template: string
-  @param disk_template: if passed, overrides the instance's disk_template
   @type disks: list of L{objects.Disk}
   @param disks: the disks to remove; if not specified, all the disks of the
           instance are removed
@@ -290,12 +331,9 @@
   all_result = True
   ports_to_release = set()
 
-  disk_count = len(instance.disks)
+  all_disks = lu.cfg.GetInstanceDisks(instance.uuid)
   if disks is None:
-    disks = lu.cfg.GetInstanceDisks(instance.uuid)
-
-  if disk_template is None:
-    disk_template = instance.disk_template
+    disks = all_disks
 
   anno_disks = AnnotateDiskParams(instance, disks, lu.cfg)
   for (idx, device) in enumerate(anno_disks):
@@ -320,23 +358,17 @@
     for port in ports_to_release:
       lu.cfg.AddTcpUdpPort(port)
 
-  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), disk_template)
+  for d in disks:
+    CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), d.dev_type)
 
-  if (len(disks) == disk_count and
-      disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]):
-    if len(disks) > 0:
-      file_storage_dir = os.path.dirname(disks[0].logical_id[1])
-    else:
-      if disk_template == constants.DT_SHARED_FILE:
-        file_storage_dir = utils.PathJoin(lu.cfg.GetSharedFileStorageDir(),
-                                          instance.name)
-      else:
-        file_storage_dir = utils.PathJoin(lu.cfg.GetFileStorageDir(),
-                                          instance.name)
-    if target_node_uuid:
-      tgt = target_node_uuid
-    else:
-      tgt = instance.primary_node
+  if target_node_uuid:
+    tgt = target_node_uuid
+  else:
+    tgt = instance.primary_node
+
+  obsolete_storage_paths = _StoragePathsRemoved(disks, all_disks)
+
+  for file_storage_dir in obsolete_storage_paths:
     result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
     if result.fail_msg:
       lu.LogWarning("Could not remove directory '%s' on node %s: %s",
@@ -773,3 +805,499 @@
      [inst.name for inst in lu.cfg.GetAllInstancesInfo().values()]:
     raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                instance_name, errors.ECODE_EXISTS)
+
+
+def CheckForConflictingIp(lu, ip, node_uuid):
+  """In case of conflicting IP address raise error.
+
+  @type ip: string
+  @param ip: IP address
+  @type node_uuid: string
+  @param node_uuid: node UUID
+
+  """
+  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
+  if conf_net is not None:
+    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
+                                " network %s, but the target NIC does not." %
+                                (ip, conf_net)),
+                               errors.ECODE_STATE)
+
+  return (None, None)
+
+
+def ComputeIPolicyInstanceSpecViolation(
+  ipolicy, instance_spec, disk_types,
+  _compute_fn=ComputeIPolicySpecViolation):
+  """Compute if instance specs meets the specs of ipolicy.
+
+  @type ipolicy: dict
+  @param ipolicy: The ipolicy to verify against
+  @type instance_spec: dict
+  @param instance_spec: The instance spec to verify
+  @type disk_types: list of strings
+  @param disk_types: the disk templates of the instance
+  @param _compute_fn: The function to verify ipolicy (unittest only)
+  @see: L{ComputeIPolicySpecViolation}
+
+  """
+  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
+  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
+  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
+  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
+  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
+  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
+
+  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
+                     disk_sizes, spindle_use, disk_types)
+
+
+def ComputeInstanceCommunicationNIC(instance_name):
+  """Compute the name of the instance NIC used by instance
+  communication.
+
+  With instance communication, a new NIC is added to the instance.
+  This NIC has a special name that identifies it as being part of
+  instance communication, and not just a normal NIC.  This function
+  generates the name of the NIC based on a prefix and the instance
+  name.
+
+  @type instance_name: string
+  @param instance_name: name of the instance the NIC belongs to
+
+  @rtype: string
+  @return: name of the NIC
+
+  """
+  return constants.INSTANCE_COMMUNICATION_NIC_PREFIX + instance_name
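
Illustrative result, assuming the prefix constant expands to
"ganeti:communication:" (check constants.py for the actual value)::

  ComputeInstanceCommunicationNIC("inst1.example.com")
  # -> "ganeti:communication:inst1.example.com"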
+
+
+def PrepareContainerMods(mods, private_fn):
+  """Prepares a list of container modifications by adding a private data field.
+
+  @type mods: list of tuples; (operation, index, parameters)
+  @param mods: List of modifications
+  @type private_fn: callable or None
+  @param private_fn: Callable for constructing a private data field for a
+    modification
+  @rtype: list
+
+  """
+  if private_fn is None:
+    fn = lambda: None
+  else:
+    fn = private_fn
+
+  return [(op, idx, params, fn()) for (op, idx, params) in mods]
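
The private object is created once per modification and handed back to the
callbacks later, which is how per-modification state (e.g. filled NIC
parameters) survives from CheckPrereq to Exec. A minimal sketch; the mods
list and the private class are made up for illustration::

  class _Private(object):
    def __init__(self):
      self.filled = None        # populated later by the LU

  mods = [("add", -1, {"size": 1024}),
          ("modify", 0, {"mode": "ro"})]

  prepared = PrepareContainerMods(mods, _Private)
  # [("add", -1, {"size": 1024}, <_Private>),
  #  ("modify", 0, {"mode": "ro"}, <_Private>)]
  assert all(len(entry) == 4 for entry in prepared)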
+
+
+def ApplyContainerMods(kind, container, chgdesc, mods,
+                       create_fn, attach_fn, modify_fn, remove_fn,
+                       detach_fn, post_add_fn=None):
+  """Applies descriptions in C{mods} to C{container}.
+
+  @type kind: string
+  @param kind: One-word item description
+  @type container: list
+  @param container: Container to modify
+  @type chgdesc: None or list
+  @param chgdesc: List of applied changes
+  @type mods: list
+  @param mods: Modifications as returned by L{PrepareContainerMods}
+  @type create_fn: callable
+  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
+    receives absolute item index, parameters and private data object as added
+    by L{PrepareContainerMods}, returns tuple containing new item and changes
+    as list
+  @type attach_fn: callable
+  @param attach_fn: Callback for attaching an existing item to a container
+    (L{constants.DDM_ATTACH}); receives absolute item index and item UUID or
+    name, returns tuple containing new item and changes as list
+  @type modify_fn: callable
+  @param modify_fn: Callback for modifying an existing item
+    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
+    and private data object as added by L{PrepareContainerMods}, returns
+    changes as list
+  @type remove_fn: callable
+  @param remove_fn: Callback on removing item; receives absolute item index,
+    item and private data object as added by L{PrepareContainerMods}
+  @type detach_fn: callable
+  @param detach_fn: Callback on detaching item; receives absolute item index,
+    item and private data object as added by L{PrepareContainerMods}
+  @type post_add_fn: callable
+  @param post_add_fn: Callable for post-processing a newly created item after
+    it has been put into the container. It receives the index of the new item
+    and the new item as parameters.
+
+  """
+  for (op, identifier, params, private) in mods:
+    changes = None
+
+    if op == constants.DDM_ADD:
+      addidx = GetIndexFromIdentifier(identifier, kind, container)
+      if create_fn is None:
+        item = params
+      else:
+        (item, changes) = create_fn(addidx, params, private)
+
+      InsertItemToIndex(identifier, item, container)
+
+      if post_add_fn is not None:
+        post_add_fn(addidx, item)
+
+    elif op == constants.DDM_ATTACH:
+      addidx = GetIndexFromIdentifier(identifier, kind, container)
+      if attach_fn is None:
+        item = params
+      else:
+        (item, changes) = attach_fn(addidx, params, private)
+
+      InsertItemToIndex(identifier, item, container)
+
+      if post_add_fn is not None:
+        post_add_fn(addidx, item)
+
+    else:
+      # Retrieve existing item
+      (absidx, item) = GetItemFromContainer(identifier, kind, container)
+
+      if op == constants.DDM_REMOVE:
+        assert not params
+
+        changes = [("%s/%s" % (kind, absidx), "remove")]
+
+        if remove_fn is not None:
+          msg = remove_fn(absidx, item, private)
+          if msg:
+            changes.append(("%s/%s" % (kind, absidx), msg))
+
+        assert container[absidx] == item
+        del container[absidx]
+      elif op == constants.DDM_DETACH:
+        assert not params
+
+        changes = [("%s/%s" % (kind, absidx), "detach")]
+
+        if detach_fn is not None:
+          msg = detach_fn(absidx, item, private)
+          if msg:
+            changes.append(("%s/%s" % (kind, absidx), msg))
+
+        assert container[absidx] == item
+        del container[absidx]
+      elif op == constants.DDM_MODIFY:
+        if modify_fn is not None:
+          changes = modify_fn(absidx, item, params, private)
+      else:
+        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
+
+    assert _TApplyContModsCbChanges(changes)
+
+    if not (chgdesc is None or changes is None):
+      chgdesc.extend(changes)
+
+
+def GetItemFromContainer(identifier, kind, container):
+  """Return the item refered by the identifier.
+
+  @type identifier: string
+  @param identifier: Item index or name or UUID
+  @type kind: string
+  @param kind: One-word item description
+  @type container: list
+  @param container: Container to get the item from
+
+  """
+  # Index
+  try:
+    idx = int(identifier)
+    if idx == -1:
+      # -1 refers to the last item (the one most recently appended)
+      absidx = len(container) - 1
+    elif idx < 0:
+      raise IndexError("Not accepting negative indices other than -1")
+    elif idx > len(container):
+      raise IndexError("Got %s index %s, but there are only %s" %
+                       (kind, idx, len(container)))
+    else:
+      absidx = idx
+    return (absidx, container[idx])
+  except ValueError:
+    pass
+
+  for idx, item in enumerate(container):
+    if item.uuid == identifier or item.name == identifier:
+      return (idx, item)
+
+  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
+                             (kind, identifier), errors.ECODE_NOENT)
+
+
+def GetIndexFromIdentifier(identifier, kind, container):
+  """Check if the identifier represents a valid container index and return it.
+
+  Used in "add" and "attach" actions.
+
+  @type identifier: string
+  @param identifier: Item index or name or UUID
+  @type kind: string
+  @param kind: Type of item, e.g. "disk", "nic"
+  @type container: list
+  @param container: Container to calculate the index from
+
+  """
+  try:
+    idx = int(identifier)
+  except ValueError:
+    raise errors.OpPrereqError("Only positive integer or -1 is accepted",
+                               errors.ECODE_INVAL)
+  if idx == -1:
+    return len(container)
+  else:
+    if idx < 0:
+      raise IndexError("Not accepting negative indices other than -1")
+    elif idx > len(container):
+      raise IndexError("Got %s index %s, but there are only %s" %
+                        (kind, idx, len(container)))
+  return idx
+
+
+def InsertItemToIndex(identifier, item, container):
+  """Insert an item to the provided index of a container.
+
+  Used in "add" and "attach" actions.
+
+  @type identifier: string
+  @param identifier: Item index
+  @type item: object
+  @param item: The item to be inserted
+  @type container: list
+  @param container: Container to insert the item to
+
+  """
+  try:
+    idx = int(identifier)
+  except ValueError:
+    raise errors.OpPrereqError("Only positive integer or -1 is accepted",
+                               errors.ECODE_INVAL)
+  if idx == -1:
+    container.append(item)
+  else:
+    assert idx >= 0
+    assert idx <= len(container)
+    # list.insert does so before the specified index
+    container.insert(idx, item)
+
+
+def CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
+  """Checks if nodes have enough physical CPUs
+
+  This function checks if all given nodes have the needed number of
+  physical CPUs. In case any node has less CPUs or we cannot get the
+  information from the node, this function raises an OpPrereqError
+  exception.
+
+  @type lu: C{LogicalUnit}
+  @param lu: a logical unit from which we get configuration data
+  @type node_uuids: C{list}
+  @param node_uuids: the list of node UUIDs to check
+  @type requested: C{int}
+  @param requested: the minimum acceptable number of physical CPUs
+  @type hypervisor_specs: list of pairs (string, dict of strings)
+  @param hypervisor_specs: list of hypervisor specifications in
+      pairs (hypervisor_name, hvparams)
+  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
+      or we cannot check the node
+
+  """
+  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
+  for node_uuid in node_uuids:
+    info = nodeinfo[node_uuid]
+    node_name = lu.cfg.GetNodeName(node_uuid)
+    info.Raise("Cannot get current information from node %s" % node_name,
+               prereq=True, ecode=errors.ECODE_ENVIRON)
+    (_, _, (hv_info, )) = info.payload
+    num_cpus = hv_info.get("cpu_total", None)
+    if not isinstance(num_cpus, int):
+      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
+                                 " on node %s, result was '%s'" %
+                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
+    if requested > num_cpus:
+      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
+                                 "required" % (node_name, num_cpus, requested),
+                                 errors.ECODE_NORES)
+
+
+def CheckHostnameSane(lu, name):
+  """Ensures that a given hostname resolves to a 'sane' name.
+
+  The given name is required to be a prefix of the resolved hostname,
+  to prevent accidental mismatches.
+
+  @param lu: the logical unit on behalf of which we're checking
+  @param name: the name we should resolve and check
+  @return: the resolved hostname object
+
+  """
+  hostname = netutils.GetHostname(name=name)
+  if hostname.name != name:
+    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
+  if not utils.MatchNameComponent(name, [hostname.name]):
+    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
+                                " same as given hostname '%s'") %
+                               (hostname.name, name), errors.ECODE_INVAL)
+  return hostname
+
+
+def CheckOpportunisticLocking(op):
+  """Generate error if opportunistic locking is not possible.
+
+  """
+  if op.opportunistic_locking and not op.iallocator:
+    raise errors.OpPrereqError("Opportunistic locking is only available in"
+                               " combination with an instance allocator",
+                               errors.ECODE_INVAL)
+
+
+def CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
+  """Wrapper around IAReqInstanceAlloc.
+
+  @param op: The instance opcode
+  @param disks: The computed disks
+  @param nics: The computed nics
+  @param beparams: The fully filled beparams
+  @param node_name_whitelist: List of nodes which should appear as online to the
+    allocator (unless the node is already marked offline)
+
+  @returns: A filled L{iallocator.IAReqInstanceAlloc}
+
+  """
+  spindle_use = beparams[constants.BE_SPINDLE_USE]
+  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
+                                       disk_template=op.disk_template,
+                                       group_name=op.group_name,
+                                       tags=op.tags,
+                                       os=op.os_type,
+                                       vcpus=beparams[constants.BE_VCPUS],
+                                       memory=beparams[constants.BE_MAXMEM],
+                                       spindle_use=spindle_use,
+                                       disks=disks,
+                                       nics=[n.ToDict() for n in nics],
+                                       hypervisor=op.hypervisor,
+                                       node_whitelist=node_name_whitelist)
+
+
+def ComputeFullBeParams(op, cluster):
+  """Computes the full beparams.
+
+  @param op: The instance opcode
+  @param cluster: The cluster config object
+
+  @return: The fully filled beparams
+
+  """
+  default_beparams = cluster.beparams[constants.PP_DEFAULT]
+  for param, value in op.beparams.iteritems():
+    if value == constants.VALUE_AUTO:
+      op.beparams[param] = default_beparams[param]
+  objects.UpgradeBeParams(op.beparams)
+  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
+  return cluster.SimpleFillBE(op.beparams)
+
+
+def ComputeNics(op, cluster, default_ip, cfg, ec_id):
+  """Computes the nics.
+
+  @param op: The instance opcode
+  @param cluster: Cluster configuration object
+  @param default_ip: The default ip to assign
+  @param cfg: An instance of the configuration object
+  @param ec_id: Execution context ID
+
+  @returns: The built up NICs
+
+  """
+  nics = []
+  for nic in op.nics:
+    nic_mode_req = nic.get(constants.INIC_MODE, None)
+    nic_mode = nic_mode_req
+    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
+      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
+
+    net = nic.get(constants.INIC_NETWORK, None)
+    link = nic.get(constants.NIC_LINK, None)
+    ip = nic.get(constants.INIC_IP, None)
+    vlan = nic.get(constants.INIC_VLAN, None)
+
+    if net is None or net.lower() == constants.VALUE_NONE:
+      net = None
+    else:
+      if nic_mode_req is not None or link is not None:
+        raise errors.OpPrereqError("If network is given, no mode or link"
+                                   " is allowed to be passed",
+                                   errors.ECODE_INVAL)
+
+    # ip validity checks
+    if ip is None or ip.lower() == constants.VALUE_NONE:
+      nic_ip = None
+    elif ip.lower() == constants.VALUE_AUTO:
+      if not op.name_check:
+        raise errors.OpPrereqError("IP address set to auto but name checks"
+                                   " have been skipped",
+                                   errors.ECODE_INVAL)
+      nic_ip = default_ip
+    else:
+      # We defer pool operations until later, so that the iallocator has
+      # filled in the instance's node(s)
+      if ip.lower() == constants.NIC_IP_POOL:
+        if net is None:
+          raise errors.OpPrereqError("if ip=pool, parameter network"
+                                     " must be passed too",
+                                     errors.ECODE_INVAL)
+
+      elif not netutils.IPAddress.IsValid(ip):
+        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
+                                   errors.ECODE_INVAL)
+
+      nic_ip = ip
+
+    # TODO: check the ip address for uniqueness
+    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip and not net:
+      raise errors.OpPrereqError("Routed nic mode requires an ip address"
+                                 " if not attached to a network",
+                                 errors.ECODE_INVAL)
+
+    # MAC address verification
+    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
+    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
+      mac = utils.NormalizeAndValidateMac(mac)
+
+      try:
+        # TODO: We need to factor this out
+        cfg.ReserveMAC(mac, ec_id)
+      except errors.ReservationError:
+        raise errors.OpPrereqError("MAC address %s already in use"
+                                   " in cluster" % mac,
+                                   errors.ECODE_NOTUNIQUE)
+
+    #  Build nic parameters
+    nicparams = {}
+    if nic_mode_req:
+      nicparams[constants.NIC_MODE] = nic_mode
+    if link:
+      nicparams[constants.NIC_LINK] = link
+    if vlan:
+      nicparams[constants.NIC_VLAN] = vlan
+
+    check_params = cluster.SimpleFillNIC(nicparams)
+    objects.NIC.CheckParameterSyntax(check_params)
+    net_uuid = cfg.LookupNetwork(net)
+    name = nic.get(constants.INIC_NAME, None)
+    if name is not None and name.lower() == constants.VALUE_NONE:
+      name = None
+    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
+                          network=net_uuid, nicparams=nicparams)
+    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
+    nics.append(nic_obj)
+
+  return nics
diff --git a/lib/cmdlib/misc.py b/lib/cmdlib/misc.py
index 0c66896..62bff52 100644
--- a/lib/cmdlib/misc.py
+++ b/lib/cmdlib/misc.py
@@ -65,12 +65,6 @@
       locking.LEVEL_NODE: lock_node_uuids,
       }
 
-    self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
-
-    if not self.op.node_names:
-      # Acquire node allocation lock only if all nodes are affected
-      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
-
   def CheckPrereq(self):
     """Check prerequisites.
 
diff --git a/lib/cmdlib/network.py b/lib/cmdlib/network.py
index 4c06453..ec112d8 100644
--- a/lib/cmdlib/network.py
+++ b/lib/cmdlib/network.py
@@ -106,10 +106,8 @@
 
     if self.op.conflicts_check:
       self.share_locks[locking.LEVEL_NODE] = 1
-      self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
       self.needed_locks = {
         locking.LEVEL_NODE: locking.ALL_SET,
-        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
         }
     else:
       self.needed_locks = {}
diff --git a/lib/cmdlib/node.py b/lib/cmdlib/node.py
index 6578741..569fa25 100644
--- a/lib/cmdlib/node.py
+++ b/lib/cmdlib/node.py
@@ -541,16 +541,15 @@
     """Filter for getting affected instances.
 
     """
-    return (instance.disk_template in constants.DTS_INT_MIRROR and
+    disks = self.cfg.GetInstanceDisks(instance.uuid)
+    any_mirrored = utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR)
+    return (any_mirrored and
             self.op.node_uuid in self.cfg.GetInstanceNodes(instance.uuid))
 
   def ExpandNames(self):
     if self.lock_all:
       self.needed_locks = {
         locking.LEVEL_NODE: locking.ALL_SET,
-
-        # Block allocations when all nodes are locked
-        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
         }
     else:
       self.needed_locks = {
@@ -567,7 +566,6 @@
     self.share_locks = ShareAll()
     self.share_locks[locking.LEVEL_NODE] = 0
     self.share_locks[locking.LEVEL_NODE_RES] = 0
-    self.share_locks[locking.LEVEL_NODE_ALLOC] = 0
 
     if self.lock_instances:
       self.needed_locks[locking.LEVEL_INSTANCE] = \
@@ -966,7 +964,7 @@
 
   return _GetNodeInstancesInner(cfg,
                                 lambda inst: node_uuid in
-                                  cfg.GetInstanceNodes(inst.uuid.uuid))
+                                  cfg.GetInstanceNodes(inst.uuid))
 
 
 class LUNodeEvacuate(NoHooksLU):
@@ -1131,8 +1129,9 @@
 
     elif self.op.iallocator is not None:
       # TODO: Implement relocation to other group
-      req = iallocator.IAReqNodeEvac(evac_mode=self.op.mode,
-                                     instances=list(self.instance_names))
+      req = iallocator.IAReqNodeEvac(
+          evac_mode=self.op.mode, instances=list(self.instance_names),
+          ignore_soft_errors=self.op.ignore_soft_errors)
       ial = iallocator.IAllocator(self.cfg, self.rpc, req)
 
       ial.Run(self.op.iallocator)
@@ -1328,7 +1327,6 @@
     else:
       self.needed_locks = {
         locking.LEVEL_NODE: locking.ALL_SET,
-        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
         }
 
   def Exec(self, feedback_fn):
@@ -1404,7 +1402,6 @@
     else:
       self.needed_locks = {
         locking.LEVEL_NODE: locking.ALL_SET,
-        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
         }
 
   def _DetermineStorageType(self):
diff --git a/lib/cmdlib/test.py b/lib/cmdlib/test.py
index 117245d..5ec7c92 100644
--- a/lib/cmdlib/test.py
+++ b/lib/cmdlib/test.py
@@ -426,7 +426,8 @@
                                         target_groups=self.op.target_groups)
     elif self.op.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
       req = iallocator.IAReqNodeEvac(instances=self.op.instances,
-                                     evac_mode=self.op.evac_mode)
+                                     evac_mode=self.op.evac_mode,
+                                     ignore_soft_errors=False)
     elif self.op.mode == constants.IALLOCATOR_MODE_MULTI_ALLOC:
       disk_template = self.op.disk_template
       insts = [iallocator.IAReqInstanceAlloc(name="%s%s" % (self.op.name, idx),
diff --git a/lib/config.py b/lib/config/__init__.py
similarity index 87%
rename from lib/config.py
rename to lib/config/__init__.py
index 8441255..0e9c0c8 100644
--- a/lib/config.py
+++ b/lib/config/__init__.py
@@ -51,6 +51,11 @@
 import threading
 import itertools
 
+from ganeti.config.temporary_reservations import TemporaryReservationManager
+from ganeti.config.utils import ConfigSync, ConfigManager
+from ganeti.config.verify import (VerifyType, VerifyNic, VerifyIpolicy,
+                                  ValidateConfig)
+
 from ganeti import errors
 from ganeti import utils
 from ganeti import constants
@@ -111,101 +116,10 @@
   return ConfigWriter(**kwargs)
 
 
-def _ConfigSync(shared=0):
-  """Configuration synchronization decorator.
-
-  """
-  def wrap(fn):
-    def sync_function(*args, **kwargs):
-      with args[0].GetConfigManager(shared):
-        return fn(*args, **kwargs)
-    return sync_function
-  return wrap
-
 # job id used for resource management at config upgrade time
 _UPGRADE_CONFIG_JID = "jid-cfg-upgrade"
 
 
-def _ValidateConfig(data):
-  """Verifies that a configuration dict looks valid.
-
-  This only verifies the version of the configuration.
-
-  @raise errors.ConfigurationError: if the version differs from what
-      we expect
-
-  """
-  if data['version'] != constants.CONFIG_VERSION:
-    raise errors.ConfigVersionMismatch(constants.CONFIG_VERSION,
-                                       data['version'])
-
-
-class TemporaryReservationManager(object):
-  """A temporary resource reservation manager.
-
-  This is used to reserve resources in a job, before using them, making sure
-  other jobs cannot get them in the meantime.
-
-  """
-  def __init__(self):
-    self._ec_reserved = {}
-
-  def Reserved(self, resource):
-    for holder_reserved in self._ec_reserved.values():
-      if resource in holder_reserved:
-        return True
-    return False
-
-  def Reserve(self, ec_id, resource):
-    if self.Reserved(resource):
-      raise errors.ReservationError("Duplicate reservation for resource '%s'"
-                                    % str(resource))
-    if ec_id not in self._ec_reserved:
-      self._ec_reserved[ec_id] = set([resource])
-    else:
-      self._ec_reserved[ec_id].add(resource)
-
-  def DropECReservations(self, ec_id):
-    if ec_id in self._ec_reserved:
-      del self._ec_reserved[ec_id]
-
-  def GetReserved(self):
-    all_reserved = set()
-    for holder_reserved in self._ec_reserved.values():
-      all_reserved.update(holder_reserved)
-    return all_reserved
-
-  def GetECReserved(self, ec_id):
-    """ Used when you want to retrieve all reservations for a specific
-        execution context. E.g when commiting reserved IPs for a specific
-        network.
-
-    """
-    ec_reserved = set()
-    if ec_id in self._ec_reserved:
-      ec_reserved.update(self._ec_reserved[ec_id])
-    return ec_reserved
-
-  def Generate(self, existing, generate_one_fn, ec_id):
-    """Generate a new resource of this type
-
-    """
-    assert callable(generate_one_fn)
-
-    all_elems = self.GetReserved()
-    all_elems.update(existing)
-    retries = 64
-    while retries > 0:
-      new_resource = generate_one_fn()
-      if new_resource is not None and new_resource not in all_elems:
-        break
-    else:
-      raise errors.ConfigurationError("Not able generate new resource"
-                                      " (last tried: %s)" % new_resource)
-    self.Reserve(ec_id, new_resource)
-    return new_resource
-
-
 def _MatchNameComponentIgnoreCase(short_name, names):
   """Wrapper around L{utils.text.MatchNameComponent}.
 
@@ -233,37 +147,6 @@
   return result
 
 
-class ConfigManager(object):
-  """Locks the configuration and exposes it to be read or modified.
-
-  """
-  def __init__(self, config_writer, shared=False):
-    assert isinstance(config_writer, ConfigWriter), \
-           "invalid argument: Not a ConfigWriter"
-    self._config_writer = config_writer
-    self._shared = shared
-
-  def __enter__(self):
-    try:
-      self._config_writer._OpenConfig(self._shared) # pylint: disable=W0212
-    except Exception:
-      logging.debug("Opening configuration failed")
-      try:
-        self._config_writer._CloseConfig(False) # pylint: disable=W0212
-      except Exception: # pylint: disable=W0703
-        logging.debug("Closing configuration failed as well")
-      raise
-
-  def __exit__(self, exc_type, exc_value, traceback):
-    # save the configuration, if this was a write opreration that succeeded
-    if exc_type is not None:
-      logging.debug("Configuration operation failed,"
-                    " the changes will not be saved")
-    # pylint: disable=W0212
-    self._config_writer._CloseConfig(not self._shared and exc_type is None)
-    return False
-
-
 def _UpdateIvNames(base_idx, disks):
   """Update the C{iv_name} attribute of disks.
 
@@ -282,6 +165,11 @@
 
   @ivar _all_rms: a list of all temporary reservation managers
 
+  Currently the class fulfills 3 main functions:
+    1. lock the configuration for access (monitor)
+    2. reload and write the config if necessary (bridge)
+    3. provide convenient access methods to config data (facade)
+
   """
   def __init__(self, cfg_file=None, offline=False, _getents=runtime.GetEnts,
                accept_foreign=False, wconfdcontext=None, wconfd=None):
@@ -307,6 +195,7 @@
     self._accept_foreign = accept_foreign
     self._lock_count = 0
     self._lock_current_shared = None
+    self._lock_forced = False
 
   def _ConfigData(self):
     return self._config_data
@@ -332,7 +221,7 @@
     nodegroup = self._UnlockedGetNodeGroup(node.group)
     return self._ConfigData().cluster.FillND(node, nodegroup)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNdParams(self, node):
     """Get the node params populated with cluster defaults.
 
@@ -343,7 +232,7 @@
     """
     return self._UnlockedGetNdParams(node)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNdGroupParams(self, nodegroup):
     """Get the node groups params populated with cluster defaults.
 
@@ -365,7 +254,7 @@
     """
     return self._ConfigData().cluster.FillNDGroup(group)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetGroupSshPorts(self):
     """Get a map of group UUIDs to SSH ports.
 
@@ -380,7 +269,7 @@
       port_map[uuid] = port
     return port_map
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetInstanceDiskParams(self, instance):
     """Get the disk params populated with inherit chain.
 
@@ -393,23 +282,6 @@
     nodegroup = self._UnlockedGetNodeGroup(node.group)
     return self._UnlockedGetGroupDiskParams(nodegroup)
 
-  @_ConfigSync()
-  def SetInstanceDiskTemplate(self, inst_uuid, disk_template):
-    """Set the instance's disk template to the given value.
-
-    @type inst_uuid: string
-    @param inst_uuid: The UUID of the instance object
-    @type disk_template: string
-    @param disk_template: The new disk template of the instance
-
-    """
-    instance = self._UnlockedGetInstanceInfo(inst_uuid)
-    if instance is None:
-      raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)
-
-    # Update the disk template of the instance
-    instance.disk_template = disk_template
-
   def _UnlockedGetInstanceDisks(self, inst_uuid):
     """Return the disks' info for the given instance
 
@@ -427,7 +299,7 @@
     return [self._UnlockedGetDiskInfo(disk_uuid)
             for disk_uuid in instance.disks]
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetInstanceDisks(self, inst_uuid):
     """Return the disks' info for the given instance
 
@@ -436,7 +308,7 @@
     """
     return self._UnlockedGetInstanceDisks(inst_uuid)
 
-  def _UnlockedAddDisk(self, disk):
+  def _UnlockedAddDisk(self, disk, replace=False):
     """Add a disk to the config.
 
     @type disk: L{objects.Disk}
@@ -448,12 +320,16 @@
 
     logging.info("Adding disk %s to configuration", disk.uuid)
 
-    self._CheckUniqueUUID(disk, include_temporary=False)
-    disk.serial_no = 1
-    disk.ctime = disk.mtime = time.time()
+    if replace:
+      self._CheckUUIDpresent(disk)
+    else:
+      self._CheckUniqueUUID(disk, include_temporary=False)
+      disk.serial_no = 1
+      disk.ctime = disk.mtime = time.time()
     disk.UpgradeConfig()
     self._ConfigData().disks[disk.uuid] = disk
     self._ConfigData().cluster.serial_no += 1
+    self._UnlockedReleaseDRBDMinors(disk.uuid)
 
   def _UnlockedAttachInstanceDisk(self, inst_uuid, disk_uuid, idx=None):
     """Attach a disk to an instance.
@@ -495,17 +371,26 @@
     instance.serial_no += 1
     instance.mtime = time.time()
 
-  @_ConfigSync()
-  def AddInstanceDisk(self, inst_uuid, disk, idx=None):
+  @ConfigSync()
+  def AddInstanceDisk(self, inst_uuid, disk, idx=None, replace=False):
     """Add a disk to the config and attach it to instance.
 
     This is a simple wrapper over L{_UnlockedAddDisk} and
     L{_UnlockedAttachInstanceDisk}.
 
     """
-    self._UnlockedAddDisk(disk)
+    self._UnlockedAddDisk(disk, replace=replace)
     self._UnlockedAttachInstanceDisk(inst_uuid, disk.uuid, idx)
 
+  @ConfigSync()
+  def AttachInstanceDisk(self, inst_uuid, disk_uuid, idx=None):
+    """Attach an existing disk to an instance.
+
+    This is a simple wrapper over L{_UnlockedAttachInstanceDisk}.
+
+    """
+    self._UnlockedAttachInstanceDisk(inst_uuid, disk_uuid, idx)
+
   def _UnlockedDetachInstanceDisk(self, inst_uuid, disk_uuid):
     """Detach a disk from an instance.
 
@@ -555,7 +440,7 @@
     del self._ConfigData().disks[disk_uuid]
     self._ConfigData().cluster.serial_no += 1
 
-  @_ConfigSync()
+  @ConfigSync()
   def RemoveInstanceDisk(self, inst_uuid, disk_uuid):
     """Detach a disk from an instance and remove it from the config.
 
@@ -566,6 +451,14 @@
     self._UnlockedDetachInstanceDisk(inst_uuid, disk_uuid)
     self._UnlockedRemoveDisk(disk_uuid)
 
+  @ConfigSync()
+  def DetachInstanceDisk(self, inst_uuid, disk_uuid):
+    """Detach a disk from an instance.
+
+    This is a simple wrapper over L{_UnlockedDetachInstanceDisk}.
+    """
+    self._UnlockedDetachInstanceDisk(inst_uuid, disk_uuid)
+
   def _UnlockedGetDiskInfo(self, disk_uuid):
     """Returns information about a disk.
 
@@ -582,7 +475,7 @@
 
     return self._ConfigData().disks[disk_uuid]
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetDiskInfo(self, disk_uuid):
     """Returns information about a disk.
 
@@ -591,6 +484,69 @@
     """
     return self._UnlockedGetDiskInfo(disk_uuid)
 
+  def _UnlockedGetDiskInfoByName(self, disk_name):
+    """Return information about a named disk.
+
+    Return disk information from the configuration file, searching with the
+    name of the disk.
+
+    @param disk_name: Name of the disk
+
+    @rtype: L{objects.Disk}
+    @return: the disk object
+
+    """
+    disk = None
+    count = 0
+    for d in self._ConfigData().disks.itervalues():
+      if d.name == disk_name:
+        count += 1
+        disk = d
+
+    if count > 1:
+      raise errors.ConfigurationError("There are %s disks with this name: %s"
+                                      % (count, disk_name))
+
+    return disk
+
+  @ConfigSync(shared=1)
+  def GetDiskInfoByName(self, disk_name):
+    """Return information about a named disk.
+
+    This is a simple wrapper over L{_UnlockedGetDiskInfoByName}.
+
+    """
+    return self._UnlockedGetDiskInfoByName(disk_name)
+
+  def _UnlockedGetDiskList(self):
+    """Get the list of disks.
+
+    @return: list of disk UUIDs, e.g. ['disk2-uuid', 'disk1-uuid']
+
+    """
+    return self._ConfigData().disks.keys()
+
+  @ConfigSync(shared=1)
+  def GetAllDisksInfo(self):
+    """Get the configuration of all disks.
+
+    This is a simple wrapper over L{_UnlockedGetAllDisksInfo}.
+
+    """
+    return self._UnlockedGetAllDisksInfo()
+
+  def _UnlockedGetAllDisksInfo(self):
+    """Get the configuration of all disks.
+
+    @rtype: dict
+    @return: dict of (disk_uuid, disk_info), where disk_info is what
+        GetDiskInfo would return for the disk
+
+    """
+    my_dict = dict([(disk_uuid, self._UnlockedGetDiskInfo(disk_uuid))
+                    for disk_uuid in self._UnlockedGetDiskList()])
+    return my_dict
+
   def _AllInstanceNodes(self, inst_uuid):
     """Compute the set of all disk-related nodes for an instance.
 
@@ -616,8 +572,9 @@
   def _UnlockedGetInstanceNodes(self, inst_uuid):
     """Get all disk-related nodes for an instance.
 
-    For non-DRBD, this will be empty, for DRBD it will contain both
-    the primary and the secondaries.
+    For non-DRBD instances, this will contain only the instance's primary node,
+    whereas for DRBD instances, it will contain both the primary and the
+    secondaries.
 
     @type inst_uuid: string
     @param inst_uuid: The UUID of the instance we want to get nodes for
@@ -630,7 +587,7 @@
     all_nodes.discard(instance.primary_node)
     return (instance.primary_node, ) + tuple(all_nodes)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetInstanceNodes(self, inst_uuid):
     """Get all disk-related nodes for an instance.
 
@@ -645,14 +602,14 @@
     @type inst_uuid: string
     @param inst_uuid: The UUID of the instance we want to get nodes for
     @rtype: list of strings
-    @return: A list of names for all the secondary nodes of the instance
+    @return: A tuple of names for all the secondary nodes of the instance
 
     """
     (all_nodes, instance) = self._AllInstanceNodes(inst_uuid)
     all_nodes.discard(instance.primary_node)
     return tuple(all_nodes)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetInstanceSecondaryNodes(self, inst_uuid):
     """Get the list of secondary nodes.
 
@@ -684,7 +641,8 @@
 
       for dev in devices:
         if dev.dev_type == constants.DT_PLAIN:
-          lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])
+          if not dev.forthcoming:
+            lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])
 
         elif dev.dev_type in constants.DTS_DRBD:
           if dev.children:
@@ -709,7 +667,7 @@
                   instance.primary_node)
     return ret
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetInstanceLVsByNode(self, inst_uuid, lvmap=None):
     """Provide a mapping of node to LVs a given instance owns.
 
@@ -718,7 +676,7 @@
     """
     return self._UnlockedGetInstanceLVsByNode(inst_uuid, lvmap=lvmap)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetGroupDiskParams(self, group):
     """Get the disk params populated with inherit chain.
 
@@ -741,7 +699,7 @@
     assert isinstance(data, dict), "Not a dictionary: " + str(data)
     return data
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetPotentialMasterCandidates(self):
     """Gets the list of node names of potential master candidates.
 
@@ -895,7 +853,7 @@
     existing = self._AllIDs(include_temporary=False)
     return self._temporary_ids.Generate(existing, utils.NewUUID, ec_id)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GenerateUniqueID(self, ec_id):
     """Generate an unique ID.
 
@@ -955,8 +913,6 @@
     @param result: list containing diagnostic messages
 
     """
-    instance_disk_uuids = [d for insts in data.instances.values()
-                           for d in insts.disks]
     for disk_uuid in data.disks:
       disk = data.disks[disk_uuid]
       result.extend(["disk %s error: %s" % (disk.uuid, msg)
@@ -964,9 +920,6 @@
       if disk.uuid != disk_uuid:
         result.append("disk '%s' is indexed by wrong UUID '%s'" %
                       (disk.name, disk_uuid))
-      if disk.uuid not in instance_disk_uuids:
-        result.append("disk '%s' is not attached to any instance" %
-                      disk.uuid)
 
   def _UnlockedVerifyConfig(self):
     """Verify function.
@@ -994,65 +947,15 @@
         except IndexError:
           pass
 
-    def _helper(owner, attr, value, template):
-      try:
-        utils.ForceDictType(value, template)
-      except errors.GenericError, err:
-        result.append("%s has invalid %s: %s" % (owner, attr, err))
-
-    def _helper_nic(owner, params):
-      try:
-        objects.NIC.CheckParameterSyntax(params)
-      except errors.ConfigurationError, err:
-        result.append("%s has invalid nicparams: %s" % (owner, err))
-
-    def _helper_ipolicy(owner, ipolicy, iscluster):
-      try:
-        objects.InstancePolicy.CheckParameterSyntax(ipolicy, iscluster)
-      except errors.ConfigurationError, err:
-        result.append("%s has invalid instance policy: %s" % (owner, err))
-      for key, value in ipolicy.items():
-        if key == constants.ISPECS_MINMAX:
-          for k in range(len(value)):
-            _helper_ispecs(owner, "ipolicy/%s[%s]" % (key, k), value[k])
-        elif key == constants.ISPECS_STD:
-          _helper(owner, "ipolicy/" + key, value,
-                  constants.ISPECS_PARAMETER_TYPES)
-        else:
-          # FIXME: assuming list type
-          if key in constants.IPOLICY_PARAMETERS:
-            exp_type = float
-            # if the value is int, it can be converted into float
-            convertible_types = [int]
-          else:
-            exp_type = list
-            convertible_types = []
-          # Try to convert from allowed types, if necessary.
-          if any(isinstance(value, ct) for ct in convertible_types):
-            try:
-              value = exp_type(value)
-              ipolicy[key] = value
-            except ValueError:
-              pass
-          if not isinstance(value, exp_type):
-            result.append("%s has invalid instance policy: for %s,"
-                          " expecting %s, got %s" %
-                          (owner, key, exp_type.__name__, type(value)))
-
-    def _helper_ispecs(owner, parentkey, params):
-      for (key, value) in params.items():
-        fullkey = "/".join([parentkey, key])
-        _helper(owner, fullkey, value, constants.ISPECS_PARAMETER_TYPES)
-
     # check cluster parameters
-    _helper("cluster", "beparams", cluster.SimpleFillBE({}),
-            constants.BES_PARAMETER_TYPES)
-    _helper("cluster", "nicparams", cluster.SimpleFillNIC({}),
-            constants.NICS_PARAMETER_TYPES)
-    _helper_nic("cluster", cluster.SimpleFillNIC({}))
-    _helper("cluster", "ndparams", cluster.SimpleFillND({}),
-            constants.NDS_PARAMETER_TYPES)
-    _helper_ipolicy("cluster", cluster.ipolicy, True)
+    VerifyType("cluster", "beparams", cluster.SimpleFillBE({}),
+               constants.BES_PARAMETER_TYPES, result.append)
+    VerifyType("cluster", "nicparams", cluster.SimpleFillNIC({}),
+               constants.NICS_PARAMETER_TYPES, result.append)
+    VerifyNic("cluster", cluster.SimpleFillNIC({}), result.append)
+    VerifyType("cluster", "ndparams", cluster.SimpleFillND({}),
+               constants.NDS_PARAMETER_TYPES, result.append)
+    VerifyIpolicy("cluster", cluster.ipolicy, True, result.append)
 
     for disk_template in cluster.diskparams:
       if disk_template not in constants.DTS_HAVE_ACCESS:
@@ -1092,19 +995,15 @@
         if nic.nicparams:
           filled = cluster.SimpleFillNIC(nic.nicparams)
           owner = "instance %s nic %d" % (instance.name, idx)
-          _helper(owner, "nicparams",
-                  filled, constants.NICS_PARAMETER_TYPES)
-          _helper_nic(owner, filled)
-
-      # disk template checks
-      if not instance.disk_template in data.cluster.enabled_disk_templates:
-        result.append("instance '%s' uses the disabled disk template '%s'." %
-                      (instance.name, instance.disk_template))
+          VerifyType(owner, "nicparams",
+                     filled, constants.NICS_PARAMETER_TYPES, result.append)
+          VerifyNic(owner, filled, result.append)
 
       # parameter checks
       if instance.beparams:
-        _helper("instance %s" % instance.name, "beparams",
-                cluster.FillBE(instance), constants.BES_PARAMETER_TYPES)
+        VerifyType("instance %s" % instance.name, "beparams",
+                   cluster.FillBE(instance), constants.BES_PARAMETER_TYPES,
+                   result.append)
 
       # check that disks exists
       for disk_uuid in instance.disks:
@@ -1180,9 +1079,9 @@
         result.append("Node '%s' has invalid group '%s'" %
                       (node.name, node.group))
       else:
-        _helper("node %s" % node.name, "ndparams",
-                cluster.FillND(node, data.nodegroups[node.group]),
-                constants.NDS_PARAMETER_TYPES)
+        VerifyType("node %s" % node.name, "ndparams",
+                   cluster.FillND(node, data.nodegroups[node.group]),
+                   constants.NDS_PARAMETER_TYPES, result.append)
       used_globals = constants.NDC_GLOBALS.intersection(node.ndparams)
       if used_globals:
         result.append("Node '%s' has some global parameters set: %s" %
@@ -1203,12 +1102,12 @@
       else:
         nodegroups_names.add(nodegroup.name)
       group_name = "group %s" % nodegroup.name
-      _helper_ipolicy(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy),
-                      False)
+      VerifyIpolicy(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy),
+                    False, result.append)
       if nodegroup.ndparams:
-        _helper(group_name, "ndparams",
-                cluster.SimpleFillND(nodegroup.ndparams),
-                constants.NDS_PARAMETER_TYPES)
+        VerifyType(group_name, "ndparams",
+                   cluster.SimpleFillND(nodegroup.ndparams),
+                   constants.NDS_PARAMETER_TYPES, result.append)
 
     # drbd minors check
     # FIXME: The check for DRBD map needs to be implemented in WConfd
@@ -1282,7 +1181,7 @@
         feedback_fn(errmsg)
     return config_errors
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def VerifyConfig(self):
     """Verify function.
 
@@ -1295,7 +1194,7 @@
     """
     return self._UnlockedVerifyConfig()
 
-  @_ConfigSync()
+  @ConfigSync()
   def AddTcpUdpPort(self, port):
     """Adds a new port to the available port pool.
 
@@ -1309,14 +1208,14 @@
 
     self._ConfigData().cluster.tcpudp_port_pool.add(port)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetPortList(self):
     """Returns a copy of the current port list.
 
     """
     return self._ConfigData().cluster.tcpudp_port_pool.copy()
 
-  @_ConfigSync()
+  @ConfigSync()
   def AllocatePort(self):
     """Allocate a port.
 
@@ -1337,7 +1236,7 @@
       self._ConfigData().cluster.highest_used_port = port
     return port
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def ComputeDRBDMap(self):
     """Compute the used DRBD minor/nodes.
 
@@ -1354,53 +1253,56 @@
       return dict(map(lambda (k, v): (k, dict(v)),
                       self._wconfd.ComputeDRBDMap()))
 
-  def AllocateDRBDMinor(self, node_uuids, inst_uuid):
+  def AllocateDRBDMinor(self, node_uuids, disk_uuid):
     """Allocate a drbd minor.
 
     This is just a wrapper over a call to WConfd.
 
     The free minor will be automatically computed from the existing
-    devices. A node can be given multiple times in order to allocate
-    multiple minors. The result is the list of minors, in the same
+    devices. A node cannot be given multiple times.
+    The result is the list of minors, in the same
     order as the passed nodes.
 
-    @type inst_uuid: string
-    @param inst_uuid: the instance for which we allocate minors
+    @type node_uuids: list of strings
+    @param node_uuids: the nodes in which we allocate minors
+    @type disk_uuid: string
+    @param disk_uuid: the disk for which we allocate minors
+    @rtype: list of ints
+    @return: A list of minors in the same order as the passed nodes
 
     """
-    assert isinstance(inst_uuid, basestring), \
-           "Invalid argument '%s' passed to AllocateDRBDMinor" % inst_uuid
+    assert isinstance(disk_uuid, basestring), \
+           "Invalid argument '%s' passed to AllocateDRBDMinor" % disk_uuid
 
     if self._offline:
       raise errors.ProgrammerError("Can't call AllocateDRBDMinor"
                                    " in offline mode")
 
-    result = self._wconfd.AllocateDRBDMinor(inst_uuid, node_uuids)
+    result = self._wconfd.AllocateDRBDMinor(disk_uuid, node_uuids)
     logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                   node_uuids, result)
     return result
 
-  def _UnlockedReleaseDRBDMinors(self, inst_uuid):
-    """Release temporary drbd minors allocated for a given instance.
+  def _UnlockedReleaseDRBDMinors(self, disk_uuid):
+    """Release temporary drbd minors allocated for a given disk.
 
     This is just a wrapper over a call to WConfd.
 
-    @type inst_uuid: string
-    @param inst_uuid: the instance for which temporary minors should be
-                      released
+    @type disk_uuid: string
+    @param disk_uuid: the disk for which temporary minors should be released
 
     """
-    assert isinstance(inst_uuid, basestring), \
+    assert isinstance(disk_uuid, basestring), \
            "Invalid argument passed to ReleaseDRBDMinors"
     # in offline mode we allow the calls to release DRBD minors,
     # because then nothing can be allocated anyway;
     # this is useful for testing
     if not self._offline:
-      self._wconfd.ReleaseDRBDMinors(inst_uuid)
+      self._wconfd.ReleaseDRBDMinors(disk_uuid)
 
-  @_ConfigSync()
-  def ReleaseDRBDMinors(self, inst_uuid):
-    """Release temporary drbd minors allocated for a given instance.
+  @ConfigSync()
+  def ReleaseDRBDMinors(self, disk_uuid):
+    """Release temporary drbd minors allocated for a given disk.
 
     This should be called on the error paths, on the success paths
     it's automatically called by the ConfigWriter add and update
@@ -1408,14 +1310,27 @@
 
     This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.
 
-    @type inst_uuid: string
-    @param inst_uuid: the instance for which temporary minors should be
-                      released
+    @type disk_uuid: string
+    @param disk_uuid: the disk for which temporary minors should be released
 
     """
-    self._UnlockedReleaseDRBDMinors(inst_uuid)
+    self._UnlockedReleaseDRBDMinors(disk_uuid)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
+  def GetInstanceDiskTemplate(self, inst_uuid):
+    """Return the disk template of an instance.
+
+    This corresponds to the currently attached disks. If no disks are attached,
+    it is L{constants.DT_DISKLESS}; if all attached disks share one type, that
+    type is returned; otherwise, L{constants.DT_MIXED} is returned.
+
+    @type inst_uuid: str
+    @param inst_uuid: The uuid of the instance.
+    """
+    return utils.GetDiskTemplate(self._UnlockedGetInstanceDisks(inst_uuid))
+
+  @ConfigSync(shared=1)
   def GetConfigVersion(self):
     """Get the configuration version.
 
@@ -1424,7 +1339,7 @@
     """
     return self._ConfigData().version
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetClusterName(self):
     """Get cluster name.
 
@@ -1433,7 +1348,7 @@
     """
     return self._ConfigData().cluster.cluster_name
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetMasterNode(self):
     """Get the UUID of the master node for this cluster.
 
@@ -1442,7 +1357,7 @@
     """
     return self._ConfigData().cluster.master_node
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetMasterNodeName(self):
     """Get the hostname of the master node for this cluster.
 
@@ -1451,7 +1366,7 @@
     """
     return self._UnlockedGetNodeName(self._ConfigData().cluster.master_node)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetMasterNodeInfo(self):
     """Get the master node information for this cluster.
 
@@ -1461,7 +1376,7 @@
     """
     return self._UnlockedGetNodeInfo(self._ConfigData().cluster.master_node)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetMasterIP(self):
     """Get the IP of the master node for this cluster.
 
@@ -1470,56 +1385,56 @@
     """
     return self._ConfigData().cluster.master_ip
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetMasterNetdev(self):
     """Get the master network device for this cluster.
 
     """
     return self._ConfigData().cluster.master_netdev
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetMasterNetmask(self):
     """Get the netmask of the master node for this cluster.
 
     """
     return self._ConfigData().cluster.master_netmask
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetUseExternalMipScript(self):
     """Get flag representing whether to use the external master IP setup script.
 
     """
     return self._ConfigData().cluster.use_external_mip_script
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetFileStorageDir(self):
     """Get the file storage dir for this cluster.
 
     """
     return self._ConfigData().cluster.file_storage_dir
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetSharedFileStorageDir(self):
     """Get the shared file storage dir for this cluster.
 
     """
     return self._ConfigData().cluster.shared_file_storage_dir
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetGlusterStorageDir(self):
     """Get the Gluster storage dir for this cluster.
 
     """
     return self._ConfigData().cluster.gluster_storage_dir
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetHypervisorType(self):
     """Get the hypervisor type for this cluster.
 
     """
     return self._ConfigData().cluster.enabled_hypervisors[0]
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetRsaHostKey(self):
     """Return the rsa hostkey from the config.
 
@@ -1529,7 +1444,7 @@
     """
     return self._ConfigData().cluster.rsahostkeypub
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetDsaHostKey(self):
     """Return the dsa hostkey from the config.
 
@@ -1539,14 +1454,14 @@
     """
     return self._ConfigData().cluster.dsahostkeypub
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetDefaultIAllocator(self):
     """Get the default instance allocator for this cluster.
 
     """
     return self._ConfigData().cluster.default_iallocator
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetDefaultIAllocatorParameters(self):
     """Get the default instance allocator parameters for this cluster.
 
@@ -1556,7 +1471,7 @@
     """
     return self._ConfigData().cluster.default_iallocator_params
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetPrimaryIPFamily(self):
     """Get cluster primary ip family.
 
@@ -1565,7 +1480,7 @@
     """
     return self._ConfigData().cluster.primary_ip_family
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetMasterNetworkParameters(self):
     """Get network parameters of the master node.
 
@@ -1581,7 +1496,7 @@
 
     return result
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetInstallImage(self):
     """Get the install image location
 
@@ -1591,7 +1506,7 @@
     """
     return self._ConfigData().cluster.install_image
 
-  @_ConfigSync()
+  @ConfigSync()
   def SetInstallImage(self, install_image):
     """Set the install image location
 
@@ -1601,7 +1516,7 @@
     """
     self._ConfigData().cluster.install_image = install_image
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetInstanceCommunicationNetwork(self):
     """Get cluster instance communication network
 
@@ -1612,7 +1527,7 @@
     """
     return self._ConfigData().cluster.instance_communication_network
 
-  @_ConfigSync()
+  @ConfigSync()
   def SetInstanceCommunicationNetwork(self, network_name):
     """Set cluster instance communication network
 
@@ -1623,7 +1538,7 @@
     """
     self._ConfigData().cluster.instance_communication_network = network_name
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetZeroingImage(self):
     """Get the zeroing image location
 
@@ -1633,7 +1548,7 @@
     """
     return self._config_data.cluster.zeroing_image
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetCompressionTools(self):
     """Get cluster compression tools
 
@@ -1644,7 +1559,7 @@
     """
     return self._ConfigData().cluster.compression_tools
 
-  @_ConfigSync()
+  @ConfigSync()
   def SetCompressionTools(self, tools):
     """Set cluster compression tools
 
@@ -1655,7 +1570,7 @@
     """
     self._ConfigData().cluster.compression_tools = tools
 
-  @_ConfigSync()
+  @ConfigSync()
   def AddNodeGroup(self, group, ec_id, check_uuid=True):
     """Add a node group to the configuration.
 
@@ -1703,7 +1618,7 @@
     self._ConfigData().nodegroups[group.uuid] = group
     self._ConfigData().cluster.serial_no += 1
 
-  @_ConfigSync()
+  @ConfigSync()
   def RemoveNodeGroup(self, group_uuid):
     """Remove a node group from the configuration.
 
@@ -1746,7 +1661,7 @@
     raise errors.OpPrereqError("Node group '%s' not found" % target,
                                errors.ECODE_NOENT)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def LookupNodeGroup(self, target):
     """Lookup a node group's UUID.
 
@@ -1774,7 +1689,7 @@
 
     return self._ConfigData().nodegroups[uuid]
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNodeGroup(self, uuid):
     """Lookup a node group.
 
@@ -1792,14 +1707,14 @@
     """
     return dict(self._ConfigData().nodegroups)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetAllNodeGroupsInfo(self):
     """Get the configuration of all node groups.
 
     """
     return self._UnlockedGetAllNodeGroupsInfo()
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetAllNodeGroupsInfoDict(self):
     """Get the configuration of all node groups expressed as a dictionary of
     dictionaries.
@@ -1808,14 +1723,14 @@
     return dict(map(lambda (uuid, ng): (uuid, ng.ToDict()),
                     self._UnlockedGetAllNodeGroupsInfo().items()))
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNodeGroupList(self):
     """Get a list of node groups.
 
     """
     return self._ConfigData().nodegroups.keys()
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNodeGroupMembersByNodes(self, nodes):
     """Get nodes which are member in the same nodegroups as the given nodes.
 
@@ -1826,7 +1741,7 @@
                      for member_uuid in
                        self._UnlockedGetNodeGroup(ngfn(node_uuid)).members)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetMultiNodeGroupInfo(self, group_uuids):
     """Get the configuration of multiple node groups.
 
@@ -1837,14 +1752,16 @@
     """
     return [(uuid, self._UnlockedGetNodeGroup(uuid)) for uuid in group_uuids]
 
-  @_ConfigSync()
-  def AddInstance(self, instance, ec_id):
+  def AddInstance(self, instance, _ec_id, replace=False):
     """Add an instance to the config.
 
     This should be used after creating a new instance.
 
     @type instance: L{objects.Instance}
     @param instance: the instance object
+    @type replace: bool
+    @param replace: if true, expect the instance to be present and
+        replace rather than add.
 
     """
     if not isinstance(instance, objects.Instance):
@@ -1857,16 +1774,17 @@
                                         " MAC address '%s' already in use." %
                                         (instance.name, nic.mac))
 
-    self._CheckUniqueUUID(instance, include_temporary=False)
+    if replace:
+      self._CheckUUIDpresent(instance)
+    else:
+      self._CheckUniqueUUID(instance, include_temporary=False)
 
     instance.serial_no = 1
     instance.ctime = instance.mtime = time.time()
-    self._ConfigData().instances[instance.uuid] = instance
-    self._ConfigData().cluster.serial_no += 1
-    self._UnlockedReleaseDRBDMinors(instance.uuid)
-    # FIXME: After RemoveInstance is moved to WConfd, use its internal
-    # function from TempRes module instead.
-    self._UnlockedCommitTemporaryIps(ec_id)
+
+    utils.SimpleRetry(True, self._wconfd.AddInstance, 0.1, 30,
+                      args=[instance.ToDict(), self._GetWConfdContext()])
+    self.OutDate()
 
   def _EnsureUUID(self, item, ec_id):
     """Ensures a given object has a valid UUID.
@@ -1895,6 +1813,19 @@
       raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
                                       " in use" % (item.name, item.uuid))
 
+  def _CheckUUIDpresent(self, item):
+    """Checks that an object with the given UUID exists.
+
+    @param item: the instance or other UUID possessing object to verify that
+        its UUID is present
+
+    """
+    if not item.uuid:
+      raise errors.ConfigurationError("'%s' must have an UUID" % (item.name,))
+    if item.uuid not in self._AllIDs(include_temporary=False):
+      raise errors.ConfigurationError("Cannot replace '%s': UUID %s not present"
+                                      % (item.name, item.uuid))
+
   def _SetInstanceStatus(self, inst_uuid, status, disks_active,
                          admin_state_source):
     """Set the instance's status to a given value.
@@ -1928,7 +1859,7 @@
       instance.mtime = time.time()
     return instance
 
-  @_ConfigSync()
+  @ConfigSync()
   def MarkInstanceUp(self, inst_uuid):
     """Mark the instance status to up in the config.
 
@@ -1941,7 +1872,7 @@
     return self._SetInstanceStatus(inst_uuid, constants.ADMINST_UP, True,
                                    constants.ADMIN_SOURCE)
 
-  @_ConfigSync()
+  @ConfigSync()
   def MarkInstanceOffline(self, inst_uuid):
     """Mark the instance status to down in the config.
 
@@ -1954,7 +1885,7 @@
     return self._SetInstanceStatus(inst_uuid, constants.ADMINST_OFFLINE, False,
                                    constants.ADMIN_SOURCE)
 
-  @_ConfigSync()
+  @ConfigSync()
   def RemoveInstance(self, inst_uuid):
     """Remove the instance from the configuration.
 
@@ -1981,7 +1912,7 @@
     del self._ConfigData().instances[inst_uuid]
     self._ConfigData().cluster.serial_no += 1
 
-  @_ConfigSync()
+  @ConfigSync()
   def RenameInstance(self, inst_uuid, new_name):
     """Rename an instance.
 
@@ -2008,7 +1939,7 @@
     # Force update of ssconf files
     self._ConfigData().cluster.serial_no += 1
 
-  @_ConfigSync()
+  @ConfigSync()
   def MarkInstanceDown(self, inst_uuid):
     """Mark the status of an instance to down in the configuration.
 
@@ -2022,7 +1953,7 @@
     return self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None,
                                    constants.ADMIN_SOURCE)
 
-  @_ConfigSync()
+  @ConfigSync()
   def MarkInstanceUserDown(self, inst_uuid):
     """Mark the status of an instance to user down in the configuration.
 
@@ -2034,7 +1965,7 @@
     self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None,
                             constants.USER_SOURCE)
 
-  @_ConfigSync()
+  @ConfigSync()
   def MarkInstanceDisksActive(self, inst_uuid):
     """Mark the status of instance disks active.
 
@@ -2044,7 +1975,7 @@
     """
     return self._SetInstanceStatus(inst_uuid, None, True, None)
 
-  @_ConfigSync()
+  @ConfigSync()
   def MarkInstanceDisksInactive(self, inst_uuid):
     """Mark the status of instance disks inactive.
 
@@ -2062,7 +1993,7 @@
     """
     return self._ConfigData().instances.keys()
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetInstanceList(self):
     """Get the list of instances.
 
@@ -2098,7 +2029,7 @@
 
     return self._ConfigData().instances[inst_uuid]
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetInstanceInfo(self, inst_uuid):
     """Returns information about an instance.
 
@@ -2113,7 +2044,7 @@
     """
     return self._UnlockedGetInstanceInfo(inst_uuid)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetInstanceNodeGroups(self, inst_uuid, primary_only=False):
     """Returns set of node group UUIDs for instance's nodes.
 
@@ -2132,7 +2063,7 @@
     return frozenset(self._UnlockedGetNodeInfo(node_uuid).group
                      for node_uuid in nodes)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetInstanceNetworks(self, inst_uuid):
     """Returns set of network UUIDs for instance's nics.
 
@@ -2150,7 +2081,7 @@
 
     return frozenset(networks)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetMultiInstanceInfo(self, inst_uuids):
     """Get the configuration of multiple instances.
 
@@ -2163,7 +2094,7 @@
     """
     return [(uuid, self._UnlockedGetInstanceInfo(uuid)) for uuid in inst_uuids]
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetMultiInstanceInfoByName(self, inst_names):
     """Get the configuration of multiple instances.
 
@@ -2184,7 +2115,7 @@
                                         " not found." % name)
     return result
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetAllInstancesInfo(self):
     """Get the configuration of all instances.
 
@@ -2200,7 +2131,7 @@
                     for inst_uuid in self._UnlockedGetInstanceList()])
     return my_dict
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetInstancesInfoByFilter(self, filter_fn):
     """Get instance configuration with a filter.
 
@@ -2216,7 +2147,7 @@
                 for (uuid, inst) in self._ConfigData().instances.items()
                 if filter_fn(inst))
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetInstanceInfoByName(self, inst_name):
     """Get the L{objects.Instance} object for a named instance.
 
@@ -2240,7 +2171,7 @@
       raise errors.OpExecError("Unknown instance: %s" % inst_uuid)
     return inst_info.name
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetInstanceName(self, inst_uuid):
     """Gets the instance name for the passed instance.
 
@@ -2252,7 +2183,7 @@
     """
     return self._UnlockedGetInstanceName(inst_uuid)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetInstanceNames(self, inst_uuids):
     """Gets the instance names for the passed list of nodes.
 
@@ -2264,7 +2195,7 @@
     """
     return self._UnlockedGetInstanceNames(inst_uuids)
 
-  @_ConfigSync()
+  @ConfigSync()
   def SetInstancePrimaryNode(self, inst_uuid, target_node_uuid):
     """Sets the primary node of an existing instance
 
@@ -2276,6 +2207,40 @@
     """
     self._UnlockedGetInstanceInfo(inst_uuid).primary_node = target_node_uuid
 
+  @ConfigSync()
+  def SetDiskNodes(self, disk_uuid, nodes):
+    """Sets the nodes of an existing disk
+
+    @param disk_uuid: disk UUID
+    @type disk_uuid: string
+    @param nodes: the new nodes for the disk
+    @type nodes: list of node uuids
+
+    """
+    self._UnlockedGetDiskInfo(disk_uuid).nodes = nodes
+
+  @ConfigSync()
+  def SetDiskLogicalID(self, disk_uuid, logical_id):
+    """Sets the logical_id of an existing disk
+
+    @param disk_uuid: disk UUID
+    @type disk_uuid: string
+    @param logical_id: the new logical_id for the disk
+    @type logical_id: tuple
+
+    """
+    disk = self._UnlockedGetDiskInfo(disk_uuid)
+    if disk is None:
+      raise errors.ConfigurationError("Unknown disk UUID '%s'" % disk_uuid)
+
+    if len(disk.logical_id) != len(logical_id):
+      raise errors.ProgrammerError("Logical ID format mismatch\n"
+                                   "Existing logical ID: %s\n"
+                                   "New logical ID: %s" %
+                                   (disk.logical_id, logical_id))
+
+    disk.logical_id = logical_id
+
   def _UnlockedGetInstanceNames(self, inst_uuids):
     return [self._UnlockedGetInstanceName(uuid) for uuid in inst_uuids]
 
@@ -2297,7 +2262,7 @@
     self._ConfigData().nodes[node.uuid] = node
     self._ConfigData().cluster.serial_no += 1
 
-  @_ConfigSync()
+  @ConfigSync()
   def AddNode(self, node, ec_id):
     """Add a node to the configuration.
 
@@ -2307,7 +2272,7 @@
     """
     self._UnlockedAddNode(node, ec_id)
 
-  @_ConfigSync()
+  @ConfigSync()
   def RemoveNode(self, node_uuid):
     """Remove a node from the configuration.
 
@@ -2354,7 +2319,7 @@
 
     return self._ConfigData().nodes[node_uuid]
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNodeInfo(self, node_uuid):
     """Get the configuration of a node, as stored in the config.
 
@@ -2368,7 +2333,7 @@
     """
     return self._UnlockedGetNodeInfo(node_uuid)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNodeInstances(self, node_uuid):
     """Get the instances of a node, as stored in the config.
 
@@ -2387,7 +2352,7 @@
         sec.append(inst.uuid)
     return (pri, sec)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNodeGroupInstances(self, uuid, primary_only=False):
     """Get the instances of a node group.
 
@@ -2420,7 +2385,7 @@
       result += "%s=%s\n" % (key, hvparams[key])
     return result
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetHvparamsString(self, hvname):
     """Return the hypervisor parameters of the given hypervisor.
 
@@ -2444,7 +2409,7 @@
     """
     return self._ConfigData().nodes.keys()
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNodeList(self):
     """Return the list of nodes which are in the configuration.
 
@@ -2459,14 +2424,14 @@
                  for node in self._UnlockedGetNodeList()]
     return [node.uuid for node in all_nodes if not node.offline]
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetOnlineNodeList(self):
     """Return the list of nodes which are online.
 
     """
     return self._UnlockedGetOnlineNodeList()
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetVmCapableNodeList(self):
     """Return the list of nodes which are not vm capable.
 
@@ -2475,7 +2440,7 @@
                  for node in self._UnlockedGetNodeList()]
     return [node.uuid for node in all_nodes if node.vm_capable]
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNonVmCapableNodeList(self):
     """Return the list of nodes' uuids which are not vm capable.
 
@@ -2484,7 +2449,7 @@
                  for node in self._UnlockedGetNodeList()]
     return [node.uuid for node in all_nodes if not node.vm_capable]
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNonVmCapableNodeNameList(self):
     """Return the list of nodes' names which are not vm capable.
 
@@ -2493,7 +2458,7 @@
                  for node in self._UnlockedGetNodeList()]
     return [node.name for node in all_nodes if not node.vm_capable]
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetMultiNodeInfo(self, node_uuids):
     """Get the configuration of multiple nodes.
 
@@ -2515,7 +2480,7 @@
     return dict([(node_uuid, self._UnlockedGetNodeInfo(node_uuid))
                  for node_uuid in self._UnlockedGetNodeList()])
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetAllNodesInfo(self):
     """Get the configuration of all nodes.
 
@@ -2532,7 +2497,7 @@
         return node
     return None
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNodeInfoByName(self, node_name):
     """Get the L{objects.Node} object for a named node.
 
@@ -2544,7 +2509,7 @@
     """
     return self._UnlockedGetNodeInfoByName(node_name)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNodeGroupInfoByName(self, nodegroup_name):
     """Get the L{objects.NodeGroup} object for a named node group.
 
@@ -2570,7 +2535,7 @@
     else:
       raise errors.ProgrammerError("Can't handle node spec '%s'" % node_spec)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNodeName(self, node_spec):
     """Gets the node name for the passed node.
 
@@ -2585,7 +2550,7 @@
   def _UnlockedGetNodeNames(self, node_specs):
     return [self._UnlockedGetNodeName(node_spec) for node_spec in node_specs]
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNodeNames(self, node_specs):
     """Gets the node names for the passed list of nodes.
 
@@ -2597,7 +2562,7 @@
     """
     return self._UnlockedGetNodeNames(node_specs)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNodeGroupsFromNodes(self, node_uuids):
     """Returns groups for a list of nodes.
 
@@ -2619,7 +2584,7 @@
     return [node.uuid for node in self._ConfigData().nodes.values()
             if node.master_candidate]
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetMasterCandidateUuids(self):
     """Get the list of UUIDs of master candidates.
 
@@ -2649,7 +2614,7 @@
     mc_should = min(mc_max, self._ConfigData().cluster.candidate_pool_size)
     return (mc_now, mc_should, mc_max)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetMasterCandidateStats(self, exceptions=None):
     """Get the number of current and maximum possible candidates.
 
@@ -2663,7 +2628,7 @@
     """
     return self._UnlockedGetMasterCandidateStats(exceptions)
 
-  @_ConfigSync()
+  @ConfigSync()
   def MaintainCandidatePool(self, exception_node_uuids):
     """Try to grow the candidate pool to the desired size.
 
@@ -2727,7 +2692,7 @@
     else:
       nodegroup_obj.members.remove(node.uuid)
 
-  @_ConfigSync()
+  @ConfigSync()
   def AssignGroupNodes(self, mods):
     """Changes the group of a number of nodes.
 
@@ -2817,7 +2782,7 @@
             self._AllNICs() +
             [self._ConfigData().cluster])
 
-  def GetConfigManager(self, shared=False):
+  def GetConfigManager(self, shared=False, forcelock=False):
     """Returns a ConfigManager, which is suitable to perform a synchronized
     block of configuration operations.
 
@@ -2825,7 +2790,7 @@
     runs inside the block should be very fast, preferably not using any IO.
     """
 
-    return ConfigManager(self, shared)
+    return ConfigManager(self, shared=shared, forcelock=forcelock)
 
   def _AddLockCount(self, count):
     self._lock_count += count
@@ -2834,7 +2799,7 @@
   def _LockCount(self):
     return self._lock_count
 
-  def _OpenConfig(self, shared):
+  def _OpenConfig(self, shared, force=False):
     """Read the config data from WConfd or disk.
 
     """
@@ -2844,10 +2809,12 @@
         raise errors.ConfigurationError("Can't request an exclusive"
                                         " configuration lock while holding"
                                         " shared")
-      else:
+      elif not force or self._lock_forced or not shared or self._offline:
         return # we already have the lock, do nothing
     else:
       self._lock_current_shared = shared
+    if force:
+      self._lock_forced = True
     # Read the configuration data. If offline, read the file directly.
     # If online, call WConfd.
     if self._offline:
@@ -2855,7 +2822,7 @@
         raw_data = utils.ReadFile(self._cfg_file)
         data_dict = serializer.Load(raw_data)
         # Make sure the configuration has the right version
-        _ValidateConfig(data_dict)
+        ValidateConfig(data_dict)
         data = objects.ConfigData.FromDict(data_dict)
       except errors.ConfigVersionMismatch:
         raise
@@ -2888,7 +2855,7 @@
       # Upgrade configuration if needed
       self._UpgradeConfig(saveafter=True)
     else:
-      if shared:
+      if shared and not force:
         if self._config_data is None:
           logging.debug("Requesting config, as I have no up-to-date copy")
           dict_data = self._wconfd.ReadConfig()
@@ -2919,20 +2886,18 @@
     """
     if self._AddLockCount(-1) > 0:
       return # we still have the lock, do nothing
-    try:
-      if save:
-        self._WriteConfig()
-    except Exception, err:
-      logging.critical("Can't write the configuration: %s", str(err))
-      raise
-    finally:
-      if not self._offline and not self._lock_current_shared:
-        try:
-          self._wconfd.UnlockConfig(self._GetWConfdContext())
-        except AttributeError:
-          # If the configuration hasn't been initialized yet, just ignore it.
-          pass
-        logging.debug("Configuration in WConfd unlocked")
+    if save:
+      try:
+        logging.debug("Writing configuration and unlocking it")
+        self._WriteConfig(releaselock=True)
+      except Exception, err:
+        logging.critical("Can't write the configuration: %s", str(err))
+        raise
+    elif not self._offline and \
+         not (self._lock_current_shared and not self._lock_forced):
+      logging.debug("Unlocking configuration without writing")
+      self._wconfd.UnlockConfig(self._GetWConfdContext())
+      self._lock_forced = False
 
   # TODO: To WConfd
   def _UpgradeConfig(self, saveafter=False):
@@ -2979,7 +2944,7 @@
       if self._offline:
         self._UnlockedVerifyConfigAndLog()
 
-  def _WriteConfig(self, destination=None):
+  def _WriteConfig(self, destination=None, releaselock=False):
     """Write the configuration data to persistent storage.
 
     """
@@ -3009,8 +2974,16 @@
         os.close(fd)
     else:
       try:
-        self._wconfd.WriteConfig(self._GetWConfdContext(),
-                                 self._ConfigData().ToDict())
+        if releaselock:
+          res = self._wconfd.WriteConfigAndUnlock(self._GetWConfdContext(),
+                                                  self._ConfigData().ToDict())
+          if not res:
+            logging.warning("WriteConfigAndUnlock indicates we already have"
+                            " released the lock; assuming this was just a retry"
+                            " and the initial call succeeded")
+        else:
+          self._wconfd.WriteConfig(self._GetWConfdContext(),
+                                   self._ConfigData().ToDict())
       except errors.LockError:
         raise errors.ConfigurationError("The configuration file has been"
                                         " modified since the last write, cannot"
@@ -3154,21 +3127,21 @@
                                       " values: %s" % err)
     return ssconf_values
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetSsconfValues(self):
     """Wrapper using lock around _UnlockedGetSsconf().
 
     """
     return self._UnlockedGetSsconfValues()
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetVGName(self):
     """Return the volume group name.
 
     """
     return self._ConfigData().cluster.volume_group_name
 
-  @_ConfigSync()
+  @ConfigSync()
   def SetVGName(self, vg_name):
     """Set the volume group name.
 
@@ -3176,14 +3149,14 @@
     self._ConfigData().cluster.volume_group_name = vg_name
     self._ConfigData().cluster.serial_no += 1
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetDRBDHelper(self):
     """Return DRBD usermode helper.
 
     """
     return self._ConfigData().cluster.drbd_usermode_helper
 
-  @_ConfigSync()
+  @ConfigSync()
   def SetDRBDHelper(self, drbd_helper):
     """Set DRBD usermode helper.
 
@@ -3191,14 +3164,14 @@
     self._ConfigData().cluster.drbd_usermode_helper = drbd_helper
     self._ConfigData().cluster.serial_no += 1
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetMACPrefix(self):
     """Return the mac prefix.
 
     """
     return self._ConfigData().cluster.mac_prefix
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetClusterInfo(self):
     """Returns information about the cluster
 
@@ -3208,14 +3181,14 @@
     """
     return self._ConfigData().cluster
 
-  @_ConfigSync(shared=1)
-  def HasAnyDiskOfType(self, dev_type):
+  @ConfigSync(shared=1)
+  def DisksOfType(self, dev_type):
     """Check if in there is at disk of the given type in the configuration.
 
     """
-    return self._ConfigData().HasAnyDiskOfType(dev_type)
+    return self._ConfigData().DisksOfType(dev_type)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetDetachedConfig(self):
     """Returns a detached version of a ConfigManager, which represents
     a read-only snapshot of the configuration at this particular time.
@@ -3223,7 +3196,7 @@
     """
     return DetachedConfig(self._ConfigData())
 
-  @_ConfigSync()
+  @ConfigSync()
   def Update(self, target, feedback_fn, ec_id=None):
     """Notify function to be called after updates.
 
@@ -3281,7 +3254,7 @@
       self._ConfigData().cluster.serial_no += 1
       self._ConfigData().cluster.mtime = now
 
-    if isinstance(target, objects.Instance):
+    if isinstance(target, objects.Disk):
       self._UnlockedReleaseDRBDMinors(target.uuid)
 
     if ec_id is not None:
@@ -3308,7 +3281,7 @@
   def DropECReservations(self, ec_id):
     self._UnlockedDropECReservations(ec_id)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetAllNetworksInfo(self):
     """Get configuration info of all the networks.
 
@@ -3323,7 +3296,7 @@
     """
     return self._ConfigData().networks.keys()
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNetworkList(self):
     """Get the list of networks.
 
@@ -3332,7 +3305,7 @@
     """
     return self._UnlockedGetNetworkList()
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNetworkNames(self):
     """Get a list of network names
 
@@ -3352,7 +3325,7 @@
 
     return self._ConfigData().networks[uuid]
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetNetwork(self, uuid):
     """Returns information about a network.
 
@@ -3366,7 +3339,7 @@
     """
     return self._UnlockedGetNetwork(uuid)
 
-  @_ConfigSync()
+  @ConfigSync()
   def AddNetwork(self, net, ec_id, check_uuid=True):
     """Add a network to the configuration.
 
@@ -3412,7 +3385,7 @@
     raise errors.OpPrereqError("Network '%s' not found" % target,
                                errors.ECODE_NOENT)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def LookupNetwork(self, target):
     """Lookup a network's UUID.
 
@@ -3426,7 +3399,7 @@
     """
     return self._UnlockedLookupNetwork(target)
 
-  @_ConfigSync()
+  @ConfigSync()
   def RemoveNetwork(self, network_uuid):
     """Remove a network from the configuration.
 
@@ -3461,14 +3434,14 @@
 
     return netparams
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetGroupNetParams(self, net_uuid, node_uuid):
     """Locking wrapper of _UnlockedGetGroupNetParams()
 
     """
     return self._UnlockedGetGroupNetParams(net_uuid, node_uuid)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def CheckIPInNodeGroup(self, ip, node_uuid):
     """Check IP uniqueness in nodegroup.
 
@@ -3496,14 +3469,14 @@
 
     return (None, None)
 
-  @_ConfigSync(shared=1)
+  @ConfigSync(shared=1)
   def GetCandidateCerts(self):
     """Returns the candidate certificate map.
 
     """
     return self._ConfigData().cluster.candidate_certs
 
-  @_ConfigSync()
+  @ConfigSync()
   def SetCandidateCerts(self, certs):
     """Replaces the master candidate cert list with the new values.
 
@@ -3513,7 +3486,7 @@
     """
     self._ConfigData().cluster.candidate_certs = certs
 
-  @_ConfigSync()
+  @ConfigSync()
   def AddNodeToCandidateCerts(self, node_uuid, cert_digest,
                               info_fn=logging.info, warn_fn=logging.warn):
     """Adds an entry to the candidate certificate map.
@@ -3542,7 +3515,7 @@
                   % node_uuid)
     cluster.candidate_certs[node_uuid] = cert_digest
 
-  @_ConfigSync()
+  @ConfigSync()
   def RemoveNodeFromCandidateCerts(self, node_uuid,
                                    warn_fn=logging.warn):
     """Removes the entry of the given node in the certificate map.
@@ -3571,8 +3544,38 @@
     if not self._offline:
       self._wconfd.FlushConfig()
 
+  @ConfigSync(shared=1)
+  def GetAllDiskInfo(self):
+    """Get the configuration of all disks.
+
+    @rtype: dict
+    @return: dict of (disk_uuid, disk_info), where disk_info is what
+              GetDiskInfo would return for that disk
+
+    """
+    return self._UnlockedGetAllDiskInfo()
+
+  def _UnlockedGetAllDiskInfo(self):
+    return dict((disk_uuid, self._UnlockedGetDiskInfo(disk_uuid))
+                for disk_uuid in self._UnlockedGetDiskList())
+
+  @ConfigSync(shared=1)
+  def GetInstanceForDisk(self, disk_uuid):
+    """Returns the instance the disk is currently attached to.
+
+    @type disk_uuid: string
+    @param disk_uuid: the identifier of the disk in question.
+
+    @rtype: string
+    @return: uuid of the instance the disk is attached to, or None if the
+        disk is not attached to any instance.
+
+    """
+    for inst_uuid, inst_info in self._UnlockedGetAllInstancesInfo().items():
+      if disk_uuid in inst_info.disks:
+        return inst_uuid
+
 
 class DetachedConfig(ConfigWriter):
+  """Read-only snapshot of the config."""
+
   def __init__(self, config_data):
     super(DetachedConfig, self).__init__(self, offline=True)
     self._SetConfigData(config_data)
@@ -3582,7 +3585,7 @@
     raise errors.ProgrammerError("DetachedConfig supports only read-only"
                                  " operations")
 
-  def _OpenConfig(self, shared):
+  def _OpenConfig(self, shared, force=None):
     if not shared:
       DetachedConfig._WriteCallError()
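
As a usage illustration for the new disk accessors added above, a hedged
sketch (the ConfigWriter instance "cfg" and the surrounding job are
assumptions, not part of this patch):

  # Map every disk UUID to the instance it is attached to; GetInstanceForDisk
  # returns None for disks that are not attached to any instance.
  disk_owner = {}
  for disk_uuid in cfg.GetAllDiskInfo():
    disk_owner[disk_uuid] = cfg.GetInstanceForDisk(disk_uuid)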
 
diff --git a/lib/config/temporary_reservations.py b/lib/config/temporary_reservations.py
new file mode 100644
index 0000000..c90c23c
--- /dev/null
+++ b/lib/config/temporary_reservations.py
@@ -0,0 +1,101 @@
+#
+#
+
+# Copyright (C) 2014 Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Reserve resources, so that jobs can't take them.
+
+"""
+
+from ganeti import errors
+
+
+class TemporaryReservationManager(object):
+  """A temporary resource reservation manager.
+
+  This is used to reserve resources in a job, before using them, making sure
+  other jobs cannot get them in the meantime.
+
+  """
+  def __init__(self):
+    self._ec_reserved = {}
+
+  def Reserved(self, resource):
+    for holder_reserved in self._ec_reserved.values():
+      if resource in holder_reserved:
+        return True
+    return False
+
+  def Reserve(self, ec_id, resource):
+    if self.Reserved(resource):
+      raise errors.ReservationError("Duplicate reservation for resource '%s'"
+                                    % str(resource))
+    if ec_id not in self._ec_reserved:
+      self._ec_reserved[ec_id] = set([resource])
+    else:
+      self._ec_reserved[ec_id].add(resource)
+
+  def DropECReservations(self, ec_id):
+    if ec_id in self._ec_reserved:
+      del self._ec_reserved[ec_id]
+
+  def GetReserved(self):
+    all_reserved = set()
+    for holder_reserved in self._ec_reserved.values():
+      all_reserved.update(holder_reserved)
+    return all_reserved
+
+  def GetECReserved(self, ec_id):
+    """ Used when you want to retrieve all reservations for a specific
+        execution context. E.g when commiting reserved IPs for a specific
+        network.
+
+    """
+    ec_reserved = set()
+    if ec_id in self._ec_reserved:
+      ec_reserved.update(self._ec_reserved[ec_id])
+    return ec_reserved
+
+  def Generate(self, existing, generate_one_fn, ec_id):
+    """Generate a new resource of this type
+
+    """
+    assert callable(generate_one_fn)
+
+    all_elems = self.GetReserved()
+    all_elems.update(existing)
+    retries = 64
+    while retries > 0:
+      new_resource = generate_one_fn()
+      if new_resource is not None and new_resource not in all_elems:
+        break
+      retries -= 1
+    else:
+      raise errors.ConfigurationError("Not able to generate new resource"
+                                      " (last tried: %s)" % new_resource)
+    self.Reserve(ec_id, new_resource)
+    return new_resource
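
For illustration, a minimal usage sketch of this manager; the execution
context id and the MAC generator below are invented for the example and are
not part of the module:

  import random

  def _gen_mac():
    return "aa:00:00:%02x:%02x:%02x" % (random.randint(0, 255),
                                        random.randint(0, 255),
                                        random.randint(0, 255))

  macs = TemporaryReservationManager()
  macs.Reserve("job-42", "aa:00:00:10:20:30")      # explicit reservation
  new_mac = macs.Generate([], _gen_mac, "job-42")  # skips reserved values
  assert macs.Reserved(new_mac)
  macs.DropECReservations("job-42")                # job finished, release all
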
diff --git a/lib/config/utils.py b/lib/config/utils.py
new file mode 100644
index 0000000..61a1d70
--- /dev/null
+++ b/lib/config/utils.py
@@ -0,0 +1,80 @@
+#
+#
+
+# Copyright (C) 2014 Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Utilities used by the config module."""
+
+
+import logging
+
+
+def ConfigSync(shared=0):
+  """Configuration synchronization decorator.
+
+  """
+  def wrap(fn):
+    def sync_function(*args, **kwargs):
+      with args[0].GetConfigManager(shared):
+        return fn(*args, **kwargs)
+    return sync_function
+  return wrap
+
+
+class ConfigManager(object):
+  """Locks the configuration and exposes it to be read or modified.
+
+  """
+  def __init__(self, config_writer, shared=False, forcelock=False):
+    assert hasattr(config_writer, '_ConfigData'), \
+           "invalid argument: Not a ConfigWriter"
+    self._config_writer = config_writer
+    self._shared = shared
+    self._forcelock = forcelock
+
+  def __enter__(self):
+    try:
+      self._config_writer._OpenConfig(# pylint: disable=W0212
+                                      self._shared,
+                                      force=self._forcelock)
+    except Exception:
+      logging.debug("Opening configuration failed")
+      try:
+        self._config_writer._CloseConfig(False) # pylint: disable=W0212
+      except Exception: # pylint: disable=W0703
+        logging.debug("Closing configuration failed as well")
+      raise
+
+  def __exit__(self, exc_type, exc_value, traceback):
+    # save the configuration, if this was a write operation that succeeded
+    if exc_type is not None:
+      logging.debug("Configuration operation failed,"
+                    " the changes will not be saved")
+    # pylint: disable=W0212
+    self._config_writer._CloseConfig(not self._shared and exc_type is None)
+    return False
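
Roughly, the decorator is shorthand for wrapping a method body in a
ConfigManager block; a hedged sketch of the equivalence ("cfg" stands for any
ConfigWriter instance):

  # A method decorated as
  #   @ConfigSync(shared=1)
  #   def GetVGName(self):
  #     return self._ConfigData().cluster.volume_group_name
  # behaves roughly like
  def get_vg_name(cfg):
    with cfg.GetConfigManager(1):
      return cfg._ConfigData().cluster.volume_group_name
  # Passing forcelock=True to GetConfigManager() additionally makes
  # _OpenConfig() take a real WConfd lock even for shared access.
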
diff --git a/lib/config/verify.py b/lib/config/verify.py
new file mode 100644
index 0000000..e53b384
--- /dev/null
+++ b/lib/config/verify.py
@@ -0,0 +1,153 @@
+#
+#
+
+# Copyright (C) 2015 Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Verification helpers for the configuration object."""
+
+from ganeti import constants
+from ganeti import errors
+from ganeti import objects
+from ganeti import utils
+
+
+def ValidateConfig(data):
+  """Verifies that a configuration dict looks valid.
+
+  This only verifies the version of the configuration.
+
+  @raise errors.ConfigurationError: if the version differs from what
+      we expect
+
+  """
+  if data['version'] != constants.CONFIG_VERSION:
+    raise errors.ConfigVersionMismatch(constants.CONFIG_VERSION,
+                                       data['version'])
+
+
+def VerifyType(owner, attr, value, template, callback):
+  """Checks if an attribute has correct form.
+
+  @type owner: str
+  @param owner: name of the object containing the attribute
+  @type attr: str
+  @param attr: name of the attribute
+  @type value: dict
+  @param value: actual value of the attribute
+  @type template: dict
+  @param template: expected types of the keys
+  @type callback: callable
+  @param callback: will be called if there is an error
+
+  """
+  try:
+    utils.ForceDictType(value, template)
+  except errors.GenericError, err:
+    return callback("%s has invalid %s: %s" % (owner, attr, err))
+
+
+def VerifyNic(owner, params, callback):
+  """Checks if a NIC has correct form.
+
+  @type owner: str
+  @param owner: name of the object containing the attribute
+  @type params: dict
+  @param params: actual value of the NIC parameters
+  @type callback: callable
+  @param callback: will be called if there is an error
+
+  """
+  try:
+    objects.NIC.CheckParameterSyntax(params)
+  except errors.ConfigurationError, err:
+    callback("%s has invalid nicparams: %s" % (owner, err))
+
+
+def VerifyIpolicy(owner, ipolicy, iscluster, callback):
+  """Checks if an ipolicy has correct form.
+
+  @type owner: str
+  @param owner: name of the object containing the attribute
+  @type ipolicy: dict
+  @param ipolicy: actual value of the ipolicy parameters
+  @type iscluster: bool
+  @param iscluster: True iff the owner is the cluster
+  @type callback: callable
+  @param callback: will be called if there is an error
+
+  """
+  try:
+    objects.InstancePolicy.CheckParameterSyntax(ipolicy, iscluster)
+  except errors.ConfigurationError, err:
+    callback("%s has invalid instance policy: %s" % (owner, err))
+  for key, value in ipolicy.items():
+    if key == constants.ISPECS_MINMAX:
+      for k in range(len(value)):
+        VerifyIspecs(owner, "ipolicy/%s[%s]" % (key, k), value[k], callback)
+    elif key == constants.ISPECS_STD:
+      VerifyType(owner, "ipolicy/" + key, value,
+                 constants.ISPECS_PARAMETER_TYPES, callback)
+    else:
+      # FIXME: assuming list type
+      if key in constants.IPOLICY_PARAMETERS:
+        exp_type = float
+        # if the value is int, it can be converted into float
+        convertible_types = [int]
+      else:
+        exp_type = list
+        convertible_types = []
+      # Try to convert from allowed types, if necessary.
+      if any(isinstance(value, ct) for ct in convertible_types):
+        try:
+          value = exp_type(value)
+          ipolicy[key] = value
+        except ValueError:
+          pass
+      if not isinstance(value, exp_type):
+        callback("%s has invalid instance policy: for %s,"
+                 " expecting %s, got %s" %
+                 (owner, key, exp_type.__name__, type(value)))
+
+
+def VerifyIspecs(owner, parentkey, params, callback):
+  """Checks if an ispec has correct form.
+
+  @type owner: str
+  @param owner: name of the object containing the attribute
+  @type parentkey: str
+  @param parentkey: the root name of the key
+  @type params: dict
+  @param params: actual value of the ispec parameters
+  @type callback: callable
+  @param callback: will be called if there is an error
+
+  """
+  for (key, value) in params.items():
+    fullkey = "/".join([parentkey, key])
+    VerifyType(owner, fullkey, value, constants.ISPECS_PARAMETER_TYPES,
+               callback)
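
These helpers report problems through the callback instead of raising, so a
caller can collect everything in one pass; a hedged sketch (the owner strings
and parameter values are invented):

  problems = []
  VerifyType("node node1.example.com", "ndparams",
             {"oob_program": 42},          # wrong type, on purpose
             constants.NDS_PARAMETER_TYPES, problems.append)
  VerifyNic("cluster", {constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
                        constants.NIC_LINK: "br0"}, problems.append)
  if problems:
    raise errors.ConfigurationError("; ".join(problems))
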
diff --git a/lib/ht.py b/lib/ht.py
index 4ed45e7..3a194e0 100644
--- a/lib/ht.py
+++ b/lib/ht.py
@@ -458,7 +458,7 @@
      elements match the given types.
 
   """
-  desc = WithDesc("Tuple of %s" % (Parens(val_types), ))
+  desc = WithDesc("Tuple of %s" % Parens(', '.join(str(v) for v in val_types)))
   return desc(TAnd(TOr(TTuple, TList), TIsLength(len(val_types)),
                    TItems(val_types)))
 
@@ -556,12 +556,13 @@
                                        for (check, i) in zip(items, value)))
 
 
+TMaxValue = lambda max: WithDesc('Less than %s' % max)(lambda val: val < max)
 TAllocPolicy = TElemOf(constants.VALID_ALLOC_POLICIES)
 TCVErrorCode = TElemOf(constants.CV_ALL_ECODES_STRINGS)
 TQueryResultCode = TElemOf(constants.RS_ALL)
 TExportTarget = TOr(TNonEmptyString, TList)
 TExportMode = TElemOf(constants.EXPORT_MODES)
-TDiskIndex = TAnd(TNonNegativeInt, lambda val: val < constants.MAX_DISKS)
+TDiskIndex = TAnd(TNonNegativeInt, TMaxValue(constants.MAX_DISKS))
 TReplaceDisksMode = TElemOf(constants.REPLACE_MODES)
 TDiskTemplate = TElemOf(constants.DISK_TEMPLATES)
 TEvacMode = TElemOf(constants.NODE_EVAC_MODES)
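
A quick illustration of the new helper (assuming constants.MAX_DISKS is 16):

  check = TMaxValue(16)
  assert check(3) and not check(16)
  # TDiskIndex therefore still accepts 0..15, but now carries a readable
  # "Less than 16" description for error reporting instead of an anonymous
  # lambda.
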
diff --git a/lib/hypervisor/hv_kvm/__init__.py b/lib/hypervisor/hv_kvm/__init__.py
index 4b9406a..4df0246 100644
--- a/lib/hypervisor/hv_kvm/__init__.py
+++ b/lib/hypervisor/hv_kvm/__init__.py
@@ -871,12 +871,10 @@
         data.append(info)
     return data
 
-  def _GenerateKVMBlockDevicesOptions(self, instance, up_hvp, kvm_disks,
+  def _GenerateKVMBlockDevicesOptions(self, up_hvp, kvm_disks,
                                       kvmhelp, devlist):
     """Generate KVM options regarding instance's block devices.
 
-    @type instance: L{objects.Instance}
-    @param instance: the instance object
     @type up_hvp: dict
     @param up_hvp: the instance's runtime hypervisor parameters
     @type kvm_disks: list of tuples
@@ -921,18 +919,18 @@
       aio_val = ""
     # Cache mode
     disk_cache = up_hvp[constants.HV_DISK_CACHE]
-    if instance.disk_template in constants.DTS_EXT_MIRROR:
-      if disk_cache != "none":
-        # TODO: make this a hard error, instead of a silent overwrite
-        logging.warning("KVM: overriding disk_cache setting '%s' with 'none'"
-                        " to prevent shared storage corruption on migration",
-                        disk_cache)
-      cache_val = ",cache=none"
-    elif disk_cache != constants.HT_CACHE_DEFAULT:
-      cache_val = ",cache=%s" % disk_cache
-    else:
-      cache_val = ""
     for cfdev, link_name, uri in kvm_disks:
+      if cfdev.dev_type in constants.DTS_EXT_MIRROR:
+        if disk_cache != "none":
+          # TODO: make this a hard error, instead of a silent overwrite
+          logging.warning("KVM: overriding disk_cache setting '%s' with 'none'"
+                          " to prevent shared storage corruption on migration",
+                          disk_cache)
+        cache_val = ",cache=none"
+      elif disk_cache != constants.HT_CACHE_DEFAULT:
+        cache_val = ",cache=%s" % disk_cache
+      else:
+        cache_val = ""
       if cfdev.mode != constants.DISK_RDWR:
         raise errors.HypervisorError("Instance has read-only disks which"
                                      " are not supported by KVM")
@@ -1541,8 +1539,14 @@
   def _ExecuteKVMRuntime(self, instance, kvm_runtime, kvmhelp, incoming=None):
     """Execute a KVM cmd, after completing it with some last minute data.
 
+    @type instance: L{objects.Instance} object
+    @param instance: the VM this command acts upon
+    @type kvm_runtime: tuple of (list of str, list of L{objects.NIC} objects,
+        dict of hypervisor options, list of tuples (L{objects.Disk}, str, str))
+    @param kvm_runtime: (kvm command, NICs of the instance, options at startup
+        of the instance, [(disk, link_name, uri), ...])
     @type incoming: tuple of strings
-    @param incoming: (target_host_ip, port)
+    @param incoming: (target_host_ip, port) for migration.
     @type kvmhelp: string
     @param kvmhelp: output of kvm --help
 
@@ -1673,8 +1677,7 @@
         continue
       self._ConfigureNIC(instance, nic_seq, nic, taps[nic_seq])
 
-    bdev_opts = self._GenerateKVMBlockDevicesOptions(instance,
-                                                     up_hvp,
+    bdev_opts = self._GenerateKVMBlockDevicesOptions(up_hvp,
                                                      kvm_disks,
                                                      kvmhelp,
                                                      devlist)
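
The practical effect of moving the cache handling into the loop is that the
override is decided per disk rather than from the instance-wide template; a
hedged sketch of that decision in isolation (not a real hypervisor API):

  def _disk_cache_option(dev_type, disk_cache):
    # Mirrors the logic above: externally mirrored (shared-storage) disks are
    # always forced to cache=none to avoid corruption on migration.
    if dev_type in constants.DTS_EXT_MIRROR:
      return ",cache=none"
    elif disk_cache != constants.HT_CACHE_DEFAULT:
      return ",cache=%s" % disk_cache
    else:
      return ""
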
diff --git a/lib/locking.py b/lib/locking.py
index 8c3a1ae..a3d73b6 100644
--- a/lib/locking.py
+++ b/lib/locking.py
@@ -937,24 +937,16 @@
 #   same time.
 # - LEVEL_NODE_RES is for node resources and should be used by operations with
 #   possibly high impact on the node's disks.
-# - LEVEL_NODE_ALLOC blocks instance allocations for the whole cluster
-#   ("NAL" is the only lock at this level). It should be acquired in shared
-#   mode when an opcode blocks all or a significant amount of a cluster's
-#   locks. Opcodes doing instance allocations should acquire in exclusive mode.
-#   Once the set of acquired locks for an opcode has been reduced to the working
-#   set, the NAL should be released as well to allow allocations to proceed.
 (LEVEL_CLUSTER,
  LEVEL_INSTANCE,
- LEVEL_NODE_ALLOC,
  LEVEL_NODEGROUP,
  LEVEL_NODE,
  LEVEL_NODE_RES,
- LEVEL_NETWORK) = range(0, 7)
+ LEVEL_NETWORK) = range(0, 6)
 
 LEVELS = [
   LEVEL_CLUSTER,
   LEVEL_INSTANCE,
-  LEVEL_NODE_ALLOC,
   LEVEL_NODEGROUP,
   LEVEL_NODE,
   LEVEL_NODE_RES,
@@ -974,7 +966,6 @@
 LEVEL_NAMES = {
   LEVEL_CLUSTER: "cluster",
   LEVEL_INSTANCE: "instance",
-  LEVEL_NODE_ALLOC: "node-alloc",
   LEVEL_NODEGROUP: "nodegroup",
   LEVEL_NODE: "node",
   LEVEL_NODE_RES: "node-res",
@@ -983,6 +974,3 @@
 
 # Constant for the big ganeti lock
 BGL = "BGL"
-
-#: Node allocation lock
-NAL = "NAL"
diff --git a/lib/masterd/iallocator.py b/lib/masterd/iallocator.py
index f393e37..a94d880 100644
--- a/lib/masterd/iallocator.py
+++ b/lib/masterd/iallocator.py
@@ -42,6 +42,7 @@
 
 import ganeti.masterd.instance as gmi
 
+import logging
 
 _STRING_LIST = ht.TListOf(ht.TString)
 _JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
@@ -141,6 +142,12 @@
     """
     raise NotImplementedError
 
+  def GetExtraParams(self): # pylint: disable=R0201
+    """Gets extra parameters to the IAllocator call.
+
+    """
+    return {}
+
   def ValidateResult(self, ia, result):
     """Validates the result of an request.
 
@@ -193,7 +200,9 @@
     done.
 
     """
-    disk_space = gmi.ComputeDiskSize(self.disk_template, self.disks)
+    for d in self.disks:
+      d[constants.IDISK_TYPE] = self.disk_template
+    disk_space = gmi.ComputeDiskSize(self.disks)
 
     return {
       "name": self.name,
@@ -267,23 +276,24 @@
 
     """
     instance = cfg.GetInstanceInfo(self.inst_uuid)
+    disks = cfg.GetInstanceDisks(self.inst_uuid)
     if instance is None:
       raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                    " IAllocator" % self.inst_uuid)
 
-    if instance.disk_template not in constants.DTS_MIRRORED:
+    if not utils.AllDiskOfType(disks, constants.DTS_MIRRORED):
       raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                  errors.ECODE_INVAL)
 
     secondary_nodes = cfg.GetInstanceSecondaryNodes(instance.uuid)
-    if (instance.disk_template in constants.DTS_INT_MIRROR and
+    if (utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR) and
         len(secondary_nodes) != 1):
       raise errors.OpPrereqError("Instance has not exactly one secondary node",
                                  errors.ECODE_STATE)
 
-    inst_disks = cfg.GetInstanceDisks(instance.uuid)
-    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in inst_disks]
-    disk_space = gmi.ComputeDiskSize(instance.disk_template, disk_sizes)
+    disk_sizes = [{constants.IDISK_SIZE: disk.size,
+                   constants.IDISK_TYPE: disk.dev_type} for disk in disks]
+    disk_space = gmi.ComputeDiskSize(disk_sizes)
 
     return {
       "name": instance.name,
@@ -359,6 +369,7 @@
   REQ_PARAMS = [
     ("instances", _STRING_LIST),
     ("evac_mode", ht.TEvacMode),
+    ("ignore_soft_errors", ht.TMaybe(ht.TBool)),
     ]
   REQ_RESULT = _NEVAC_RESULT
 
@@ -371,6 +382,16 @@
       "evac_mode": self.evac_mode,
       }
 
+  def GetExtraParams(self):
+    """Get extra iallocator command line options for
+    node-evacuate requests.
+
+    """
+    if self.ignore_soft_errors:
+      return {"ignore-soft-errors": None}
+    else:
+      return {}
+
 
 class IAReqGroupChange(IARequestBase):
   """A group change request.
@@ -675,9 +696,9 @@
           i_used_mem = int(node_instances_info[node_uuid]
                            .payload[iinfo.name]["memory"])
         i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
-        mem_free -= max(0, i_mem_diff)
-
-        if iinfo.admin_state == constants.ADMINST_UP:
+        if iinfo.admin_state == constants.ADMINST_UP \
+            and not iinfo.forthcoming:
+          mem_free -= max(0, i_mem_diff)
           i_p_up_mem += beinfo[constants.BE_MAXMEM]
     return (i_p_mem, i_p_up_mem, mem_free)
 
@@ -754,6 +775,7 @@
           nic_dict["bridge"] = filled_params[constants.NIC_LINK]
         nic_data.append(nic_dict)
       inst_disks = cfg.GetInstanceDisks(iinfo.uuid)
+      inst_disktemplate = cfg.GetInstanceDiskTemplate(iinfo.uuid)
       pir = {
         "tags": list(iinfo.GetTags()),
         "admin_state": iinfo.admin_state,
@@ -765,16 +787,16 @@
                  cfg.GetNodeNames(
                    cfg.GetInstanceSecondaryNodes(iinfo.uuid)),
         "nics": nic_data,
-        "disks": [{constants.IDISK_SIZE: dsk.size,
+        "disks": [{constants.IDISK_TYPE: dsk.dev_type,
+                   constants.IDISK_SIZE: dsk.size,
                    constants.IDISK_MODE: dsk.mode,
                    constants.IDISK_SPINDLES: dsk.spindles}
                   for dsk in inst_disks],
-        "disk_template": iinfo.disk_template,
+        "disk_template": inst_disktemplate,
         "disks_active": iinfo.disks_active,
         "hypervisor": iinfo.hypervisor,
         }
-      pir["disk_space_total"] = gmi.ComputeDiskSize(iinfo.disk_template,
-                                                    pir["disks"])
+      pir["disk_space_total"] = gmi.ComputeDiskSize(pir["disks"])
       instance_data[iinfo.name] = pir
 
     return instance_data
@@ -785,14 +807,17 @@
     """
     request = req.GetRequest(self.cfg)
     disk_template = None
-    if "disk_template" in request:
+    if request.get("disk_template") is not None:
       disk_template = request["disk_template"]
+    elif isinstance(req, IAReqRelocate):
+      disk_template = self.cfg.GetInstanceDiskTemplate(self.req.inst_uuid)
     self._ComputeClusterData(disk_template=disk_template)
 
     request["type"] = req.MODE
     self.in_data["request"] = request
 
     self.in_text = serializer.Dump(self.in_data)
+    logging.debug("IAllocator request: %s", self.in_text)
 
   def Run(self, name, validate=True, call_fn=None):
     """Run an instance allocator and return the results.
@@ -803,6 +828,9 @@
 
     ial_params = self.cfg.GetDefaultIAllocatorParameters()
 
+    for ial_param in self.req.GetExtraParams().items():
+      ial_params[ial_param[0]] = ial_param[1]
+
     result = call_fn(self.cfg.GetMasterNode(), name, self.in_text, ial_params)
     result.Raise("Failure while running the iallocator script")
 
diff --git a/lib/masterd/instance.py b/lib/masterd/instance.py
index 6999656..9abbc69 100644
--- a/lib/masterd/instance.py
+++ b/lib/masterd/instance.py
@@ -1162,22 +1162,38 @@
     self._feedback_fn = feedback_fn
     self._instance = instance
 
-    self._snap_disks = []
-    self._removed_snaps = [False] * len(instance.disks)
+    self._snapshots = [None] * len(instance.disks)
+    self._snapshots_removed = [False] * len(instance.disks)
+
+  def _SnapshotsReady(self):
+    """Returns true if snapshots are ready to be used in exports.
+
+    """
+    return all(self._snapshots)
 
   def CreateSnapshots(self):
-    """Creates a snapshot for every disk of the instance.
+    """Attempts to create a snapshot for every disk of the instance.
 
     Currently supports drbd, plain and ext disk templates.
 
+    @rtype: bool
+    @return: Whether following transfers can use snapshots
+
     """
-    assert not self._snap_disks
+    if any(self._snapshots):
+      raise errors.ProgrammerError("Snapshot creation was invoked more than "
+                                   "once")
 
     instance = self._instance
+    inst_disks = self._lu.cfg.GetInstanceDisks(instance.uuid)
+
+    # A quick check whether we can support snapshots at all
+    if not all([d.SupportsSnapshots() for d in inst_disks]):
+      return False
+
     src_node = instance.primary_node
     src_node_name = self._lu.cfg.GetNodeName(src_node)
 
-    inst_disks = self._lu.cfg.GetInstanceDisks(instance.uuid)
     for idx, disk in enumerate(inst_disks):
       self._feedback_fn("Creating a snapshot of disk/%s on node %s" %
                         (idx, src_node_name))
@@ -1187,7 +1203,6 @@
       result = self._lu.rpc.call_blockdev_snapshot(src_node,
                                                    (disk, instance),
                                                    None, None)
-      new_dev = False
       msg = result.fail_msg
       if msg:
         self._lu.LogWarning("Could not snapshot disk/%s on node %s: %s",
@@ -1208,11 +1223,19 @@
         new_dev = objects.Disk(dev_type=dev_type, size=disk.size,
                                logical_id=disk_id, iv_name=disk.iv_name,
                                params=disk_params)
+        new_dev.uuid = self._lu.cfg.GenerateUniqueID(self._lu.proc.GetECId())
 
-      self._snap_disks.append(new_dev)
+        self._snapshots[idx] = new_dev
+        self._snapshots_removed[idx] = False
 
-    assert len(self._snap_disks) == len(instance.disks)
-    assert len(self._removed_snaps) == len(instance.disks)
+    # One final check to see if we have managed to snapshot everything
+    if self._SnapshotsReady():
+      return True
+    else:
+      # If we failed to do so, the existing snapshots are of little value to us
+      # so we can remove them straight away.
+      self.Cleanup()
+      return False
 
   def _RemoveSnapshot(self, disk_index):
     """Removes an LVM snapshot.
@@ -1221,22 +1244,47 @@
     @param disk_index: Index of the snapshot to be removed
 
     """
-    disk = self._snap_disks[disk_index]
-    if disk and not self._removed_snaps[disk_index]:
-      src_node = self._instance.primary_node
-      src_node_name = self._lu.cfg.GetNodeName(src_node)
+    snapshot = self._snapshots[disk_index]
+    if snapshot is not None and not self._snapshots_removed[disk_index]:
+      src_node_uuid = self._instance.primary_node
+      src_node_name = self._lu.cfg.GetNodeName(src_node_uuid)
 
       self._feedback_fn("Removing snapshot of disk/%s on node %s" %
                         (disk_index, src_node_name))
 
-      result = self._lu.rpc.call_blockdev_remove(src_node,
-                                                 (disk, self._instance))
+      result = self._lu.rpc.call_blockdev_remove(src_node_uuid,
+                                                 (snapshot, self._instance))
       if result.fail_msg:
         self._lu.LogWarning("Could not remove snapshot for disk/%d from node"
                             " %s: %s", disk_index, src_node_name,
                             result.fail_msg)
       else:
-        self._removed_snaps[disk_index] = True
+        self._snapshots_removed[disk_index] = True
+
+  def _GetDisksToTransfer(self):
+    """Returns disks to be transferred, whether snapshots or instance disks.
+
+    @rtype: list of L{objects.Disk}
+    @return: The disks to transfer
+
+    """
+    if self._SnapshotsReady():
+      return self._snapshots
+    else:
+      return self._lu.cfg.GetInstanceDisks(self._instance.uuid)
+
+  def _GetDiskLabel(self, idx):
+    """Returns a label which should be used to represent a disk to transfer.
+
+    @type idx: int
+    @param idx: The disk index
+    @rtype: string
+
+    """
+    if self._SnapshotsReady():
+      return "snapshot/%d" % idx
+    else:
+      return "disk/%d" % idx
 
   def LocalExport(self, dest_node, compress):
     """Intra-cluster instance export.
@@ -1247,20 +1295,16 @@
     @param compress: Compression tool to use
 
     """
+    disks_to_transfer = self._GetDisksToTransfer()
+
     instance = self._instance
     src_node_uuid = instance.primary_node
 
-    assert len(self._snap_disks) == len(instance.disks)
-
     transfers = []
 
-    for idx, dev in enumerate(self._snap_disks):
-      if not dev:
-        transfers.append(None)
-        continue
-
+    for idx, dev in enumerate(disks_to_transfer):
       path = utils.PathJoin(pathutils.EXPORT_DIR, "%s.new" % instance.name,
-                            dev.logical_id[1])
+                            dev.uuid)
 
       finished_fn = compat.partial(self._TransferFinished, idx)
 
@@ -1272,10 +1316,8 @@
         src_ioargs = (dev, instance)
 
       # FIXME: pass debug option from opcode to backend
-      dt = DiskTransfer("snapshot/%s" % idx,
-                        src_io, src_ioargs,
-                        constants.IEIO_FILE, (path, ),
-                        finished_fn)
+      dt = DiskTransfer(self._GetDiskLabel(idx), src_io, src_ioargs,
+                        constants.IEIO_FILE, (path, ), finished_fn)
       transfers.append(dt)
 
     # Actually export data
@@ -1291,7 +1333,7 @@
     if all(dresults):
       self._feedback_fn("Finalizing export on %s" % dest_node.name)
       result = self._lu.rpc.call_finalize_export(dest_node.uuid, instance,
-                                                 self._snap_disks)
+                                                 disks_to_transfer)
       msg = result.fail_msg
       fin_resu = not msg
       if msg:
@@ -1322,15 +1364,15 @@
 
     """
     instance = self._instance
-    inst_disks = self._lu.cfg.GetInstanceDisks(instance.uuid)
+    disks_to_transfer = self._GetDisksToTransfer()
 
-    assert len(disk_info) == len(instance.disks)
+    assert len(disk_info) == len(disks_to_transfer)
 
-    cbs = _RemoteExportCb(self._feedback_fn, len(instance.disks))
+    cbs = _RemoteExportCb(self._feedback_fn, len(disks_to_transfer))
 
     ieloop = ImportExportLoop(self._lu)
     try:
-      for idx, (dev, (host, port, magic)) in enumerate(zip(inst_disks,
+      for idx, (dev, (host, port, magic)) in enumerate(zip(disks_to_transfer,
                                                            disk_info)):
         # Decide whether to use IPv6
         ipv6 = netutils.IP6Address.IsValid(host)
@@ -1375,8 +1417,7 @@
     """Remove all snapshots.
 
     """
-    assert len(self._removed_snaps) == len(self._instance.disks)
-    for idx in range(len(self._instance.disks)):
+    for idx in range(len(self._snapshots)):
       self._RemoveSnapshot(idx)
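
A hedged sketch of the caller side of this change (the LU, destination node
and compression arguments are placeholders; the class is the export helper
these hunks modify):

  helper = ExportInstanceHelper(lu, feedback_fn, instance)
  if not helper.CreateSnapshots():
    feedback_fn("Snapshots not available, exporting from the live disks")
  try:
    helper.LocalExport(dest_node, compress)
  finally:
    helper.Cleanup()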
 
 
@@ -1686,27 +1727,29 @@
   return cluster.SimpleFillIPolicy(group.ipolicy)
 
 
-def ComputeDiskSize(disk_template, disks):
+def ComputeDiskSize(disks):
   """Compute disk size requirements according to disk template
 
   """
   # Required free disk space as a function of disk and swap space
-  req_size_dict = {
-    constants.DT_DISKLESS: 0,
-    constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks),
-    # 128 MB are added for drbd metadata for each disk
-    constants.DT_DRBD8:
-      sum(d[constants.IDISK_SIZE] + constants.DRBD_META_SIZE for d in disks),
-    constants.DT_FILE: sum(d[constants.IDISK_SIZE] for d in disks),
-    constants.DT_SHARED_FILE: sum(d[constants.IDISK_SIZE] for d in disks),
-    constants.DT_GLUSTER: sum(d[constants.IDISK_SIZE] for d in disks),
-    constants.DT_BLOCK: 0,
-    constants.DT_RBD: sum(d[constants.IDISK_SIZE] for d in disks),
-    constants.DT_EXT: sum(d[constants.IDISK_SIZE] for d in disks),
-  }
+  def size_f(d):
+    dev_type = d[constants.IDISK_TYPE]
+    req_size_dict = {
+      constants.DT_DISKLESS: 0,
+      constants.DT_PLAIN: d[constants.IDISK_SIZE],
+      # Extra space for drbd metadata is added to each disk
+      constants.DT_DRBD8:
+        d[constants.IDISK_SIZE] + constants.DRBD_META_SIZE,
+      constants.DT_FILE: d[constants.IDISK_SIZE],
+      constants.DT_SHARED_FILE: d[constants.IDISK_SIZE],
+      constants.DT_GLUSTER: d[constants.IDISK_SIZE],
+      constants.DT_BLOCK: 0,
+      constants.DT_RBD: d[constants.IDISK_SIZE],
+      constants.DT_EXT: d[constants.IDISK_SIZE],
+    }
+    if dev_type not in req_size_dict:
+      raise errors.ProgrammerError("Disk template '%s' size requirement"
+                                   " is unknown" % dev_type)
+    return req_size_dict[dev_type]
 
-  if disk_template not in req_size_dict:
-    raise errors.ProgrammerError("Disk template '%s' size requirement"
-                                 " is unknown" % disk_template)
-
-  return req_size_dict[disk_template]
+  return sum(map(size_f, disks))
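
A small worked example of the per-disk computation (sizes in MiB are invented;
DRBD_META_SIZE is 128 in the current constants):

  disks = [
    {constants.IDISK_TYPE: constants.DT_PLAIN, constants.IDISK_SIZE: 1024},
    {constants.IDISK_TYPE: constants.DT_DRBD8, constants.IDISK_SIZE: 2048},
  ]
  # 1024 + (2048 + 128) = 3200 MiB of free space required
  assert ComputeDiskSize(disks) == 3200
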
diff --git a/lib/mcpu.py b/lib/mcpu.py
index 57dee90..e0289bf 100644
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -62,19 +62,6 @@
 _OP_PREFIX = "Op"
 _LU_PREFIX = "LU"
 
-#: LU classes which don't need to acquire the node allocation lock
-#: (L{locking.NAL}) when they acquire all node or node resource locks
-_NODE_ALLOC_WHITELIST = frozenset([])
-
-#: LU classes which don't need to acquire the node allocation lock
-#: (L{locking.NAL}) in the same mode (shared/exclusive) as the node
-#: or node resource locks
-_NODE_ALLOC_MODE_WHITELIST = compat.UniqueFrozenset([
-  cmdlib.LUBackupExport,
-  cmdlib.LUBackupRemove,
-  cmdlib.LUOobCommand,
-  ])
-
 
 class LockAcquireTimeout(Exception):
   """Exception to report timeouts on acquiring locks.
@@ -270,43 +257,6 @@
                                " queries) can not submit jobs")
 
 
-def _VerifyLocks(lu, _mode_whitelist=_NODE_ALLOC_MODE_WHITELIST,
-                 _nal_whitelist=_NODE_ALLOC_WHITELIST):
-  """Performs consistency checks on locks acquired by a logical unit.
-
-  @type lu: L{cmdlib.LogicalUnit}
-  @param lu: Logical unit instance
-
-  """
-  if not __debug__:
-    return
-
-  allocset = lu.owned_locks(locking.LEVEL_NODE_ALLOC)
-  have_nal = locking.NAL in allocset
-
-  for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
-    # TODO: Verify using actual lock mode, not using LU variables
-    if level in lu.needed_locks:
-      share_node_alloc = lu.share_locks[locking.LEVEL_NODE_ALLOC]
-      share_level = lu.share_locks[level]
-
-      if lu.__class__ in _mode_whitelist:
-        assert share_node_alloc != share_level, \
-          "LU is whitelisted to use different modes for node allocation lock"
-      else:
-        assert bool(share_node_alloc) == bool(share_level), \
-          ("Node allocation lock must be acquired using the same mode as nodes"
-           " and node resources")
-
-      if lu.__class__ in _nal_whitelist:
-        assert not have_nal, \
-          "LU is whitelisted for not acquiring the node allocation lock"
-      elif lu.needed_locks[level] == locking.ALL_SET:
-        assert have_nal, \
-          ("Node allocation lock must be used if an LU acquires all nodes"
-           " or node resources")
-
-
 def _LockList(names):
   """If 'names' is a string, make it a single-element list.
 
@@ -448,7 +398,6 @@
         expand_fns = {
           locking.LEVEL_CLUSTER: (lambda: [locking.BGL]),
           locking.LEVEL_INSTANCE: self.cfg.GetInstanceList,
-          locking.LEVEL_NODE_ALLOC: (lambda: [locking.NAL]),
           locking.LEVEL_NODEGROUP: self.cfg.GetNodeGroupList,
           locking.LEVEL_NODE: self.cfg.GetNodeList,
           locking.LEVEL_NODE_RES: self.cfg.GetNodeList,
@@ -582,8 +531,6 @@
 
       logging.debug("Finished acquiring locks")
 
-      _VerifyLocks(lu)
-
       if self._cbs:
         self._cbs.NotifyStart()
 
diff --git a/lib/objects.py b/lib/objects.py
index 0b21523..96e7092 100644
--- a/lib/objects.py
+++ b/lib/objects.py
@@ -111,10 +111,9 @@
   @see: L{FillDict} for parameters and return value
 
   """
-  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
-
-  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
-                             skip_keys=skip_keys))
+  return dict((dt, FillDict(default_dparams.get(dt, {}),
+                            custom_dparams.get(dt, {}),
+                            skip_keys=skip_keys))
               for dt in constants.DISK_TEMPLATES)
 
 
@@ -452,19 +451,18 @@
     obj.filters = outils.ContainerFromDicts(obj.filters, dict, Filter)
     return obj
 
-  def HasAnyDiskOfType(self, dev_type):
-    """Check if in there is at disk of the given type in the configuration.
+  def DisksOfType(self, dev_type):
+    """Return all disks of the given type from the configuration.
 
     @type dev_type: L{constants.DTS_BLOCK}
     @param dev_type: the type to look for
-    @rtype: boolean
-    @return: boolean indicating if a disk of the given type was found or not
+    @rtype: list of disks
+    @return: all disks of the dev_type
 
     """
-    for disk in self.disks.values():
-      if disk.IsBasedOnDiskType(dev_type):
-        return True
-    return False
+
+    return [disk for disk in self.disks.values()
+            if disk.IsBasedOnDiskType(dev_type)]
 
   def UpgradeConfig(self):
     """Fill defaults for missing configuration values.
@@ -501,7 +499,9 @@
     """
     if not self.cluster.enabled_disk_templates:
       template_set = \
-        set([inst.disk_template for inst in self.instances.values()])
+        set([d.dev_type for d in self.disks.values()])
+      if any(not inst.disks for inst in self.instances.values()):
+        template_set.add(constants.DT_DISKLESS)
       # Add drbd and plain, if lvm is enabled (by specifying a volume group)
       if self.cluster.volume_group_name:
         template_set.add(constants.DT_DRBD8)
@@ -552,10 +552,12 @@
 class Disk(ConfigObject):
   """Config object representing a block device."""
   __slots__ = [
+    "forthcoming",
     "name",
     "dev_type",
     "logical_id",
     "children",
+    "nodes",
     "iv_name",
     "size",
     "mode",
@@ -597,6 +599,10 @@
     """Test if this device needs to be opened on a secondary node."""
     return self.dev_type in (constants.DT_PLAIN,)
 
+  def SupportsSnapshots(self):
+    """Test if this device supports snapshots."""
+    return self.dev_type in constants.DTS_SNAPSHOT_CAPABLE
+
   def StaticDevPath(self):
     """Return the device path if this device type has a static one.
 
@@ -669,6 +675,17 @@
       raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
     return result
 
+  def GetPrimaryNode(self, node_uuid):
+    """Return the primary node of the device.
+
+    If the device is not a DRBD device, we still return the node the device
+    lives on.
+
+    """
+    if self.dev_type in constants.DTS_DRBD:
+      return self.logical_id[0]
+    return node_uuid
+
   def ComputeNodeTree(self, parent_node_uuid):
     """Compute the node/disk tree for this disk and its children.
 
@@ -1181,6 +1198,7 @@
 class Instance(TaggableObject):
   """Config object representing an instance."""
   __slots__ = [
+    "forthcoming",
     "name",
     "primary_node",
     "secondary_nodes",
@@ -1243,6 +1261,10 @@
       else:
         nlist = []
       bo[attr] = nlist
+
+    if 'disk_template' in bo:
+      del bo['disk_template']
+
     return bo
 
   @classmethod
@@ -1378,6 +1400,8 @@
     "setinfo_script",
     "verify_script",
     "snapshot_script",
+    "open_script",
+    "close_script",
     "supported_parameters",
     ]
 
diff --git a/lib/query.py b/lib/query.py
index 29056ef..43d8fad 100644
--- a/lib/query.py
+++ b/lib/query.py
@@ -2228,6 +2228,9 @@
     (_MakeField("console", "Console", QFT_OTHER,
                 "Instance console information"), IQ_CONSOLE, 0,
      _GetInstanceConsole),
+    (_MakeField("forthcoming", "Forthcoming", QFT_BOOL,
+                "Whether the Instance is forthcoming"), IQ_CONFIG, 0,
+     lambda _, inst: bool(inst.forthcoming)),
     ]
 
   # Add simple fields
diff --git a/lib/rapi/client_utils.py b/lib/rapi/client_utils.py
index 231d277..224e1a2 100644
--- a/lib/rapi/client_utils.py
+++ b/lib/rapi/client_utils.py
@@ -103,5 +103,11 @@
   @type reporter: L{cli.JobPollReportCbBase}
   @param reporter: PollJob reporter instance
 
+  @return: The opresult of the job
+  @raise errors.JobLost: If job can't be found
+  @raise errors.OpExecError: if job didn't succeed
+
+  @see: L{ganeti.cli.GenericPollJob}
+
   """
   return cli.GenericPollJob(job_id, RapiJobPollCb(rapi_client), reporter)
diff --git a/lib/rpc_defs.py b/lib/rpc_defs.py
index 17fa948..09b2fa8 100644
--- a/lib/rpc_defs.py
+++ b/lib/rpc_defs.py
@@ -417,6 +417,11 @@
     ("instance_name", None, None),
     ("disks", ED_DISKS_DICT_DP, None),
     ], None, None, "Closes the given block devices"),
+  ("blockdev_open", SINGLE, None, constants.RPC_TMO_NORMAL, [
+    ("instance_name", None, None),
+    ("disks", ED_DISKS_DICT_DP, None),
+    ("exclusive", None, None),
+    ], None, None, "Opens the given block devices in required mode"),
   ("blockdev_getdimensions", SINGLE, None, constants.RPC_TMO_NORMAL, [
     ("disks", ED_MULTI_DISKS_DICT_DP, None),
     ], None, None, "Returns size and spindles of the given disks"),
@@ -426,7 +431,6 @@
    "Disconnects the network of the given drbd devices"),
   ("drbd_attach_net", MULTI, None, constants.RPC_TMO_NORMAL, [
     ("disks", ED_DISKS_DICT_DP, None),
-    ("instance_name", None, None),
     ("multimaster", None, None),
     ], None, None, "Connects the given DRBD devices"),
   ("drbd_wait_sync", MULTI, None, constants.RPC_TMO_SLOW, [
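
The new ``blockdev_open`` entry is expected to yield the usual generated
``call_<name>`` wrapper. The sketch below is not part of this patch and uses
placeholder names (``rpc_runner``, ``node_uuid``, ``instance_name``,
``encoded_disks``)::

  # Open the instance's block devices in shared (non-exclusive) mode, e.g. so
  # that both migration peers may access them concurrently.
  result = rpc_runner.call_blockdev_open(node_uuid, instance_name,
                                         encoded_disks, False)
  result.Raise("Cannot open block devices on node %s" % node_uuid)
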
diff --git a/lib/server/noded.py b/lib/server/noded.py
index bcdb000..880f2e1 100644
--- a/lib/server/noded.py
+++ b/lib/server/noded.py
@@ -411,6 +411,15 @@
     return backend.BlockdevClose(params[0], disks)
 
   @staticmethod
+  def perspective_blockdev_open(params):
+    """Opens the given block devices.
+
+    """
+    disks = [objects.Disk.FromDict(cf) for cf in params[1]]
+    exclusive = params[2]
+    return backend.BlockdevOpen(params[0], disks, exclusive)
+
+  @staticmethod
   def perspective_blockdev_getdimensions(params):
     """Compute the sizes of the given block devices.
 
@@ -449,9 +458,9 @@
     disk list must all be drbd devices.
 
     """
-    disks, instance_name, multimaster = params
+    disks, multimaster = params
     disks = [objects.Disk.FromDict(disk) for disk in disks]
-    return backend.DrbdAttachNet(disks, instance_name, multimaster)
+    return backend.DrbdAttachNet(disks, multimaster)
 
   @staticmethod
   def perspective_drbd_wait_sync(params):
@@ -1104,7 +1113,10 @@
     name, idata, ial_params_dict = params
     ial_params = []
     for ial_param in ial_params_dict.items():
-      ial_params.append("--" + ial_param[0] + "=" + ial_param[1])
+      if ial_param[1] is not None:
+        ial_params.append("--" + ial_param[0] + "=" + ial_param[1])
+      else:
+        ial_params.append("--" + ial_param[0])
     iar = backend.IAllocatorRunner()
     return iar.Run(name, idata, ial_params)
 
diff --git a/lib/storage/base.py b/lib/storage/base.py
index d4348f7..dcafb41 100644
--- a/lib/storage/base.py
+++ b/lib/storage/base.py
@@ -171,15 +171,18 @@
     """
     raise NotImplementedError
 
-  def Open(self, force=False):
+  def Open(self, force=False, exclusive=True):
     """Make the device ready for use.
 
-    This makes the device ready for I/O. For now, just the DRBD
-    devices need this.
+    This makes the device ready for I/O.
 
     The force parameter signifies that if the device has any kind of
     --force thing, it should be used, we know what we are doing.
 
+    The exclusive parameter denotes whether the device will be
+    opened for exclusive access (True) or for concurrent shared
+    access by multiple nodes (False), e.g. during migration.
+
     @type force: boolean
 
     """
diff --git a/lib/storage/bdev.py b/lib/storage/bdev.py
index b392fba..e3a48de 100644
--- a/lib/storage/bdev.py
+++ b/lib/storage/bdev.py
@@ -593,7 +593,7 @@
                                   is_degraded=self._degraded,
                                   ldisk_status=ldisk_status)
 
-  def Open(self, force=False):
+  def Open(self, force=False, exclusive=True):
     """Make the device ready for I/O.
 
     This is a no-op for the LV device type.
@@ -834,7 +834,7 @@
     """
     pass
 
-  def Open(self, force=False):
+  def Open(self, force=False, exclusive=True):
     """Make the device ready for I/O.
 
     """
@@ -1174,7 +1174,7 @@
         base.ThrowError("rbd unmap failed (%s): %s",
                         result.fail_reason, result.output)
 
-  def Open(self, force=False):
+  def Open(self, force=False, exclusive=True):
     """Make the device ready for I/O.
 
     """
diff --git a/lib/storage/drbd.py b/lib/storage/drbd.py
index 3d0c8c0..5c4817b 100644
--- a/lib/storage/drbd.py
+++ b/lib/storage/drbd.py
@@ -668,7 +668,7 @@
                                   is_degraded=is_degraded,
                                   ldisk_status=ldisk_status)
 
-  def Open(self, force=False):
+  def Open(self, force=False, exclusive=True):
     """Make the local state primary.
 
     If the 'force' parameter is given, DRBD is instructed to switch the device
diff --git a/lib/storage/extstorage.py b/lib/storage/extstorage.py
index 3ddcaed..311662f 100644
--- a/lib/storage/extstorage.py
+++ b/lib/storage/extstorage.py
@@ -142,6 +142,16 @@
     self.dev_path = result[0]
     self.uris = result[1:]
 
+    if not self.dev_path:
+      logging.info("A local block device is not available")
+      self.dev_path = None
+      if not self.uris:
+        logging.error("Neither a block device nor a userspace URI is available")
+        return False
+
+      self.attached = True
+      return True
+
     # Verify that dev_path exists and is a block device
     try:
       st = os.stat(self.dev_path)
@@ -181,17 +191,22 @@
     self.minor = None
     self.dev_path = None
 
-  def Open(self, force=False):
+  def Open(self, force=False, exclusive=True):
     """Make the device ready for I/O.
 
     """
-    pass
+    _ExtStorageAction(constants.ES_ACTION_OPEN, self.unique_id,
+                      self.ext_params,
+                      name=self.name, uuid=self.uuid,
+                      exclusive=exclusive)
 
   def Close(self):
     """Notifies that the device will no longer be used for I/O.
 
     """
-    pass
+    _ExtStorageAction(constants.ES_ACTION_CLOSE, self.unique_id,
+                      self.ext_params,
+                      name=self.name, uuid=self.uuid)
 
   def Grow(self, amount, dryrun, backingstore, excl_stor):
     """Grow the Volume.
@@ -276,7 +291,8 @@
 def _ExtStorageAction(action, unique_id, ext_params,
                       size=None, grow=None, metadata=None,
                       name=None, uuid=None,
-                      snap_name=None, snap_size=None):
+                      snap_name=None, snap_size=None,
+                      exclusive=None):
   """Take an External Storage action.
 
   Take an External Storage action concerning or affecting
@@ -303,6 +319,8 @@
   @param snap_size: the size of the snapshot
   @type snap_name: string
   @param snap_name: the name of the snapshot
+  @type exclusive: boolean
+  @param exclusive: Whether the Volume will be opened exclusively or not
   @param uuid: uuid of the Volume (objects.Disk.uuid)
   @rtype: None or a block device path (during attach)
 
@@ -317,7 +335,8 @@
   # Create the basic environment for the driver's scripts
   create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
                                       grow, metadata, name, uuid,
-                                      snap_name, snap_size)
+                                      snap_name, snap_size,
+                                      exclusive)
 
   # Do not use log file for action `attach' as we need
   # to get the output from RunResult
@@ -331,17 +350,16 @@
     base.ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                     action)
 
-  # Explicitly check if the script is valid
-  try:
-    _CheckExtStorageFile(inst_es.path, action) # pylint: disable=E1103
-  except errors.BlockDeviceError:
-    base.ThrowError("Action '%s' is not supported by provider '%s'" %
-                    (action, driver))
-
   # Find out which external script to run according the given action
   script_name = action + "_script"
   script = getattr(inst_es, script_name)
 
+  # Here script is either a valid file path or None if the script is optional
+  if not script:
+    logging.info("Optional action '%s' is not supported by provider '%s',"
+                 " skipping", action, driver)
+    return
+
   # Run the external script
   # pylint: disable=E1103
   result = utils.RunCmd([script], env=create_env,
@@ -368,7 +386,7 @@
     return result.stdout
 
 
-def _CheckExtStorageFile(base_dir, filename):
+def _CheckExtStorageFile(base_dir, filename, required):
   """Check prereqs for an ExtStorage file.
 
   Check if file exists, if it is a regular file and in case it is
@@ -378,8 +396,15 @@
   @param base_dir: Base directory containing ExtStorage installations.
   @type filename: string
   @param filename: The basename of the ExtStorage file.
+  @type required: bool
+  @param required: Whether the file is required or not.
 
-  @raises BlockDeviceError: In case prereqs are not met.
+  @rtype: String
+  @return: The file path if the file is found and is valid,
+           None if the file is not found and not required.
+
+  @raises BlockDeviceError: In case prereqs are not met
+    (found and not valid/executable, not found and required)
 
   """
 
@@ -387,6 +412,11 @@
   try:
     st = os.stat(file_path)
   except EnvironmentError, err:
+    if not required:
+      logging.info("Optional file '%s' under path '%s' is missing",
+                   filename, base_dir)
+      return None
+
     base.ThrowError("File '%s' under path '%s' is missing (%s)" %
                     (filename, base_dir, utils.ErrnoOrStr(err)))
 
@@ -399,6 +429,8 @@
       base.ThrowError("File '%s' under path '%s' is not executable" %
                       (filename, base_dir))
 
+  return file_path
+
 
 def ExtStorageFromDisk(name, base_dir=None):
   """Create an ExtStorage instance from disk.
@@ -425,23 +457,27 @@
     return False, ("Directory for External Storage Provider %s not"
                    " found in search path" % name)
 
-  # ES Files dictionary, we will populate it with the absolute path
-  # names; if the value is True, then it is a required file, otherwise
-  # an optional one
+  # ES Files dictionary: this will be populated later with the absolute path
+  # names for each script; currently we denote for each script if it is
+  # required (True) or optional (False)
   es_files = dict.fromkeys(constants.ES_SCRIPTS, True)
 
-  # Let the snapshot script be optional
+  # Let the snapshot, open, and close scripts be optional
+  # for backwards compatibility
   es_files[constants.ES_SCRIPT_SNAPSHOT] = False
+  es_files[constants.ES_SCRIPT_OPEN] = False
+  es_files[constants.ES_SCRIPT_CLOSE] = False
 
   es_files[constants.ES_PARAMETERS_FILE] = True
 
   for (filename, required) in es_files.items():
-    es_files[filename] = utils.PathJoin(es_dir, filename)
     try:
-      _CheckExtStorageFile(es_dir, filename)
+      # Here we actually fill the dict with the absolute path name for each
+      # script or None, depending on the corresponding checks. See the
+      # function's docstring for more on these checks.
+      es_files[filename] = _CheckExtStorageFile(es_dir, filename, required)
     except errors.BlockDeviceError, err:
-      if required:
-        return False, str(err)
+      return False, str(err)
 
   parameters = []
   if constants.ES_PARAMETERS_FILE in es_files:
@@ -463,6 +499,8 @@
                        setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
                        verify_script=es_files[constants.ES_SCRIPT_VERIFY],
                        snapshot_script=es_files[constants.ES_SCRIPT_SNAPSHOT],
+                       open_script=es_files[constants.ES_SCRIPT_OPEN],
+                       close_script=es_files[constants.ES_SCRIPT_CLOSE],
                        supported_parameters=parameters)
   return True, es_obj
 
@@ -470,7 +508,8 @@
 def _ExtStorageEnvironment(unique_id, ext_params,
                            size=None, grow=None, metadata=None,
                            name=None, uuid=None,
-                           snap_name=None, snap_size=None):
+                           snap_name=None, snap_size=None,
+                           exclusive=None):
   """Calculate the environment for an External Storage script.
 
   @type unique_id: tuple (driver, vol_name)
@@ -491,6 +530,8 @@
   @param snap_size: the size of the snapshot
   @type snap_name: string
   @param snap_name: the name of the snapshot
+  @type exclusive: boolean
+  @param exclusive: Whether the Volume will be opened exclusively or not
   @rtype: dict
   @return: dict of environment variables
 
@@ -525,6 +566,9 @@
   if snap_size is not None:
     result["VOL_SNAPSHOT_SIZE"] = str(snap_size)
 
+  if exclusive is not None:
+    result["VOL_OPEN_EXCLUSIVE"] = str(exclusive)
+
   return result
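
The effect for ExtStorage providers is visible in the script environment: when
``exclusive`` is given, the (optional) open script sees it as a string flag. A
small sketch, ignoring the other ``VOL_*`` variables the function also sets::

  env = _ExtStorageEnvironment(("provider-x", "vol0"), {}, exclusive=True)
  # str() is used, so provider scripts should compare against "True"/"False".
  assert env["VOL_OPEN_EXCLUSIVE"] == "True"
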
 
 
diff --git a/lib/storage/filestorage.py b/lib/storage/filestorage.py
index 0e37d59..74234e8 100644
--- a/lib/storage/filestorage.py
+++ b/lib/storage/filestorage.py
@@ -91,6 +91,7 @@
       _file_path_acceptance_fn = CheckFileStoragePathAcceptance
     _file_path_acceptance_fn(path)
 
+    self.file_path_acceptance_fn = _file_path_acceptance_fn
     self.path = path
 
   def Exists(self, assert_exists=None):
@@ -167,6 +168,19 @@
     except EnvironmentError, err:
       base.ThrowError("%s: can't grow: ", self.path, str(err))
 
+  def Move(self, new_path):
+    """Move file to a location inside the file storage dir.
+
+    """
+    # Check that the file exists
+    self.Exists(assert_exists=True)
+    self.file_path_acceptance_fn(new_path)
+    try:
+      os.rename(self.path, new_path)
+      self.path = new_path
+    except OSError, err:
+      base.ThrowError("%s: can't rename to %s: ", str(err), new_path)
+
 
 class FileStorage(base.BlockDev):
   """File device.
@@ -208,7 +222,7 @@
     """
     pass
 
-  def Open(self, force=False):
+  def Open(self, force=False, exclusive=True):
     """Make the device ready for I/O.
 
     This is a no-op for the file type.
@@ -237,8 +251,7 @@
     """Renames the file.
 
     """
-    # TODO: implement rename for file-based storage
-    base.ThrowError("Rename is not supported for file-based storage")
+    return self.file.Move(new_id[1])
 
   def Grow(self, amount, dryrun, backingstore, excl_stor):
     """Grow the file
diff --git a/lib/storage/gluster.py b/lib/storage/gluster.py
index 656474f..6418c9d 100644
--- a/lib/storage/gluster.py
+++ b/lib/storage/gluster.py
@@ -338,7 +338,7 @@
     self.dev_path = None
     self.attached = False
 
-  def Open(self, force=False):
+  def Open(self, force=False, exclusive=True):
     """Make the device ready for I/O.
 
     This is a no-op for the file type.
diff --git a/lib/tools/burnin.py b/lib/tools/burnin.py
index 0969d32..cea8a70 100755
--- a/lib/tools/burnin.py
+++ b/lib/tools/burnin.py
@@ -37,8 +37,11 @@
 import time
 import socket
 import urllib
+import random
+import string # pylint: disable=W0402
 from itertools import izip, islice, cycle
 from cStringIO import StringIO
+from operator import or_
 
 from ganeti import opcodes
 from ganeti import constants
@@ -130,6 +133,10 @@
   sys.exit(exit_code)
 
 
+def RandomString(size=8, chars=string.ascii_uppercase + string.digits):
+  return ''.join(random.choice(chars) for x in range(size))
+
+
 class SimpleOpener(urllib.FancyURLopener):
   """A simple url opener"""
   # pylint: disable=W0221
@@ -312,24 +319,11 @@
   return wrap
 
 
-class Burner(object):
-  """Burner class."""
+class FeedbackAccumulator(object):
+  """Feedback accumulator class."""
 
-  def __init__(self):
-    """Constructor."""
-    self.url_opener = SimpleOpener()
-    self._feed_buf = StringIO()
-    self.nodes = []
-    self.instances = []
-    self.to_rem = []
-    self.queued_ops = []
-    self.opts = None
-    self.queue_retry = False
-    self.disk_count = self.disk_growth = self.disk_size = None
-    self.hvp = self.bep = None
-    self.ParseOptions()
-    self.cl = cli.GetClient()
-    self.GetState()
+  _feed_buf = StringIO()
+  opts = None
 
   def ClearFeedbackBuf(self):
     """Clear the feedback buffer."""
@@ -346,6 +340,16 @@
     if self.opts.verbose:
       Log(formatted_msg, indent=3)
 
+
+class JobHandler(FeedbackAccumulator):
+  """Class for handling Ganeti jobs."""
+
+  queued_ops = []
+  queue_retry = False
+
+  def __init__(self):
+    self.cl = cli.GetClient()
+
   def MaybeRetry(self, retry_count, msg, fn, *args):
     """Possibly retry a given function execution.
 
@@ -480,6 +484,26 @@
 
     return val
 
+
+class Burner(JobHandler):
+  """Burner class."""
+
+  def __init__(self):
+    """Constructor."""
+    super(Burner, self).__init__()
+
+    self.url_opener = SimpleOpener()
+    self.nodes = []
+    self.instances = []
+    self.to_rem = []
+    self.disk_count = self.disk_growth = self.disk_size = None
+    self.hvp = self.bep = None
+    self.ParseOptions()
+    self.disk_nodes = {}
+    self.instance_nodes = {}
+    self.GetState()
+    self.confd_reply = None
+
   def ParseOptions(self):
     """Parses the command line options.
 
@@ -597,6 +621,16 @@
     self.hv_can_migrate = \
       hypervisor.GetHypervisorClass(self.hypervisor).CAN_MIGRATE
 
+  def FindMatchingDisk(self, instance):
+    """Find a disk whose nodes match the instance's disk nodes."""
+    instance_nodes = self.instance_nodes[instance]
+    for disk, disk_nodes in self.disk_nodes.iteritems():
+      if instance_nodes == disk_nodes:
+        # Erase that disk from the dictionary so that we don't pick it again.
+        del self.disk_nodes[disk]
+        return disk
+    Err("Couldn't find matching detached disk for instance %s" % instance)
+
   @_DoCheckInstances
   @_DoBatch(False)
   def BurnCreateInstances(self):
@@ -942,24 +976,6 @@
       Log("deactivate disks (when offline)", indent=2)
       self.ExecOrQueue(instance, [op_act, op_stop, op_act, op_deact, op_start])
 
-  @_DoCheckInstances
-  @_DoBatch(False)
-  def BurnAddRemoveDisks(self):
-    """Add and remove an extra disk for the instances."""
-    Log("Adding and removing disks")
-    for instance in self.instances:
-      Log("instance %s", instance, indent=1)
-      op_add = opcodes.OpInstanceSetParams(
-        instance_name=instance,
-        disks=[(constants.DDM_ADD, {"size": self.disk_size[0]})])
-      op_rem = opcodes.OpInstanceSetParams(
-        instance_name=instance, disks=[(constants.DDM_REMOVE, {})])
-      op_stop = self.StopInstanceOp(instance)
-      op_start = self.StartInstanceOp(instance)
-      Log("adding a disk", indent=2)
-      Log("removing last disk", indent=2)
-      self.ExecOrQueue(instance, [op_add, op_stop, op_rem, op_start])
-
   @_DoBatch(False)
   def BurnAddRemoveNICs(self):
     """Add, change and remove an extra NIC for the instances."""
@@ -997,6 +1013,8 @@
           Log("Node role for master: OK", indent=1)
         else:
           Err("Node role for master: wrong: %s" % reply.server_reply.answer)
+      elif reply.orig_request.type == constants.CONFD_REQ_INSTANCE_DISKS:
+        self.confd_reply = reply.server_reply.answer
 
   def DoConfdRequestReply(self, req):
     self.confd_counting_callback.RegisterQuery(req.rsalt)
@@ -1035,6 +1053,81 @@
         query=self.cluster_info["master"])
     self.DoConfdRequestReply(req)
 
+  @_DoCheckInstances
+  @_DoBatch(False)
+  def BurnAddDisks(self):
+    """Add an extra disk to every instance and then detach it."""
+    Log("Adding and detaching disks")
+
+    # Instantiate a Confd client
+    filter_callback = confd_client.ConfdFilterCallback(self.ConfdCallback)
+    counting_callback = confd_client.ConfdCountingCallback(filter_callback)
+    self.confd_counting_callback = counting_callback
+    self.confd_client = confd_client.GetConfdClient(counting_callback)
+
+    # Iterate all instances, start them, add a disk with a unique name and
+    # detach it. Do all disk operations with hotplugging (if possible).
+    for instance in self.instances:
+      Log("instance %s", instance, indent=1)
+
+      # Fetch disk info for an instance from the confd. The result of the query
+      # will be stored in the confd_reply attribute of Burner.
+      req = (confd_client.ConfdClientRequest(
+        type=constants.CONFD_REQ_INSTANCE_DISKS, query=instance))
+      self.DoConfdRequestReply(req)
+
+      disk_name = RandomString()
+
+      nodes = [set(disk["nodes"]) for disk in self.confd_reply]
+      nodes = reduce(or_, nodes)
+      self.instance_nodes[instance] = nodes
+      self.disk_nodes[disk_name] = nodes
+
+      op_stop = self.StopInstanceOp(instance)
+      op_add = opcodes.OpInstanceSetParams(
+        instance_name=instance, hotplug_if_possible=True,
+        disks=[(constants.DDM_ADD, {"size": self.disk_size[0],
+                                    "name": disk_name})])
+      op_detach = opcodes.OpInstanceSetParams(
+        instance_name=instance, hotplug_if_possible=True,
+        disks=[(constants.DDM_DETACH, {})])
+      op_start = self.StartInstanceOp(instance)
+      Log("adding a disk with name %s" % disk_name, indent=2)
+      Log("detaching last disk", indent=2)
+      self.ExecOrQueue(instance, [op_start, op_add, op_detach, op_stop,
+                                  op_start])
+
+  @_DoCheckInstances
+  @_DoBatch(False)
+  def BurnRemoveDisks(self):
+    """Attach a previously detached disk to an instance and then remove it."""
+    Log("Attaching and removing disks")
+
+    # Iterate all instances in random order, attach the detached disks, remove
+    # them and then restart the instances. Do all disk operation with
+    # hotplugging (if possible).
+    instances_copy = list(self.instances)
+    random.shuffle(instances_copy)
+    for instance in instances_copy:
+      Log("instance %s", instance, indent=1)
+
+      disk_name = self.FindMatchingDisk(instance)
+      op_attach = opcodes.OpInstanceSetParams(
+        instance_name=instance, hotplug_if_possible=True,
+        disks=[(constants.DDM_ATTACH, {"name": disk_name})])
+      op_rem = opcodes.OpInstanceSetParams(
+        instance_name=instance, hotplug_if_possible=True,
+        disks=[(constants.DDM_REMOVE, {})])
+      op_stop = self.StopInstanceOp(instance)
+      op_start = self.StartInstanceOp(instance)
+      Log("attaching a disk with name %s" % disk_name, indent=2)
+      Log("removing last disk", indent=2)
+      self.ExecOrQueue(instance, [op_attach, op_rem, op_stop, op_start])
+
+    # Disk nodes are useful only for this test.
+    del self.disk_nodes
+    del self.instance_nodes
+
   def _CheckInstanceAlive(self, instance):
     """Check if an instance is alive by doing http checks.
 
@@ -1081,6 +1174,9 @@
     try:
       self.BurnCreateInstances()
 
+      if self.opts.do_startstop:
+        self.BurnStopStart()
+
       if self.bep[constants.BE_MINMEM] < self.bep[constants.BE_MAXMEM]:
         self.BurnModifyRuntimeMemory()
 
@@ -1126,8 +1222,8 @@
       if self.opts.do_renamesame:
         self.BurnRenameSame(self.opts.name_check, self.opts.ip_check)
 
-      if self.opts.do_addremove_disks:
-        self.BurnAddRemoveDisks()
+      if self.opts.do_confd_tests:
+        self.BurnConfd()
 
       default_nic_mode = self.cluster_default_nicparams[constants.NIC_MODE]
       # Don't add/remove nics in routed mode, as we would need an ip to add
@@ -1141,15 +1237,13 @@
       if self.opts.do_activate_disks:
         self.BurnActivateDisks()
 
+      if self.opts.do_addremove_disks:
+        self.BurnAddDisks()
+        self.BurnRemoveDisks()
+
       if self.opts.rename:
         self.BurnRename(self.opts.name_check, self.opts.ip_check)
 
-      if self.opts.do_confd_tests:
-        self.BurnConfd()
-
-      if self.opts.do_startstop:
-        self.BurnStopStart()
-
       has_err = False
     finally:
       if has_err:
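
The pairing of detached disks with instances relies on the two bookkeeping
dictionaries filled in ``BurnAddDisks``: both map to the same set of node
UUIDs, so ``FindMatchingDisk`` looks them up by set equality. A sketch with
hypothetical names, ``burner`` being a ``Burner`` instance::

  burner.instance_nodes = {"inst1.example.com": set(["node-A", "node-B"])}
  burner.disk_nodes = {"XK3Q9ZP2": set(["node-A", "node-B"])}
  assert burner.FindMatchingDisk("inst1.example.com") == "XK3Q9ZP2"
  # The matched disk is removed from disk_nodes so it is not picked twice.
  assert "XK3Q9ZP2" not in burner.disk_nodes
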
diff --git a/lib/tools/cfgupgrade.py b/lib/tools/cfgupgrade.py
new file mode 100644
index 0000000..d7b70b5
--- /dev/null
+++ b/lib/tools/cfgupgrade.py
@@ -0,0 +1,857 @@
+#
+#
+
+# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Library of the tools/cfgupgrade utility.
+
+This code handles only the types supported by simplejson. As an
+example, 'set' is a 'list'.
+
+"""
+
+import copy
+import os
+import os.path
+import sys
+import logging
+import optparse
+import time
+import functools
+from cStringIO import StringIO
+
+from ganeti import cli
+from ganeti import constants
+from ganeti import serializer
+from ganeti import utils
+from ganeti import bootstrap
+from ganeti import config
+from ganeti import pathutils
+from ganeti import netutils
+
+from ganeti.utils import version
+
+
+#: Target major version we will upgrade to
+TARGET_MAJOR = 2
+#: Target minor version we will upgrade to
+TARGET_MINOR = 14
+#: Target major version for downgrade
+DOWNGRADE_MAJOR = 2
+#: Target minor version for downgrade
+DOWNGRADE_MINOR = 13
+
+# map of legacy device types
+# (mapping differing old LD_* constants to new DT_* constants)
+DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
+# (mapping differing new DT_* constants to old LD_* constants)
+DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
+
+
+class Error(Exception):
+  """Generic exception"""
+  pass
+
+
+def ParseOptions(args=None):
+  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
+  parser.add_option("--dry-run", dest="dry_run",
+                    action="store_true",
+                    help="Try to do the conversion, but don't write"
+                         " output file")
+  parser.add_option(cli.FORCE_OPT)
+  parser.add_option(cli.DEBUG_OPT)
+  parser.add_option(cli.VERBOSE_OPT)
+  parser.add_option("--ignore-hostname", dest="ignore_hostname",
+                    action="store_true", default=False,
+                    help="Don't abort if hostname doesn't match")
+  parser.add_option("--path", help="Convert configuration in this"
+                    " directory instead of '%s'" % pathutils.DATA_DIR,
+                    default=pathutils.DATA_DIR, dest="data_dir")
+  parser.add_option("--confdir",
+                    help=("Use this directory instead of '%s'" %
+                          pathutils.CONF_DIR),
+                    default=pathutils.CONF_DIR, dest="conf_dir")
+  parser.add_option("--no-verify",
+                    help="Do not verify configuration after upgrade",
+                    action="store_true", dest="no_verify", default=False)
+  parser.add_option("--downgrade",
+                    help="Downgrade to the previous stable version",
+                    action="store_true", dest="downgrade", default=False)
+  return parser.parse_args(args=args)
+
+
+def OrFail(description=None):
+  """Make failure non-fatal and improve reporting."""
+  def wrapper(f):
+    @functools.wraps(f)
+    def wrapped(self):
+      safety = copy.deepcopy(self.config_data)
+      try:
+        f(self)
+      except BaseException, e:
+        msg = "%s failed:\n%s" % (description or f.func_name, e)
+        logging.exception(msg)
+        self.config_data = safety
+        self.errors.append(msg)
+    return wrapped
+  return wrapper
+
+
+class CfgUpgrade(object):
+  def __init__(self, opts, args):
+    self.opts = opts
+    self.args = args
+    self.errors = []
+
+  def Run(self):
+    """Main program.
+
+    """
+    self._ComposePaths()
+
+    self.SetupLogging()
+
+    # Option checking
+    if self.args:
+      raise Error("No arguments expected")
+    if self.opts.downgrade and not self.opts.no_verify:
+      self.opts.no_verify = True
+
+    # Check master name
+    if not (self.CheckHostname(self.opts.SSCONF_MASTER_NODE) or
+            self.opts.ignore_hostname):
+      logging.error("Aborting due to hostname mismatch")
+      sys.exit(constants.EXIT_FAILURE)
+
+    self._AskUser()
+
+    # Check whether it's a Ganeti configuration directory
+    if not (os.path.isfile(self.opts.CONFIG_DATA_PATH) and
+            os.path.isfile(self.opts.SERVER_PEM_PATH) and
+            os.path.isfile(self.opts.KNOWN_HOSTS_PATH)):
+      raise Error(("%s does not seem to be a Ganeti configuration"
+                   " directory") % self.opts.data_dir)
+
+    if not os.path.isdir(self.opts.conf_dir):
+      raise Error("Not a directory: %s" % self.opts.conf_dir)
+
+    self.config_data = serializer.LoadJson(utils.ReadFile(
+        self.opts.CONFIG_DATA_PATH))
+
+    try:
+      config_version = self.config_data["version"]
+    except KeyError:
+      raise Error("Unable to determine configuration version")
+
+    (config_major, config_minor, config_revision) = \
+      version.SplitVersion(config_version)
+
+    logging.info("Found configuration version %s (%d.%d.%d)",
+                 config_version, config_major, config_minor, config_revision)
+
+    if "config_version" in self.config_data["cluster"]:
+      raise Error("Inconsistent configuration: found config_version in"
+                  " configuration file")
+
+    # Downgrade to the previous stable version
+    if self.opts.downgrade:
+      self._Downgrade(config_major, config_minor, config_version,
+                      config_revision)
+
+    # Upgrade from 2.{0..13} to 2.14
+    elif config_major == 2 and config_minor in range(0, 14):
+      if config_revision != 0:
+        logging.warning("Config revision is %s, not 0", config_revision)
+      if not self.UpgradeAll():
+        raise Error("Upgrade failed:\n%s" % '\n'.join(self.errors))
+
+    elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
+      logging.info("No changes necessary")
+
+    else:
+      raise Error("Configuration version %d.%d.%d not supported by this tool" %
+                  (config_major, config_minor, config_revision))
+
+    try:
+      logging.info("Writing configuration file to %s",
+                   self.opts.CONFIG_DATA_PATH)
+      utils.WriteFile(file_name=self.opts.CONFIG_DATA_PATH,
+                      data=serializer.DumpJson(self.config_data),
+                      mode=0600,
+                      dry_run=self.opts.dry_run,
+                      backup=True)
+
+      if not self.opts.dry_run:
+        # This creates the cluster certificate if it does not exist yet.
+        # In this case, we do not automatically create a client certificate
+        # as well, because if the cluster certificate did not exist before,
+        # no client certificate will exist on any node yet. In this case
+        # all client certificate should be renewed by 'gnt-cluster
+        # renew-crypto --new-node-certificates'. This will be enforced
+        # by a nagging warning in 'gnt-cluster verify'.
+        bootstrap.GenerateClusterCrypto(
+          False, False, False, False, False, False, None,
+          nodecert_file=self.opts.SERVER_PEM_PATH,
+          rapicert_file=self.opts.RAPI_CERT_FILE,
+          spicecert_file=self.opts.SPICE_CERT_FILE,
+          spicecacert_file=self.opts.SPICE_CACERT_FILE,
+          hmackey_file=self.opts.CONFD_HMAC_KEY,
+          cds_file=self.opts.CDS_FILE)
+
+    except Exception:
+      logging.critical("Writing configuration failed. It is probably in an"
+                       " inconsistent state and needs manual intervention.")
+      raise
+
+    self._TestLoadingConfigFile()
+
+  def SetupLogging(self):
+    """Configures the logging module.
+
+    """
+    formatter = logging.Formatter("%(asctime)s: %(message)s")
+
+    stderr_handler = logging.StreamHandler()
+    stderr_handler.setFormatter(formatter)
+    if self.opts.debug:
+      stderr_handler.setLevel(logging.NOTSET)
+    elif self.opts.verbose:
+      stderr_handler.setLevel(logging.INFO)
+    else:
+      stderr_handler.setLevel(logging.WARNING)
+
+    root_logger = logging.getLogger("")
+    root_logger.setLevel(logging.NOTSET)
+    root_logger.addHandler(stderr_handler)
+
+  @staticmethod
+  def CheckHostname(path):
+    """Ensures hostname matches ssconf value.
+
+    @param path: Path to ssconf file
+
+    """
+    ssconf_master_node = utils.ReadOneLineFile(path)
+    hostname = netutils.GetHostname().name
+
+    if ssconf_master_node == hostname:
+      return True
+
+    logging.warning("Warning: ssconf says master node is '%s', but this"
+                    " machine's name is '%s'; this tool must be run on"
+                    " the master node", ssconf_master_node, hostname)
+    return False
+
+  @staticmethod
+  def _FillIPolicySpecs(default_ipolicy, ipolicy):
+    if "minmax" in ipolicy:
+      for (key, spec) in ipolicy["minmax"][0].items():
+        for (par, val) in default_ipolicy["minmax"][0][key].items():
+          if par not in spec:
+            spec[par] = val
+
+  def UpgradeIPolicy(self, ipolicy, default_ipolicy, isgroup):
+    minmax_keys = ["min", "max"]
+    if any((k in ipolicy) for k in minmax_keys):
+      minmax = {}
+      for key in minmax_keys:
+        if key in ipolicy:
+          if ipolicy[key]:
+            minmax[key] = ipolicy[key]
+          del ipolicy[key]
+      if minmax:
+        ipolicy["minmax"] = [minmax]
+    if isgroup and "std" in ipolicy:
+      del ipolicy["std"]
+    self._FillIPolicySpecs(default_ipolicy, ipolicy)
+
+  @OrFail("Setting networks")
+  def UpgradeNetworks(self):
+    assert isinstance(self.config_data, dict)
+    # pylint can't infer config_data type
+    # pylint: disable=E1103
+    networks = self.config_data.get("networks", None)
+    if not networks:
+      self.config_data["networks"] = {}
+
+  @OrFail("Upgrading cluster")
+  def UpgradeCluster(self):
+    assert isinstance(self.config_data, dict)
+    # pylint can't infer config_data type
+    # pylint: disable=E1103
+    cluster = self.config_data.get("cluster", None)
+    if cluster is None:
+      raise Error("Cannot find cluster")
+    ipolicy = cluster.setdefault("ipolicy", None)
+    if ipolicy:
+      self.UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
+    ial_params = cluster.get("default_iallocator_params", None)
+    if not ial_params:
+      cluster["default_iallocator_params"] = {}
+    if not "candidate_certs" in cluster:
+      cluster["candidate_certs"] = {}
+    cluster["instance_communication_network"] = \
+      cluster.get("instance_communication_network", "")
+    cluster["install_image"] = \
+      cluster.get("install_image", "")
+    cluster["zeroing_image"] = \
+      cluster.get("zeroing_image", "")
+    cluster["compression_tools"] = \
+      cluster.get("compression_tools", constants.IEC_DEFAULT_TOOLS)
+    if "enabled_user_shutdown" not in cluster:
+      cluster["enabled_user_shutdown"] = False
+    cluster["data_collectors"] = cluster.get("data_collectors", {})
+    for name in constants.DATA_COLLECTOR_NAMES:
+      cluster["data_collectors"][name] = \
+        cluster["data_collectors"].get(
+            name, dict(active=True,
+                       interval=constants.MOND_TIME_INTERVAL * 1e6))
+
+  @OrFail("Upgrading groups")
+  def UpgradeGroups(self):
+    cl_ipolicy = self.config_data["cluster"].get("ipolicy")
+    for group in self.config_data["nodegroups"].values():
+      networks = group.get("networks", None)
+      if not networks:
+        group["networks"] = {}
+      ipolicy = group.get("ipolicy", None)
+      if ipolicy:
+        if cl_ipolicy is None:
+          raise Error("A group defines an instance policy but there is no"
+                      " instance policy at cluster level")
+        self.UpgradeIPolicy(ipolicy, cl_ipolicy, True)
+
+  def GetExclusiveStorageValue(self):
+    """Return a conservative value of the exclusive_storage flag.
+
+    Return C{True} if the cluster or at least a nodegroup have the flag set.
+
+    """
+    ret = False
+    cluster = self.config_data["cluster"]
+    ndparams = cluster.get("ndparams")
+    if ndparams is not None and ndparams.get("exclusive_storage"):
+      ret = True
+    for group in self.config_data["nodegroups"].values():
+      ndparams = group.get("ndparams")
+      if ndparams is not None and ndparams.get("exclusive_storage"):
+        ret = True
+    return ret
+
+  def RemovePhysicalId(self, disk):
+    if "children" in disk:
+      for d in disk["children"]:
+        self.RemovePhysicalId(d)
+    if "physical_id" in disk:
+      del disk["physical_id"]
+
+  def ChangeDiskDevType(self, disk, dev_type_map):
+    """Replaces disk's dev_type attributes according to the given map.
+
+    This can be used for both, up or downgrading the disks.
+    """
+    if disk["dev_type"] in dev_type_map:
+      disk["dev_type"] = dev_type_map[disk["dev_type"]]
+    if "children" in disk:
+      for child in disk["children"]:
+        self.ChangeDiskDevType(child, dev_type_map)
+
+  def UpgradeDiskDevType(self, disk):
+    """Upgrades the disks' device type."""
+    self.ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
+
+  @staticmethod
+  def _ConvertNicNameToUuid(iobj, network2uuid):
+    for nic in iobj["nics"]:
+      name = nic.get("network", None)
+      if name:
+        uuid = network2uuid.get(name, None)
+        if uuid:
+          print("NIC with network name %s found."
+                " Substituting with uuid %s." % (name, uuid))
+          nic["network"] = uuid
+
+  @classmethod
+  def AssignUuid(cls, disk):
+    if not "uuid" in disk:
+      disk["uuid"] = utils.io.NewUUID()
+    if "children" in disk:
+      for d in disk["children"]:
+        cls.AssignUuid(d)
+
+  def _ConvertDiskAndCheckMissingSpindles(self, iobj, instance):
+    missing_spindles = False
+    if "disks" not in iobj:
+      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
+    disks = iobj["disks"]
+    if not all(isinstance(d, str) for d in disks):
+      #  Disks are not top level citizens
+      for idx, dobj in enumerate(disks):
+        self.RemovePhysicalId(dobj)
+
+        expected = "disk/%s" % idx
+        current = dobj.get("iv_name", "")
+        if current != expected:
+          logging.warning("Updating iv_name for instance %s/disk %s"
+                          " from '%s' to '%s'",
+                          instance, idx, current, expected)
+          dobj["iv_name"] = expected
+
+        if "dev_type" in dobj:
+          self.UpgradeDiskDevType(dobj)
+
+        if not "spindles" in dobj:
+          missing_spindles = True
+
+        self.AssignUuid(dobj)
+    return missing_spindles
+
+  @OrFail("Upgrading instance with spindles")
+  def UpgradeInstances(self):
+    """Upgrades the instances' configuration."""
+
+    network2uuid = dict((n["name"], n["uuid"])
+                        for n in self.config_data["networks"].values())
+    if "instances" not in self.config_data:
+      raise Error("Can't find the 'instances' key in the configuration!")
+
+    missing_spindles = False
+    for instance, iobj in self.config_data["instances"].items():
+      self._ConvertNicNameToUuid(iobj, network2uuid)
+      if self._ConvertDiskAndCheckMissingSpindles(iobj, instance):
+        missing_spindles = True
+      if "admin_state_source" not in iobj:
+        iobj["admin_state_source"] = constants.ADMIN_SOURCE
+
+    if self.GetExclusiveStorageValue() and missing_spindles:
+      # We cannot be sure that the instances that are missing spindles have
+      # exclusive storage enabled (the check would be more complicated), so we
+      # give a noncommittal message
+      logging.warning("Some instance disks may need the spindles parameter"
+                      " to be updated; you can check by running"
+                      " 'gnt-cluster verify', and fix any problem with"
+                      " 'gnt-cluster repair-disk-sizes'")
+
+  def UpgradeRapiUsers(self):
+    if (os.path.isfile(self.opts.RAPI_USERS_FILE_PRE24) and
+        not os.path.islink(self.opts.RAPI_USERS_FILE_PRE24)):
+      if os.path.exists(self.opts.RAPI_USERS_FILE):
+        raise Error("Found pre-2.4 RAPI users file at %s, but another file"
+                    " already exists at %s" %
+                    (self.opts.RAPI_USERS_FILE_PRE24,
+                     self.opts.RAPI_USERS_FILE))
+      logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
+                   self.opts.RAPI_USERS_FILE_PRE24, self.opts.RAPI_USERS_FILE)
+      if not self.opts.dry_run:
+        utils.RenameFile(self.opts.RAPI_USERS_FILE_PRE24,
+                         self.opts.RAPI_USERS_FILE, mkdir=True, mkdir_mode=0750)
+
+    # Create a symlink for RAPI users file
+    if (not (os.path.islink(self.opts.RAPI_USERS_FILE_PRE24) or
+             os.path.isfile(self.opts.RAPI_USERS_FILE_PRE24)) and
+        os.path.isfile(self.opts.RAPI_USERS_FILE)):
+      logging.info("Creating symlink from %s to %s",
+                   self.opts.RAPI_USERS_FILE_PRE24, self.opts.RAPI_USERS_FILE)
+      if not self.opts.dry_run:
+        os.symlink(self.opts.RAPI_USERS_FILE, self.opts.RAPI_USERS_FILE_PRE24)
+
+  def UpgradeWatcher(self):
+    # Remove old watcher state file if it exists
+    if os.path.exists(self.opts.WATCHER_STATEFILE):
+      logging.info("Removing watcher state file %s",
+                   self.opts.WATCHER_STATEFILE)
+      if not self.opts.dry_run:
+        utils.RemoveFile(self.opts.WATCHER_STATEFILE)
+
+  @OrFail("Upgrading file storage paths")
+  def UpgradeFileStoragePaths(self):
+    # Write file storage paths
+    if not os.path.exists(self.opts.FILE_STORAGE_PATHS_FILE):
+      cluster = self.config_data["cluster"]
+      file_storage_dir = cluster.get("file_storage_dir")
+      shared_file_storage_dir = cluster.get("shared_file_storage_dir")
+      del cluster
+
+      logging.info("Ganeti 2.7 and later only allow whitelisted directories"
+                   " for file storage; writing existing configuration values"
+                   " into '%s'",
+                   self.opts.FILE_STORAGE_PATHS_FILE)
+
+      if file_storage_dir:
+        logging.info("File storage directory: %s", file_storage_dir)
+      if shared_file_storage_dir:
+        logging.info("Shared file storage directory: %s",
+                     shared_file_storage_dir)
+
+      buf = StringIO()
+      buf.write("# List automatically generated from configuration by\n")
+      buf.write("# cfgupgrade at %s\n" % time.asctime())
+      if file_storage_dir:
+        buf.write("%s\n" % file_storage_dir)
+      if shared_file_storage_dir:
+        buf.write("%s\n" % shared_file_storage_dir)
+      utils.WriteFile(file_name=self.opts.FILE_STORAGE_PATHS_FILE,
+                      data=buf.getvalue(),
+                      mode=0600,
+                      dry_run=self.opts.dry_run,
+                      backup=True)
+
+  @staticmethod
+  def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
+    if old_key not in nodes_by_old_key:
+      logging.warning("Can't find node '%s' in configuration, "
+                      "assuming that it's already up-to-date", old_key)
+      return old_key
+    return nodes_by_old_key[old_key][new_key_field]
+
+  def ChangeNodeIndices(self, config_data, old_key_field, new_key_field):
+    def ChangeDiskNodeIndices(disk):
+      # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
+      # considered when up/downgrading from/to any versions touching 2.9 on the
+      # way.
+      drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
+      if disk["dev_type"] in drbd_disk_types:
+        for i in range(0, 2):
+          disk["logical_id"][i] = self.GetNewNodeIndex(nodes_by_old_key,
+                                                       disk["logical_id"][i],
+                                                       new_key_field)
+      if "children" in disk:
+        for child in disk["children"]:
+          ChangeDiskNodeIndices(child)
+
+    nodes_by_old_key = {}
+    nodes_by_new_key = {}
+    for (_, node) in config_data["nodes"].items():
+      nodes_by_old_key[node[old_key_field]] = node
+      nodes_by_new_key[node[new_key_field]] = node
+
+    config_data["nodes"] = nodes_by_new_key
+
+    cluster = config_data["cluster"]
+    cluster["master_node"] = self.GetNewNodeIndex(nodes_by_old_key,
+                                                  cluster["master_node"],
+                                                  new_key_field)
+
+    for inst in config_data["instances"].values():
+      inst["primary_node"] = self.GetNewNodeIndex(nodes_by_old_key,
+                                                  inst["primary_node"],
+                                                  new_key_field)
+
+    for disk in config_data["disks"].values():
+      ChangeDiskNodeIndices(disk)
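+
+    # Illustrative note: after running with ("name", "uuid"), the "nodes"
+    # dictionary is keyed by node UUID and the references in the cluster's
+    # master_node, every instance's primary_node and the first two slots of
+    # each DRBD logical_id point at node UUIDs instead of names.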
+
+  @staticmethod
+  def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
+    insts_by_old_key = {}
+    insts_by_new_key = {}
+    for (_, inst) in config_data["instances"].items():
+      insts_by_old_key[inst[old_key_field]] = inst
+      insts_by_new_key[inst[new_key_field]] = inst
+
+    config_data["instances"] = insts_by_new_key
+
+  @OrFail("Changing node indices")
+  def UpgradeNodeIndices(self):
+    self.ChangeNodeIndices(self.config_data, "name", "uuid")
+
+  @OrFail("Changing instance indices")
+  def UpgradeInstanceIndices(self):
+    self.ChangeInstanceIndices(self.config_data, "name", "uuid")
+
+  @OrFail("Adding filters")
+  def UpgradeFilters(self):
+    # pylint can't infer config_data type
+    # pylint: disable=E1103
+    filters = self.config_data.get("filters", None)
+    if not filters:
+      self.config_data["filters"] = {}
+
+  @OrFail("Set top level disks")
+  def UpgradeTopLevelDisks(self):
+    """Upgrades the disks as config top level citizens."""
+    if "instances" not in self.config_data:
+      raise Error("Can't find the 'instances' key in the configuration!")
+
+    if "disks" in self.config_data:
+      # Disks are already top level citizens
+      return
+
+    self.config_data["disks"] = dict()
+    for iobj in self.config_data["instances"].values():
+      disk_uuids = []
+      for disk in iobj["disks"]:
+        duuid = disk["uuid"]
+        disk["serial_no"] = 1
+        # Instances may not have the ctime value, and the Haskell serialization
+        # will have set it to zero.
+        disk["ctime"] = disk["mtime"] = iobj.get("ctime", 0)
+        self.config_data["disks"][duuid] = disk
+        disk_uuids.append(duuid)
+      iobj["disks"] = disk_uuids
+
+  @OrFail("Removing disk template")
+  def UpgradeDiskTemplate(self):
+    if "instances" not in self.config_data:
+      raise Error("Can't find the 'instances' dictionary in the configuration.")
+    instances = self.config_data["instances"]
+    for inst in instances.values():
+      if "disk_template" in inst:
+        del inst["disk_template"]
+
+  # The following function is based on a method of class Disk with the same
+  # name, but adjusted to work with dicts and sets.
+  def _ComputeAllNodes(self, disk):
+    """Recursively compute nodes given a top device."""
+    nodes = set()
+    if disk["dev_type"] in constants.DTS_DRBD:
+      nodes = set(disk["logical_id"][:2])
+    for child in disk.get("children", []):
+      nodes |= self._ComputeAllNodes(child)
+    return nodes
+
+  def _RecursiveUpdateNodes(self, disk, nodes):
+    disk["nodes"] = nodes
+    for child in disk.get("children", []):
+      self._RecursiveUpdateNodes(child, nodes)
+
+  @OrFail("Upgrading disk nodes")
+  def UpgradeDiskNodes(self):
+    """Specify the nodes from which a disk is accessible in its definition.
+
+    For every disk that is attached to an instance, get the UUIDs of the nodes
+    that it's accessible from. There are three main cases:
+    1) Internally mirrored disks (DRBD):
+    These disks are accessible from two nodes, so the nodes list will include
+    these. Their children (data, meta) are also accessible from two nodes,
+    therefore they will inherit the nodes of the parent.
+    2) Externally mirrored disks (Blockdev, Ext, Gluster, RBD, Shared File):
+    These disks should be accessible from any node in the cluster, therefore the
+    nodes list will be empty.
+    3) Single-node disks (Plain, File):
+    These disks are accessible from one node only, therefore the nodes list will
+    consist only of the primary instance node.
+    """
+    disks = self.config_data["disks"]
+    for instance in self.config_data["instances"].itervalues():
+      # Get all disk nodes for an instance
+      instance_node = set([instance["primary_node"]])
+      disk_nodes = set()
+      for disk_uuid in instance["disks"]:
+        disk_nodes |= self._ComputeAllNodes(disks[disk_uuid])
+      all_nodes = list(instance_node | disk_nodes)
+
+      # Populate the `nodes` list field of each disk.
+      for disk_uuid in instance["disks"]:
+        disk = disks[disk_uuid]
+        if "nodes" in disk:
+          # The "nodes" field has already been added for this disk.
+          continue
+
+        if disk["dev_type"] in constants.DTS_INT_MIRROR:
+          self._RecursiveUpdateNodes(disk, all_nodes)
+        elif disk["dev_type"] in (constants.DT_PLAIN, constants.DT_FILE):
+          disk["nodes"] = all_nodes
+        else:
+          disk["nodes"] = []
+
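+  # Illustrative outcome of UpgradeDiskNodes above (field names as assumed
+  # by this tool): a DRBD disk and its children end up with
+  # "nodes" == [primary, secondary], a plain or file disk with
+  # "nodes" == [primary], and externally mirrored disks (Blockdev, Ext,
+  # Gluster, RBD, shared file) with "nodes" == [].
+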
+  def UpgradeAll(self):
+    self.config_data["version"] = version.BuildVersion(TARGET_MAJOR,
+                                                       TARGET_MINOR, 0)
+    self.UpgradeRapiUsers()
+    self.UpgradeWatcher()
+    steps = [self.UpgradeFileStoragePaths,
+             self.UpgradeNetworks,
+             self.UpgradeCluster,
+             self.UpgradeGroups,
+             self.UpgradeInstances,
+             self.UpgradeTopLevelDisks,
+             self.UpgradeNodeIndices,
+             self.UpgradeInstanceIndices,
+             self.UpgradeFilters,
+             self.UpgradeDiskNodes,
+             self.UpgradeDiskTemplate]
+    for s in steps:
+      s()
+    return not self.errors
+
+  # DOWNGRADE ------------------------------------------------------------
+
+  def _RecursiveRemoveNodes(self, disk):
+    if "nodes" in disk:
+      del disk["nodes"]
+    for disk in disk.get("children", []):
+      self._RecursiveRemoveNodes(disk)
+
+  @OrFail("Downgrading disk nodes")
+  def DowngradeDiskNodes(self):
+    if "disks" not in self.config_data:
+      raise Error("Can't find the 'disks' dictionary in the configuration.")
+    for disk in self.config_data["disks"].itervalues():
+      self._RecursiveRemoveNodes(disk)
+
+  @OrFail("Removing forthcoming instances")
+  def DowngradeForthcomingInstances(self):
+    if "instances" not in self.config_data:
+      raise Error("Can't find the 'instances' dictionary in the configuration.")
+    instances = self.config_data["instances"]
+    uuids = instances.keys()
+    for uuid in uuids:
+      if instances[uuid].get("forthcoming"):
+        del instances[uuid]
+
+  @OrFail("Removing forthcoming disks")
+  def DowngradeForthcomingDisks(self):
+    if "instances" not in self.config_data:
+      raise Error("Can't find the 'instances' dictionary in the configuration.")
+    instances = self.config_data["instances"]
+    if "disks" not in self.config_data:
+      raise Error("Can't find the 'disks' dictionary in the configuration.")
+    disks = self.config_data["disks"]
+    uuids = disks.keys()
+    for uuid in uuids:
+      if disks[uuid].get("forthcoming"):
+        del disks[uuid]
+        for inst in instances.values():
+          if "disks" in inst and uuid in inst["disks"]:
+            inst["disks"].remove(uuid)
+
+  @OrFail("Re-adding disk template")
+  def DowngradeDiskTemplate(self):
+    if "instances" not in self.config_data:
+      raise Error("Can't find the 'instances' dictionary in the configuration.")
+    instances = self.config_data["instances"]
+    if "disks" not in self.config_data:
+      raise Error("Can't find the 'disks' dictionary in the configuration.")
+    disks = self.config_data["disks"]
+    for inst in instances.values():
+      instance_disks = [disks.get(uuid) for uuid in inst["disks"]]
+      if any(d is None for d in instance_disks):
+        raise Error("Can't find all disks of instance %s in the configuration."
+                    % inst.name)
+      dev_types = set(d["dev_type"] for d in instance_disks)
+      if len(dev_types) > 1:
+        raise Error("Instance %s has mixed disk types: %s" %
+                    (inst.name, ', '.join(dev_types)))
+      elif len(dev_types) < 1:
+        inst["disk_template"] = constants.DT_DISKLESS
+      else:
+        inst["disk_template"] = dev_types.pop()
+
+  def DowngradeAll(self):
+    self.config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
+                                                       DOWNGRADE_MINOR, 0)
+    steps = [self.DowngradeForthcomingInstances,
+             self.DowngradeForthcomingDisks,
+             self.DowngradeDiskNodes,
+             self.DowngradeDiskTemplate]
+    for s in steps:
+      s()
+    return not self.errors
+
+  def _ComposePaths(self):
+    # We need to keep filenames locally because they might be renamed between
+    # versions.
+    self.opts.data_dir = os.path.abspath(self.opts.data_dir)
+    self.opts.CONFIG_DATA_PATH = self.opts.data_dir + "/config.data"
+    self.opts.SERVER_PEM_PATH = self.opts.data_dir + "/server.pem"
+    self.opts.CLIENT_PEM_PATH = self.opts.data_dir + "/client.pem"
+    self.opts.KNOWN_HOSTS_PATH = self.opts.data_dir + "/known_hosts"
+    self.opts.RAPI_CERT_FILE = self.opts.data_dir + "/rapi.pem"
+    self.opts.SPICE_CERT_FILE = self.opts.data_dir + "/spice.pem"
+    self.opts.SPICE_CACERT_FILE = self.opts.data_dir + "/spice-ca.pem"
+    self.opts.RAPI_USERS_FILE = self.opts.data_dir + "/rapi/users"
+    self.opts.RAPI_USERS_FILE_PRE24 = self.opts.data_dir + "/rapi_users"
+    self.opts.CONFD_HMAC_KEY = self.opts.data_dir + "/hmac.key"
+    self.opts.CDS_FILE = self.opts.data_dir + "/cluster-domain-secret"
+    self.opts.SSCONF_MASTER_NODE = self.opts.data_dir + "/ssconf_master_node"
+    self.opts.WATCHER_STATEFILE = self.opts.data_dir + "/watcher.data"
+    self.opts.FILE_STORAGE_PATHS_FILE = (self.opts.conf_dir +
+                                         "/file-storage-paths")
+
+  def _AskUser(self):
+    if not self.opts.force:
+      if self.opts.downgrade:
+        usertext = ("The configuration is going to be DOWNGRADED "
+                    "to version %s.%s. Some configuration data might be "
+                    " removed if they don't fit"
+                    " in the old format. Please make sure you have read the"
+                    " upgrade notes (available in the UPGRADE file and included"
+                    " in other documentation formats) to understand what they"
+                    " are. Continue with *DOWNGRADING* the configuration?" %
+                    (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
+      else:
+        usertext = ("Please make sure you have read the upgrade notes for"
+                    " Ganeti %s (available in the UPGRADE file and included"
+                    " in other documentation formats). Continue with upgrading"
+                    " configuration?" % constants.RELEASE_VERSION)
+      if not cli.AskUser(usertext):
+        sys.exit(constants.EXIT_FAILURE)
+
+  def _Downgrade(self, config_major, config_minor, config_version,
+                 config_revision):
+    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
+            (config_major == DOWNGRADE_MAJOR and
+             config_minor == DOWNGRADE_MINOR)):
+      raise Error("Downgrade supported only from the latest version (%s.%s),"
+                  " found %s (%s.%s.%s) instead" %
+                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
+                   config_minor, config_revision))
+    if not self.DowngradeAll():
+      raise Error("Downgrade failed:\n%s" % "\n".join(self.errors))
+
+  def _TestLoadingConfigFile(self):
+    # test loading the config file
+    all_ok = True
+    if not (self.opts.dry_run or self.opts.no_verify):
+      logging.info("Testing the new config file...")
+      cfg = config.ConfigWriter(cfg_file=self.opts.CONFIG_DATA_PATH,
+                                accept_foreign=self.opts.ignore_hostname,
+                                offline=True)
+      # if we reached this, it's all fine
+      vrfy = cfg.VerifyConfig()
+      if vrfy:
+        logging.error("Errors after conversion:")
+        for item in vrfy:
+          logging.error(" - %s", item)
+        all_ok = False
+      else:
+        logging.info("File loaded successfully after upgrading")
+      del cfg
+
+    if self.opts.downgrade:
+      action = "downgraded"
+      out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
+    else:
+      action = "upgraded"
+      out_ver = constants.RELEASE_VERSION
+    if all_ok:
+      cli.ToStderr("Configuration successfully %s to version %s.",
+                   action, out_ver)
+    else:
+      cli.ToStderr("Configuration %s to version %s, but there are errors."
+                   "\nPlease review the file.", action, out_ver)
diff --git a/lib/utils/__init__.py b/lib/utils/__init__.py
index 7e07bcf..bdd9761 100644
--- a/lib/utils/__init__.py
+++ b/lib/utils/__init__.py
@@ -885,3 +885,82 @@
                                    errors.ECODE_NOTUNIQUE)
       else:
         valid.append(name)
+
+
+def AllDiskOfType(disks_info, dev_types):
+  """Checks if the instance has only disks of any of the dev_types.
+
+  @type disks_info: list of L{Disk}
+  @param disks_info: all the disks of the instance.
+  @type dev_types: list of disk templates
+  @param dev_types: the allowed disk types.
+
+  @rtype: bool
+  @return: True iff the instance only has disks whose type is in dev_types.
+  """
+
+  assert not isinstance(dev_types, str)
+
+  if not disks_info and constants.DT_DISKLESS not in dev_types:
+    return False
+
+  for disk in disks_info:
+    if disk.dev_type not in dev_types:
+      return False
+
+  return True
+
+
+def AnyDiskOfType(disks_info, dev_types):
+  """Checks if the instance has some disks of any types in dev_types.
+
+  @type disks_info: list of L{Disk}
+  @param disks_info: all the disks of the instance.
+  @type dev_types: list of disk templates
+  @param dev_types: the disk types to look for.
+
+  @rtype: bool
+  @return: True if the instance has at least one disk whose type is in
+    dev_types, or if the instance has no disks and dev_types allows
+    DT_DISKLESS.
+  """
+
+  assert not isinstance(dev_types, str)
+
+  if not disks_info and constants.DT_DISKLESS in dev_types:
+    return True
+
+  for disk in disks_info:
+    if disk.dev_type in dev_types:
+      return True
+
+  return False
+
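+# Illustrative behaviour of the two predicates above: for an instance with a
+# single DRBD disk, AllDiskOfType(disks, [constants.DT_DRBD8]) is True while
+# AnyDiskOfType(disks, [constants.DT_PLAIN]) is False; for a diskless
+# instance, both return True only if constants.DT_DISKLESS is in dev_types.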
+
+def GetDiskTemplateString(disk_types):
+  """Gives a summary disk template from disk devtypes.
+
+  @type disk_types: list of string
+  @param disk_types: all the dev_types of the instance.
+  @rtype disk template
+  @returns the summarized disk template of the disk types.
+
+  """
+  disk_types = set(disk_types)
+  if not disk_types:
+    return constants.DT_DISKLESS
+  elif len(disk_types) > 1:
+    return constants.DT_MIXED
+  else:
+    return disk_types.pop()
+
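+# For example (assuming the usual DT_* constants):
+#   GetDiskTemplateString([]) == constants.DT_DISKLESS
+#   GetDiskTemplateString([constants.DT_PLAIN] * 2) == constants.DT_PLAIN
+#   GetDiskTemplateString([constants.DT_PLAIN, constants.DT_DRBD8])
+#     == constants.DT_MIXED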
+
+def GetDiskTemplate(disks_info):
+  """Gives a summary disk template from disks.
+
+  @type disks_info: list of L{Disk}
+  @param disks_info: all the disks of the instance.
+  @rtype: disk template
+  @return: the summarized disk template of the disk types.
+
+  """
+  return GetDiskTemplateString(d.dev_type for d in disks_info)
diff --git a/lib/utils/retry.py b/lib/utils/retry.py
index fda3fcd..8079303 100644
--- a/lib/utils/retry.py
+++ b/lib/utils/retry.py
@@ -233,6 +233,26 @@
   return result
 
 
+def CountRetry(expected, fn, count, args=None):
+  """A wrapper over L{SimpleRetry} implementing a count down.
+
+  Where L{Retry} fixes the time, after which the command is assumed to be
+  failing, this function assumes the total number of tries.
+
+  @see: L{Retry}
+  """
+
+  rdict = {"tries": 0}
+
+  get_tries = lambda: rdict["tries"]
+
+  def inc_tries(t):
+    rdict["tries"] += t
+
+  return SimpleRetry(expected, fn, 1, count, args=args,
+                     wait_fn=inc_tries, _time_fn=get_tries)
+
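+# A possible use (illustrative only; flaky_fn and node are placeholders),
+# mirroring the QA helpers: call flaky_fn(node) at most three times, stopping
+# as soon as the predicate holds:
+#   CountRetry(lambda result: result == 0, flaky_fn, 3, args=[node])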
+
 def RetryByNumberOfTimes(max_retries, exception_class, fn, *args, **kwargs):
   """Retries calling a function up to the specified number of times.
 
diff --git a/lib/utils/text.py b/lib/utils/text.py
index 98d3db7..4bb0f81 100644
--- a/lib/utils/text.py
+++ b/lib/utils/text.py
@@ -159,6 +159,10 @@
   if units not in ("m", "g", "t", "h"):
     raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))
 
+  if not isinstance(value, (int, long, float)):
+    raise errors.ProgrammerError("Invalid value specified '%s (%s)'" % (
+        value, type(value)))
+
   suffix = ""
 
   if units == "m" or (units == "h" and value < 1024):
diff --git a/man/ganeti-extstorage-interface.rst b/man/ganeti-extstorage-interface.rst
index 97ce14f..49631b0 100644
--- a/man/ganeti-extstorage-interface.rst
+++ b/man/ganeti-extstorage-interface.rst
@@ -22,15 +22,18 @@
 ---------
 
 There are eight required files: *create*, *attach*, *detach*, *remove*,
-*grow*, *setinfo*, *verify*, *snapshot* (executables) and *parameters.list*
+*grow*, *setinfo*, *verify* (executables) and *parameters.list*
 (text file).
 
+There are also three optional files: *open*, *close*, and
+*snapshot* (executables).
+
 Common environment
 ~~~~~~~~~~~~~~~~~~
 
 All commands will get their input via environment variables. A common
-set of variables will be exported for all commands, and some of them
-might have extra ones. Note that all counts are zero-based.
+set of variables will be exported for all commands, and some commands
+might have extra variables. Note that all counts are zero-based.
 
 Since Ganeti version 2.5, the environment will be cleaned up before
 being passed to scripts, therefore they will not inherit the environment
@@ -46,7 +49,8 @@
     disk count.
 
 VOL_SIZE
-    The volume's size in mebibytes.
+    Available only to the **create** and **grow** scripts. The volume's
+    size in mebibytes.
 
 VOL_NEW_SIZE
     Available only to the **grow** script. It declares the new size of
@@ -64,7 +68,7 @@
     this value to ``originstname+X`` where ``X`` is the instance's name.
 
 VOL_CNAME
-    The name of the Disk config object (optional).
+    The human-readable name of the Disk config object (optional).
 
 VOL_UUID
     The uuid of the Disk config object.
@@ -73,7 +77,11 @@
     The name of the volume's snapshot.
 
 VOL_SNAPSHOT_SIZE
-    The size of the volume's size
+    The size of the volume's snapshot.
+
+VOL_OPEN_EXCLUSIVE
+    Whether the volume will be opened for exclusive access or not.
+    This will be False (denoting shared access) during migration.
 
 EXECUTABLE SCRIPTS
 ------------------
@@ -121,6 +129,23 @@
 If the requested volume is already mapped, then the script should just
 return to its stdout the path which is already mapped to.
 
+In case the storage technology supports userspace access to volumes as
+well, e.g. the QEMU Hypervisor can see an RBD volume using its embedded
+driver for the RBD protocol, then the provider can return extra lines
+denoting the available userspace access URIs per hypervisor. The URI
+should be in the following format: <hypervisor>:<uri>. For example, a
+RADOS provider should return kvm:rbd:<pool>/<volume name> in the second
+line of stdout after the local block device path (e.g. /dev/rbd1).
+
+So, if the ``access`` disk parameter is ``userspace`` for the ext disk
+template, then the QEMU command will end up having file=<URI> in
+the ``-drive`` option.
+
+In case the storage technology supports *only* userspace access to
+volumes, then the first line of stdout should be an empty line, denoting
+that a local block device is not available. If neither a block device
+nor a URI is returned, then Ganeti will complain.
+
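+For example, a provider whose volumes are reachable both as a block device
+and via a userspace URI could print (values are illustrative)::
+
+  /dev/rbd1
+  kvm:rbd:mypool/myvolume
+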
 detach
 ~~~~~~
 
@@ -212,7 +237,7 @@
 The *snapshot* script is used to take a snapshot of the given volume.
 
 The ``VOL_SNAPSHOT_NAME`` and ``VOL_SNAPSHOT_SIZE`` variables contain
-the name and size of the snapshot that is about to be taken.
+the name and size of the snapshot we are about to create.
 
 Currently this operation is used only during gnt-backup export and
 Ganeti sets those values to ``VOL_NAME.snap`` and ``VOL_SIZE``
@@ -224,6 +249,34 @@
 implement it. Of course if it is not present, instance backup export
 will not be supported for the given provider.
 
+open
+~~~~
+
+The *open* script is used to open the given volume.
+
+This makes the volume ready for I/O.
+
+The ``VOL_OPEN_EXCLUSIVE`` variable denotes whether the volume will be
+opened for exclusive access or not. It is True by default and
+False (denoting shared access) during migration.
+
+The script returns ``0`` on success.
+
+Please note that this script is optional and not all providers need to
+implement it.
+
+close
+~~~~~
+
+The *close* script is used to close the given volume.
+
+This disables I/O on the volume.
+
+The script returns ``0`` on success.
+
+Please note that this script is optional and not all providers need to
+implement it.
+
 TEXT FILES
 ----------
 
diff --git a/man/ganeti-os-interface.rst b/man/ganeti-os-interface.rst
index 5ebcaa0..77e2642 100644
--- a/man/ganeti-os-interface.rst
+++ b/man/ganeti-os-interface.rst
@@ -70,7 +70,12 @@
     either a block device or a regular file, in which case the OS
     scripts should use ``losetup`` (if they need to mount it). E.g. the
     first disk of the instance might be exported as
-    ``DISK_0_PATH=/dev/drbd0``.
+    ``DISK_0_PATH=/dev/drbd0``. If the disk is only accessible via a
+    userspace URI, this will not be set.
+
+DISK_%N_URI
+    The userspace URI to the storage for disk N of the instance, if
+    configured.
 
 DISK_%N_ACCESS
     This is how the hypervisor will export the instance disks: either
@@ -201,12 +206,22 @@
 given block device. The output of this program will be passed
 during restore to the **import** command.
 
-The specific disk to backup is denoted by two additional environment
-variables: ``EXPORT_INDEX`` which denotes the index in the instance
-disks structure (and could be used for example to skip the second disk
-if not needed for backup) and ``EXPORT_DEVICE`` which has the same value
-as ``DISK_N_PATH`` but is duplicated here for easier usage by shell
-scripts (rather than parse the ``DISK_...`` variables).
+The specific disk to back up is denoted by four additional environment
+variables:
+
+EXPORT_INDEX
+    The index in the instance disks structure (and could be used for
+    example to skip the second disk if not needed for backup).
+
+EXPORT_DISK_PATH
+    Alias for ``DISK_N_PATH``. It is duplicated here for easier usage
+    by shell scripts (rather than parse the ``DISK_...`` variables).
+
+EXPORT_DISK_URI
+    Alias for ``DISK_N_URI``, analogous to ``EXPORT_DISK_PATH``.
+
+EXPORT_DEVICE
+    Historical alias for ``EXPORT_DISK_PATH``.
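+
+For example (illustrative only), a minimal export script could simply stream
+the disk contents to its standard output with::
+
+  dd if="$EXPORT_DISK_PATH" bs=1M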
 
 To provide the user with an estimate on how long the export will take,
 a predicted size can be written to the file descriptor passed in the
@@ -228,7 +243,8 @@
 stdin.
 
 The difference in variables is that the current disk is denoted by
-``IMPORT_DEVICE`` and ``IMPORT_INDEX`` (instead of ``EXPORT_...``).
+``IMPORT_DISK_PATH``, ``IMPORT_DISK_URI``, ``IMPORT_DEVICE`` and
+``IMPORT_INDEX`` (instead of ``EXPORT_...``).
 
 rename
 ~~~~~~
diff --git a/man/gnt-backup.rst b/man/gnt-backup.rst
index 0f00b80..ecb0923 100644
--- a/man/gnt-backup.rst
+++ b/man/gnt-backup.rst
@@ -29,7 +29,7 @@
 | [\--ignore-remove-failures] [\--submit] [\--print-jobid]
 | [\--transport-compression=*compression-mode*]
 | [\--zero-free-space] [\--zeroing-timeout-fixed]
-| [\--zeroing-timeout-per-mib]
+| [\--zeroing-timeout-per-mib] [\--long-sleep]
 | {*instance*}
 
 Exports an instance to the target node. All the instance data and
@@ -62,6 +62,12 @@
 determining the minimum time to wait, and the latter how much longer
 to wait per MiB of data the instance has.
 
+The ``--long-sleep`` option allows Ganeti to keep the instance shut
+down for the entire duration of the export if necessary. This is
+needed if snapshots are not supported by the underlying storage type,
+or if the creation of snapshots fails for some reason - e.g. lack of
+space.
+
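+For example (names are illustrative), a file-based instance could be
+exported with::
+
+  gnt-backup export -n node1.example.com --long-sleep inst1.example.com
+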
 Should the snapshotting or transfer of any of the instance disks
 fail, the backup will not complete and any previous backups will be
 preserved. The exact details of the failures will be shown during the
diff --git a/man/gnt-filter.rst b/man/gnt-filter.rst
index 159f01a..1a5bbab 100644
--- a/man/gnt-filter.rst
+++ b/man/gnt-filter.rst
@@ -211,7 +211,7 @@
 order to throttle replication traffic.
 ::
 
-  gnt-filter add '--predicates=[["opcode", ["=", "OP_ID", "OP_INSTNCE_REPLACE_DISKS"]]]' '--action=RATE_LIMIT 10'
+  gnt-filter add '--predicates=[["opcode", ["=", "OP_ID", "OP_INSTANCE_REPLACE_DISKS"]]]' '--action=RATE_LIMIT 10'
 
 .. vim: set textwidth=72 :
 .. Local Variables:
diff --git a/man/gnt-instance.rst b/man/gnt-instance.rst
index 62d34e8..a29fd79 100644
--- a/man/gnt-instance.rst
+++ b/man/gnt-instance.rst
@@ -32,7 +32,7 @@
 |  \| {size=*VAL*,provider=*PROVIDER*}[,param=*value*... ][,options...]
 |  \| {-s|\--os-size} *SIZE*}
 | [\--no-ip-check] [\--no-name-check] [\--no-conflicts-check]
-| [\--no-start] [\--no-install]
+| [\--no-start] [\--no-install] [{\--forthcoming \| \--commit}]
 | [\--net=*N* [:options...] \| \--no-nics]
 | [{-B|\--backend-parameters} *BEPARAMS*]
 | [{-H|\--hypervisor-parameters} *HYPERVISOR* [: option=*value*... ]]
@@ -196,6 +196,18 @@
 instance (without an OS, it most likely won't be able to start-up
 successfully).
 
+If the ``--forthcoming`` option is passed, Ganeti will not try to create
+the instance or its disks at all. Instead the instance will only be added
+to the configuration, so that the resources are reserved.
+If the ``--commit`` option is passed, then it is a prerequisite that
+an instance with that name has already been added to the configuration
+as a forthcoming instance and the request is to replace this instance
+by the newly created real one.
+Note that if the reason for reserving an instance is that DNS names
+still need to be propagated, the reservation has to be done with
+``--no-name-check`` and ``--no-ip-check`` as these options are not
+implied by ``--forthcoming``.
+
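+For example (node and instance names are illustrative), one could first
+reserve the instance and create it for real later::
+
+  gnt-instance add --forthcoming --no-name-check --no-ip-check \
+    -t drbd -n node1:node2 -o debootstrap -s 10G inst1.example.com
+  gnt-instance add --commit \
+    -t drbd -n node1:node2 -o debootstrap -s 10G inst1.example.com
+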
 The ``-B (--backend-parameters)`` option specifies the backend
 parameters for the instance. If no such parameters are specified, the
 values are inherited from the cluster. Possible parameters are:
@@ -1334,8 +1346,10 @@
 | [\--disk add:size=*SIZE*[,options...] \|
 |  \--disk *N*:add,size=*SIZE*[,options...] \|
 |  \--disk *N*:add,size=*SIZE*,provider=*PROVIDER*[,options...][,param=*value*... ] \|
+|  \--disk *N*:attach,{name=*NAME* | uuid=*UUID*}\|
 |  \--disk *ID*:modify[,options...]
 |  \--disk [*ID*:]remove]
+|  \--disk [*ID*:]detach]
 | [\{-t|\--disk-template} { plain | rbd } \|
 |  \{-t|\--disk-template} drbd -n *new_secondary*] [\--no-wait-for-sync] \|
 |  \{-t|\--disk-template} ext {-e|--ext-params} {provider=*PROVIDER*}[,param=*value*... ] \|
@@ -1392,20 +1406,27 @@
 by ballooning it up or down to the new value.
 
 The ``--disk add:size=*SIZE*,[options..]`` option adds a disk to the
-instance, and ``--disk *N*:add:size=*SIZE*,[options..]`` will add a disk
-to the the instance at a specific index. The available options are the
-same as in the **add** command (``spindles``, ``mode``, ``name``, ``vg``,
-``metavg`` and ``access``). Per default, gnt-instance waits for the disk
+instance, and ``--disk *N*:add,size=*SIZE*,[options..]`` will add a disk
+to the instance at a specific index. The available options are the same
+as in the **add** command (``spindles``, ``mode``, ``name``, ``vg``,
+``metavg`` and ``access``). By default, gnt-instance waits for the disk
 mirror to sync.
 If you do not want this behavior, use the ``--no-wait-for-sync`` option.
 When adding an ExtStorage disk, the ``provider=*PROVIDER*`` option is
 also mandatory and specifies the ExtStorage provider. Also, for
 ExtStorage disks arbitrary parameters can be passed as additional comma
-separated options, same as in the **add** command. The ``--disk remove``
+separated options, same as in the **add** command. The
+``--disk attach:name=*NAME*`` option attaches an existing disk to the
+instance at the last disk index and ``--disk *N*:attach,name=*NAME*``
+will attach a disk to the instance at a specific index. The accepted
+disk identifiers are its ``name`` or ``uuid``. The ``--disk remove``
 option will remove the last disk of the instance. Use
-``--disk `` *ID*``:remove`` to remove a disk by its identifier. *ID*
-can be the index of the disk, the disks's name or the disks's UUID. The
-``--disk *ID*:modify[,options...]`` will change the options of the disk.
+``--disk `` *ID*``:remove`` to remove a disk by its identifier. *ID* can
+be the index of the disk, the disk's name or the disk's UUID. The
+above also applies to the ``--disk detach`` option, which removes a disk
+from an instance but keeps it in the configuration and doesn't destroy
+it. The ``--disk *ID*:modify[,options...]`` will change the options of
+the disk.
 Available options are:
 
 mode
diff --git a/man/gnt-node.rst b/man/gnt-node.rst
index a8f5bd7..0940d7f 100644
--- a/man/gnt-node.rst
+++ b/man/gnt-node.rst
@@ -97,6 +97,7 @@
 
 | **evacuate** [-f] [\--early-release] [\--submit] [\--print-jobid]
 | [{-I|\--iallocator} *NAME* \| {-n|\--new-secondary} *destination\_node*]
+| [--ignore-soft-errors]
 | [{-p|\--primary-only} \| {-s|\--secondary-only} ]
 |  {*node*}
 
@@ -141,6 +142,8 @@
 the default disk template, even if the instance's disk templates differ
 from that.
 
+The ``--ignore-soft-errors`` option is passed through to the allocator.
+
 See **ganeti**\(7) for a description of ``--submit`` and other common
 options.
 
diff --git a/man/hbal.rst b/man/hbal.rst
index 00b1b74..8043a46 100644
--- a/man/hbal.rst
+++ b/man/hbal.rst
@@ -134,6 +134,8 @@
 - standard deviation of the dynamic load on the nodes, for cpus,
   memory, disk and network
 - standard deviation of the CPU load provided by MonD
+- the count of instances with primary and secondary in the same failure
+  domain
 
 The free memory and free disk values help ensure that all nodes are
 somewhat balanced in their resource usage. The reserved memory helps
@@ -142,7 +144,8 @@
 N+1. And finally, the N+1 percentage helps guide the algorithm towards
 eliminating N+1 failures, if possible.
 
-Except for the N+1 failures and offline instances counts, we use the
+Except for the N+1 failures, offline instances counts, and failure
+domain violation counts, we use the
 standard deviation since when used with values within a fixed range
 (we use percents expressed as values between zero and one) it gives
 consistent results across all metrics (there are some small issues
@@ -242,6 +245,21 @@
 each hypervisor used and explictly state the allowed migration directions
 by means of *htools:allowmigration:* tags.
 
+LOCATION TAGS
+~~~~~~~~~~~~~
+
+Within a node group, certain nodes might be more likely to fail simultaneously
+due to a common cause of error (e.g., if they share the same power supply unit).
+Ganeti can be made aware of those common causes of failure by means of tags.
+
+cluster tags *htools:nlocation:a*, *htools:nlocation:b*, etc
+  This make make node tags of the form *a:\**, *b:\**, etc be considered
+  to have a common cause of failure.
+
+Instances with primary and secondary node having a common cause of failure are
+considered badly placed. While such placements are always allowed, they count
+heavily towards the cluster score.
+
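+For example (tag names are illustrative), nodes sharing a power supply unit
+could be grouped by adding the cluster tag *htools:nlocation:power* and
+tagging the nodes accordingly::
+
+  gnt-cluster add-tags htools:nlocation:power
+  gnt-node add-tags node1.example.com power:psu-a
+  gnt-node add-tags node2.example.com power:psu-a
+  gnt-node add-tags node3.example.com power:psu-b
+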
 OPTIONS
 -------
 
diff --git a/pylintrc b/pylintrc
index 51f9072..47be2b8 100644
--- a/pylintrc
+++ b/pylintrc
@@ -77,7 +77,9 @@
 
 [FORMAT]
 max-line-length = 80
-max-module-lines = 4500
+# TODO: if you hit this limit, split the module and reduce this number to
+# the size of the next biggest module.
+max-module-lines = 3600
 indent-string = "  "
 indent-after-paren = 2
 
diff --git a/qa/ganeti-qa.py b/qa/ganeti-qa.py
index 3be0614..5d9748b 100755
--- a/qa/ganeti-qa.py
+++ b/qa/ganeti-qa.py
@@ -466,8 +466,7 @@
   # FIXME: export explicitly bails out on file based storage. other non-lvm
   # based storage types are untested, though. Also note that import could still
   # work, but is deeply embedded into the "export" case.
-  if (qa_config.TestEnabled("instance-export") and
-      instance.disk_template not in constants.DTS_FILEBASED):
+  if qa_config.TestEnabled("instance-export"):
     RunTest(qa_instance.TestInstanceExportNoTarget, instance)
 
     pnode = inodes[0]
@@ -492,8 +491,7 @@
 
   # FIXME: inter-cluster-instance-move crashes on file based instances :/
   # See Issue 414.
-  if (qa_config.TestEnabled([qa_rapi.Enabled, "inter-cluster-instance-move"])
-      and (instance.disk_template not in constants.DTS_FILEBASED)):
+  if (qa_config.TestEnabled([qa_rapi.Enabled, "inter-cluster-instance-move"])):
     newinst = qa_config.AcquireInstance()
     try:
       tnode = qa_config.AcquireNode(exclude=inodes)
diff --git a/qa/qa_instance.py b/qa/qa_instance.py
index 28c7db5..da4381e 100644
--- a/qa/qa_instance.py
+++ b/qa/qa_instance.py
@@ -97,16 +97,13 @@
         else:
           nodes.append(nodestr)
 
-  disk_template = info["Disk template"]
-  if not disk_template:
-    raise qa_error.Error("Can't get instance disk template")
-  storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]
-
   re_drbdnode = re.compile(r"^([^\s,]+),\s+minor=([0-9]+)$")
   vols = []
   drbd_min = {}
+  dtypes = []
   for (count, diskinfo) in enumerate(info["Disks"]):
     (dtype, _) = diskinfo["disk/%s" % count].split(",", 1)
+    dtypes.append(dtype)
     if dtype == constants.DT_DRBD8:
       for child in diskinfo["child devices"]:
         vols.append(child["logical_id"])
@@ -121,6 +118,10 @@
     elif dtype == constants.DT_PLAIN:
       vols.append(diskinfo["logical_id"])
 
+  # TODO remove and modify calling sites
+  disk_template = utils.GetDiskTemplateString(dtypes)
+  storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]
+
   assert nodes
   assert len(nodes) < 2 or vols
   return {
@@ -1083,10 +1084,13 @@
 def TestInstanceExport(instance, node):
   """gnt-backup export -n ..."""
   name = instance.name
-  # Export does not work for file-based templates, thus we skip the test
+  options = ["gnt-backup", "export", "-n", node.primary]
+
+  # For files and shared files, the --long-sleep option should be used
   if instance.disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]:
-    return
-  AssertCommand(["gnt-backup", "export", "-n", node.primary, name])
+    options.append("--long-sleep")
+
+  AssertCommand(options + [name])
   return qa_utils.ResolveInstanceName(name)
 
 
diff --git a/qa/qa_utils.py b/qa/qa_utils.py
index 89609c7..3dfe03f 100644
--- a/qa/qa_utils.py
+++ b/qa/qa_utils.py
@@ -73,6 +73,9 @@
 _QA_OUTPUT = pathutils.GetLogFilename("qa-output")
 
 
+_RETRIES = 3
+
+
 (INST_DOWN,
  INST_UP) = range(500, 502)
 
@@ -286,11 +289,7 @@
   if tty:
     args.append("-t")
 
-  if strict:
-    tmp = "yes"
-  else:
-    tmp = "no"
-  args.append("-oStrictHostKeyChecking=%s" % tmp)
+  args.append("-oStrictHostKeyChecking=%s" % ("yes" if strict else "no", ))
   args.append("-oClearAllForwardings=yes")
   args.append("-oForwardAgent=yes")
   if opts:
@@ -382,6 +381,13 @@
   return out
 
 
+def _NoTimeout(state):
+  """False iff the command timed out."""
+  rcode, out = state
+
+  return rcode == 0 or not ('TimeoutError' in out or 'timed out' in out)
+
+
 def GetCommandOutput(node, cmd, tty=False, use_multiplexer=True, log_cmd=True,
                      fail=False):
   """Returns the output of a command executed on the given node.
@@ -401,11 +407,17 @@
   @param fail: whether the command is expected to fail
   """
   assert cmd
-  p = StartLocalCommand(GetSSHCommand(node, cmd, tty=tty,
-                                      use_multiplexer=use_multiplexer),
-                        stdout=subprocess.PIPE, log_cmd=log_cmd)
-  rcode = p.wait()
-  out = _GetCommandStdout(p)
+
+  def CallCommand():
+    command = GetSSHCommand(node, cmd, tty=tty,
+                            use_multiplexer=use_multiplexer)
+    p = StartLocalCommand(command, stdout=subprocess.PIPE, log_cmd=log_cmd)
+    rcode = p.wait()
+    out = _GetCommandStdout(p)
+    return rcode, out
+
+  # TODO: make retries configurable
+  rcode, out = utils.CountRetry(_NoTimeout, CallCommand, _RETRIES)
   _AssertRetCode(rcode, fail, cmd, node)
   return out
 
diff --git a/src/Ganeti/BasicTypes.hs b/src/Ganeti/BasicTypes.hs
index 9d9bd61..15a26a3 100644
--- a/src/Ganeti/BasicTypes.hs
+++ b/src/Ganeti/BasicTypes.hs
@@ -77,14 +77,6 @@
   , emptyListSet
   ) where
 
--- The following macro is just a temporary solution for 2.12 and 2.13.
--- Since 2.14 cabal creates proper macros for all dependencies.
-#define MIN_VERSION_monad_control(maj,min,rev) \
-  (((maj)<MONAD_CONTROL_MAJOR)|| \
-   (((maj)==MONAD_CONTROL_MAJOR)&&((min)<=MONAD_CONTROL_MINOR))|| \
-   (((maj)==MONAD_CONTROL_MAJOR)&&((min)==MONAD_CONTROL_MINOR)&& \
-    ((rev)<=MONAD_CONTROL_REV)))
-
 import Control.Applicative
 import Control.Exception (try)
 import Control.Monad
diff --git a/src/Ganeti/Compat.hs b/src/Ganeti/Compat.hs
index 66ed0f2..e5276d1 100644
--- a/src/Ganeti/Compat.hs
+++ b/src/Ganeti/Compat.hs
@@ -52,7 +52,7 @@
 -- This wraps either the old or the new name of the function,
 -- depending on the detected library version.
 rwhnf :: Control.Parallel.Strategies.Strategy a
-#ifdef PARALLEL3
+#if MIN_VERSION_parallel(3,0,0)
 rwhnf = Control.Parallel.Strategies.rseq
 #else
 rwhnf = Control.Parallel.Strategies.rwhnf
diff --git a/src/Ganeti/Config.hs b/src/Ganeti/Config.hs
index 0e1cfc2..97be65f 100644
--- a/src/Ganeti/Config.hs
+++ b/src/Ganeti/Config.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE ViewPatterns #-}
+
 {-| Implementation of the Ganeti configuration database.
 
 -}
@@ -64,6 +66,7 @@
     , getInstAllNodes
     , getInstDisks
     , getInstDisksFromObj
+    , getDrbdMinorsForDisk
     , getDrbdMinorsForInstance
     , getFilledInstHvParams
     , getFilledInstBeParams
@@ -80,10 +83,12 @@
     ) where
 
 import Control.Applicative
+import Control.Arrow ((&&&))
 import Control.Monad
 import Control.Monad.State
 import qualified Data.Foldable as F
 import Data.List (foldl', nub)
+import Data.Maybe (fromMaybe)
 import Data.Monoid
 import qualified Data.Map as M
 import qualified Data.Set as S
@@ -125,11 +130,16 @@
 
 -- * Query functions
 
+-- | Annotate Nothing as missing parameter and apply the given
+-- transformation otherwise
+withMissingParam :: String -> (a -> ErrorResult b) -> Maybe a -> ErrorResult b
+withMissingParam = maybe . Bad . ParameterError
+
 -- | Computes the nodes covered by a disk.
 computeDiskNodes :: Disk -> S.Set String
 computeDiskNodes dsk =
   case diskLogicalId dsk of
-    LIDDrbd8 nodeA nodeB _ _ _ _ -> S.fromList [nodeA, nodeB]
+    Just (LIDDrbd8 nodeA nodeB _ _ _ _) -> S.fromList [nodeA, nodeB]
     _ -> S.empty
 
 -- | Computes all disk-related nodes of an instance. For non-DRBD,
@@ -143,21 +153,22 @@
 
 -- | Computes all nodes of an instance.
 instNodes :: ConfigData -> Instance -> S.Set String
-instNodes cfg inst = instPrimaryNode inst `S.insert` instDiskNodes cfg inst
+instNodes cfg inst = maybe id S.insert (instPrimaryNode inst)
+                      $ instDiskNodes cfg inst
 
 -- | Computes the secondary nodes of an instance. Since this is valid
 -- only for DRBD, we call directly 'instDiskNodes', skipping over the
 -- extra primary insert.
 instSecondaryNodes :: ConfigData -> Instance -> S.Set String
 instSecondaryNodes cfg inst =
-  instPrimaryNode inst `S.delete` instDiskNodes cfg inst
+  maybe id S.delete (instPrimaryNode inst) $ instDiskNodes cfg inst
 
 -- | Get instances of a given node.
 -- The node is specified through its UUID.
 getNodeInstances :: ConfigData -> String -> ([Instance], [Instance])
 getNodeInstances cfg nname =
     let all_inst = M.elems . fromContainer . configInstances $ cfg
-        pri_inst = filter ((== nname) . instPrimaryNode) all_inst
+        pri_inst = filter ((== Just nname) . instPrimaryNode) all_inst
         sec_inst = filter ((nname `S.member`) . instSecondaryNodes cfg) all_inst
     in (pri_inst, sec_inst)
 
@@ -256,8 +267,10 @@
   in case getItem "Instance" name instances of
        -- if not found by uuid, we need to look it up by name
        Ok inst -> Ok inst
-       Bad _ -> let by_name = M.mapKeys
-                              (instName . (M.!) instances) instances
+       Bad _ -> let by_name =
+                      M.delete ""
+                      . M.mapKeys (fromMaybe "" . instName . (M.!) instances)
+                      $ instances
                 in getItem "Instance" name by_name
 
 -- | Looks up a disk by uuid.
@@ -286,12 +299,12 @@
 -- | Computes a node group's node params.
 getGroupNdParams :: ConfigData -> NodeGroup -> FilledNDParams
 getGroupNdParams cfg ng =
-  fillNDParams (clusterNdparams $ configCluster cfg) (groupNdparams ng)
+  fillParams (clusterNdparams $ configCluster cfg) (groupNdparams ng)
 
 -- | Computes a node group's ipolicy.
 getGroupIpolicy :: ConfigData -> NodeGroup -> FilledIPolicy
 getGroupIpolicy cfg ng =
-  fillIPolicy (clusterIpolicy $ configCluster cfg) (groupIpolicy ng)
+  fillParams (clusterIpolicy $ configCluster cfg) (groupIpolicy ng)
 
 -- | Computes a group\'s (merged) disk params.
 getGroupDiskParams :: ConfigData -> NodeGroup -> GroupDiskParams
@@ -318,14 +331,19 @@
 getFilledInstHvParams :: [String] -> ConfigData -> Instance -> HvParams
 getFilledInstHvParams globals cfg inst =
   -- First get the defaults of the parent
-  let hvName = instHypervisor inst
+  let maybeHvName = instHypervisor inst
       hvParamMap = fromContainer . clusterHvparams $ configCluster cfg
-      parentHvParams = maybe M.empty fromContainer $ M.lookup hvName hvParamMap
+      parentHvParams =
+        maybe M.empty fromContainer (maybeHvName >>= flip M.lookup hvParamMap)
   -- Then the os defaults for the given hypervisor
-      osName = instOs inst
+      maybeOsName = instOs inst
       osParamMap = fromContainer . clusterOsHvp $ configCluster cfg
-      osHvParamMap = maybe M.empty fromContainer $ M.lookup osName osParamMap
-      osHvParams = maybe M.empty fromContainer $ M.lookup hvName osHvParamMap
+      osHvParamMap =
+        maybe M.empty (maybe M.empty fromContainer . flip M.lookup osParamMap)
+          maybeOsName
+      osHvParams =
+        maybe M.empty (maybe M.empty fromContainer . flip M.lookup osHvParamMap)
+          maybeHvName
   -- Then the child
       childHvParams = fromContainer . instHvparams $ inst
   -- Helper function
@@ -338,16 +356,18 @@
 getFilledInstBeParams cfg inst = do
   let beParamMap = fromContainer . clusterBeparams . configCluster $ cfg
   parentParams <- getItem "FilledBeParams" C.ppDefault beParamMap
-  return $ fillBeParams parentParams (instBeparams inst)
+  return $ fillParams parentParams (instBeparams inst)
 
 -- | Retrieves the instance os params, missing values filled with cluster
 -- defaults. This does NOT include private and secret parameters.
 getFilledInstOsParams :: ConfigData -> Instance -> OsParams
 getFilledInstOsParams cfg inst =
-  let osLookupName = takeWhile (/= '+') (instOs inst)
+  let maybeOsLookupName = liftM (takeWhile (/= '+')) (instOs inst)
       osParamMap = fromContainer . clusterOsparams $ configCluster cfg
       childOsParams = instOsparams inst
-  in case getItem "OsParams" osLookupName osParamMap of
+  in case withMissingParam "Instance without OS"
+            (flip (getItem "OsParams") osParamMap)
+            maybeOsLookupName of
        Ok parentOsParams -> GenericContainer $
                               fillDict (fromContainer parentOsParams)
                                        (fromContainer childOsParams) []
@@ -356,13 +376,15 @@
 -- | Looks up an instance's primary node.
 getInstPrimaryNode :: ConfigData -> String -> ErrorResult Node
 getInstPrimaryNode cfg name =
-  liftM instPrimaryNode (getInstance cfg name) >>= getNode cfg
+  getInstance cfg name
+  >>= withMissingParam "Instance without primary node" return . instPrimaryNode
+  >>= getNode cfg
 
 -- | Retrieves all nodes hosting a DRBD disk
 getDrbdDiskNodes :: ConfigData -> Disk -> [Node]
 getDrbdDiskNodes cfg disk =
   let retrieved = case diskLogicalId disk of
-                    LIDDrbd8 nodeA nodeB _ _ _ _ ->
+                    Just (LIDDrbd8 nodeA nodeB _ _ _ _) ->
                       justOk [getNode cfg nodeA, getNode cfg nodeB]
                     _                            -> []
   in retrieved ++ concatMap (getDrbdDiskNodes cfg) (diskChildren disk)
@@ -374,9 +396,9 @@
 getInstAllNodes :: ConfigData -> String -> ErrorResult [Node]
 getInstAllNodes cfg name = do
   inst_disks <- getInstDisks cfg name
-  let diskNodes = concatMap (getDrbdDiskNodes cfg) inst_disks
+  let disk_nodes = concatMap (getDrbdDiskNodes cfg) inst_disks
   pNode <- getInstPrimaryNode cfg name
-  return . nub $ pNode:diskNodes
+  return . nub $ pNode:disk_nodes
 
 -- | Get disks for a given instance.
 -- The instance is specified by name or uuid.
@@ -397,9 +419,9 @@
   -> Disk -> a
 collectFromDrbdDisks f = col
   where
-    col Disk { diskLogicalId = (LIDDrbd8 nA nB port mA mB secret)
-             , diskChildren = ch
-             } = f nA nB port mA mB secret <> F.foldMap col ch
+    col (diskLogicalId &&& diskChildren ->
+           (Just (LIDDrbd8 nA nB port mA mB secret), ch)) =
+             f nA nB port mA mB secret <> F.foldMap col ch
     col d = F.foldMap col (diskChildren d)
 
 -- | Returns the DRBD secrets of a given 'Disk'
@@ -418,7 +440,7 @@
   let child_minors = concatMap (getDrbdMinorsForNode node) (diskChildren disk)
       this_minors =
         case diskLogicalId disk of
-          LIDDrbd8 nodeA nodeB _ minorA minorB _
+          Just (LIDDrbd8 nodeA nodeB _ minorA minorB _)
             | nodeA == node -> [(minorA, nodeB)]
             | nodeB == node -> [(minorB, nodeA)]
           _ -> []
@@ -445,10 +467,10 @@
                      -> Instance
                      -> [(String, Int, String, String, String, String)]
 getInstMinorsForNode cfg node inst =
-  let role = if node == instPrimaryNode inst
+  let role = if Just node == instPrimaryNode inst
                then rolePrimary
                else roleSecondary
-      iname = instName inst
+      iname = fromMaybe "" $ instName inst
       inst_disks = case getInstDisksFromObj cfg inst of
                      Ok disks -> disks
                      Bad _ -> []
@@ -461,6 +483,7 @@
      zip [(0::Int)..] $ inst_disks
 
 -- | Builds link -> ip -> instname map.
+-- For instances without a name, we insert the uuid instead.
 --
 -- TODO: improve this by splitting it into multiple independent functions:
 --
@@ -474,11 +497,12 @@
   let cluster = configCluster cfg
       instances = M.elems . fromContainer . configInstances $ cfg
       defparams = (M.!) (fromContainer $ clusterNicparams cluster) C.ppDefault
-      nics = concatMap (\i -> [(instName i, nic) | nic <- instNics i])
+      nics = concatMap (\i -> [(fromMaybe (instUuid i) $ instName i, nic)
+                                | nic <- instNics i])
              instances
   in foldl' (\accum (iname, nic) ->
                let pparams = nicNicparams nic
-                   fparams = fillNicParams defparams pparams
+                   fparams = fillParams defparams pparams
                    link = nicpLink fparams
                in case nicIp nic of
                     Nothing -> accum
@@ -500,7 +524,7 @@
 getNodeNdParams cfg node = do
   group <- getGroupOfNode cfg node
   let gparams = getGroupNdParams cfg group
-  return $ fillNDParams gparams (nodeNdparams node)
+  return $ fillParams gparams (nodeNdparams node)
 
 -- * Network
 
@@ -539,19 +563,20 @@
 
 getInstanceLVsByNode :: ConfigData -> Instance -> ErrorResult NodeLVsMap
 getInstanceLVsByNode cd inst =
-    (MM.fromList . lvsByNode (instPrimaryNode inst))
-    <$> getInstDisksFromObj cd inst
+    withMissingParam "Instance without Primary Node"
+      (\i -> return $ MM.fromList . lvsByNode i)
+      (instPrimaryNode inst)
+    <*> getInstDisksFromObj cd inst
   where
     lvsByNode :: String -> [Disk] -> [(String, LogicalVolume)]
     lvsByNode node = concatMap (lvsByNode1 node)
     lvsByNode1 :: String -> Disk -> [(String, LogicalVolume)]
-    lvsByNode1 _    Disk { diskLogicalId = (LIDDrbd8 nA nB _ _ _ _)
-                         , diskChildren = ch
-                         } = lvsByNode nA ch ++ lvsByNode nB ch
-    lvsByNode1 node Disk { diskLogicalId = (LIDPlain lv) }
-                           = [(node, lv)]
-    lvsByNode1 node Disk { diskChildren = ch }
-                           = lvsByNode node ch
+    lvsByNode1 _ (diskLogicalId &&& diskChildren
+                   -> (Just (LIDDrbd8 nA nB _ _ _ _), ch)) =
+                         lvsByNode nA ch ++ lvsByNode nB ch
+    lvsByNode1 node (diskLogicalId -> (Just (LIDPlain lv))) =
+                         [(node, lv)]
+    lvsByNode1 node (diskChildren -> ch) = lvsByNode node ch
 
 getAllLVs :: ConfigData -> ErrorResult (S.Set LogicalVolume)
 getAllLVs cd = mconcat <$> mapM (liftM MM.values . getInstanceLVsByNode cd)
diff --git a/src/Ganeti/Constants.hs b/src/Ganeti/Constants.hs
index dc03b63..7e960af 100644
--- a/src/Ganeti/Constants.hs
+++ b/src/Ganeti/Constants.hs
@@ -45,7 +45,7 @@
 -}
 module Ganeti.Constants where
 
-import Control.Arrow ((***))
+import Control.Arrow ((***),(&&&))
 import Data.List ((\\))
 import Data.Map (Map)
 import qualified Data.Map as Map (empty, fromList, keys, insert)
@@ -904,6 +904,9 @@
 dtGluster :: String
 dtGluster = Types.diskTemplateToRaw DTGluster
 
+dtMixed :: String
+dtMixed = "mixed"
+
 -- | This is used to order determine the default disk template when
 -- the list of enabled disk templates is inferred from the current
 -- state of the cluster.  This only happens on an upgrade from a
@@ -926,16 +929,9 @@
 mapDiskTemplateStorageType :: Map String String
 mapDiskTemplateStorageType =
   Map.fromList $
-  map (Types.diskTemplateToRaw *** Types.storageTypeToRaw)
-  [(DTBlock, StorageBlock),
-   (DTDrbd8, StorageLvmVg),
-   (DTExt, StorageExt),
-   (DTSharedFile, StorageSharedFile),
-   (DTFile, StorageFile),
-   (DTDiskless, StorageDiskless),
-   (DTPlain, StorageLvmVg),
-   (DTRbd, StorageRados),
-   (DTGluster, StorageGluster)]
+  map (   Types.diskTemplateToRaw
+      &&& Types.storageTypeToRaw . diskTemplateToStorageType)
+  [minBound..maxBound]
 
 -- | The set of network-mirrored disk templates
 dtsIntMirror :: FrozenSet String
@@ -981,6 +977,13 @@
   ConstantUtils.mkSet $ map Types.diskTemplateToRaw
   [DTSharedFile, DTFile, DTGluster]
 
+-- | The set of file based disk templates whose path is tied to the instance
+-- name
+dtsInstanceDependentPath :: FrozenSet String
+dtsInstanceDependentPath =
+  ConstantUtils.mkSet $ map Types.diskTemplateToRaw
+  [DTSharedFile, DTFile]
+
 -- | The set of disk templates that can be moved by copying
 --
 -- Note: a requirement is that they're not accessed externally or
@@ -989,6 +992,11 @@
 dtsCopyable =
   ConstantUtils.mkSet $ map Types.diskTemplateToRaw [DTPlain, DTFile]
 
+-- | The set of disk templates which can be snapshot.
+dtsSnapshotCapable :: FrozenSet String
+dtsSnapshotCapable =
+  ConstantUtils.mkSet $ map Types.diskTemplateToRaw [DTPlain, DTDrbd8, DTExt]
+
 -- | The set of disk templates that are supported by exclusive_storage
 dtsExclStorage :: FrozenSet String
 dtsExclStorage = ConstantUtils.mkSet $ map Types.diskTemplateToRaw [DTPlain]
@@ -1210,14 +1218,20 @@
 ddmAdd :: String
 ddmAdd = Types.ddmFullToRaw DdmFullAdd
 
+ddmAttach :: String
+ddmAttach = Types.ddmFullToRaw DdmFullAttach
+
 ddmModify :: String
 ddmModify = Types.ddmFullToRaw DdmFullModify
 
 ddmRemove :: String
 ddmRemove = Types.ddmFullToRaw DdmFullRemove
 
+ddmDetach :: String
+ddmDetach = Types.ddmFullToRaw DdmFullDetach
+
 ddmsValues :: FrozenSet String
-ddmsValues = ConstantUtils.mkSet [ddmAdd, ddmRemove]
+ddmsValues = ConstantUtils.mkSet [ddmAdd, ddmAttach, ddmRemove, ddmDetach]
 
 ddmsValuesWithModify :: FrozenSet String
 ddmsValuesWithModify = ConstantUtils.mkSet $ map Types.ddmFullToRaw [minBound..]
@@ -1463,6 +1477,12 @@
 esActionSnapshot :: String
 esActionSnapshot = "snapshot"
 
+esActionOpen :: String
+esActionOpen = "open"
+
+esActionClose :: String
+esActionClose = "close"
+
 esScriptCreate :: String
 esScriptCreate = esActionCreate
 
@@ -1487,6 +1507,12 @@
 esScriptSnapshot :: String
 esScriptSnapshot = esActionSnapshot
 
+esScriptOpen :: String
+esScriptOpen = esActionOpen
+
+esScriptClose :: String
+esScriptClose = esActionClose
+
 esScripts :: FrozenSet String
 esScripts =
   ConstantUtils.mkSet [esScriptAttach,
@@ -1496,7 +1522,9 @@
                        esScriptRemove,
                        esScriptSetinfo,
                        esScriptVerify,
-                       esScriptSnapshot]
+                       esScriptSnapshot,
+                       esScriptOpen,
+                       esScriptClose]
 
 esParametersFile :: String
 esParametersFile = "parameters.list"
@@ -2512,17 +2540,22 @@
 idiskAccess :: String
 idiskAccess = "access"
 
+idiskType :: String
+idiskType = "dev_type"
+
 idiskParamsTypes :: Map String VType
 idiskParamsTypes =
-  Map.fromList [(idiskSize, VTypeSize),
-                (idiskSpindles, VTypeInt),
-                (idiskMode, VTypeString),
-                (idiskAdopt, VTypeString),
-                (idiskVg, VTypeString),
-                (idiskMetavg, VTypeString),
-                (idiskProvider, VTypeString),
-                (idiskAccess, VTypeString),
-                (idiskName, VTypeMaybeString)]
+  Map.fromList [ (idiskSize, VTypeSize)
+               , (idiskSpindles, VTypeInt)
+               , (idiskMode, VTypeString)
+               , (idiskAdopt, VTypeString)
+               , (idiskVg, VTypeString)
+               , (idiskMetavg, VTypeString)
+               , (idiskProvider, VTypeString)
+               , (idiskAccess, VTypeString)
+               , (idiskName, VTypeMaybeString)
+               , (idiskType, VTypeString)
+               ]
 
 idiskParams :: FrozenSet String
 idiskParams = ConstantUtils.mkSet (Map.keys idiskParamsTypes)
@@ -4268,9 +4301,9 @@
 wconfdDefRwto :: Int
 wconfdDefRwto = 60
 
--- | The prefix of the WConfD livelock file name
-wconfdLivelockPrefix :: String
-wconfdLivelockPrefix = "wconf-daemon"
+-- | The prefix of the WConfD livelock file name.
+wconfLivelockPrefix :: String
+wconfLivelockPrefix = "wconf-daemon"
 
 -- * Confd
 
diff --git a/src/Ganeti/DataCollectors/InstStatus.hs b/src/Ganeti/DataCollectors/InstStatus.hs
index f466f09..578819b 100644
--- a/src/Ganeti/DataCollectors/InstStatus.hs
+++ b/src/Ganeti/DataCollectors/InstStatus.hs
@@ -143,15 +143,16 @@
 -- Builds the status of an instance using runtime information about the Xen
 -- Domains, their uptime information and the static information provided by
 -- the ConfD server.
-buildStatus :: Map.Map String Domain -> Map.Map Int UptimeInfo -> Instance
-  -> IO InstStatus
+buildStatus :: Map.Map String Domain -> Map.Map Int UptimeInfo
+               -> RealInstanceData
+               -> IO InstStatus
 buildStatus domains uptimes inst = do
-  let name = instName inst
+  let name = realInstName inst
       currDomain = Map.lookup name domains
       idNum = fmap domId currDomain
       currUInfo = idNum >>= (`Map.lookup` uptimes)
       uptime = fmap uInfoUptime currUInfo
-      adminState = instAdminState inst
+      adminState = realInstAdminState inst
       actualState =
         if adminState == AdminDown && isNothing currDomain
           then ActualShutdown
@@ -166,11 +167,11 @@
   return $
     InstStatus
       name
-      (instUuid inst)
+      (realInstUuid inst)
       adminState
       actualState
       uptime
-      (instMtime inst)
+      (realInstMtime inst)
       trail
       status
 
@@ -193,11 +194,13 @@
   answer <- runResultT $ getInstances node srvAddr srvPort
   inst <- exitIfBad "Can't get instance info from ConfD" answer
   d <- getInferredDomInfo
+  let toReal (RealInstance i) = Just i
+      toReal _ = Nothing
   reportData <-
     case d of
       BT.Ok domains -> do
         uptimes <- getUptimeInfo
-        let primaryInst =  fst inst
+        let primaryInst =  mapMaybe toReal $ fst inst
         iStatus <- mapM (buildStatus domains uptimes) primaryInst
         let globalStatus = computeGlobalStatus iStatus
         return $ ReportData iStatus globalStatus
diff --git a/src/Ganeti/DataCollectors/Lv.hs b/src/Ganeti/DataCollectors/Lv.hs
index 8922425..4846988 100644
--- a/src/Ganeti/DataCollectors/Lv.hs
+++ b/src/Ganeti/DataCollectors/Lv.hs
@@ -49,6 +49,7 @@
 import Control.Monad
 import Data.Attoparsec.Text.Lazy as A
 import Data.List
+import Data.Maybe (mapMaybe)
 import Data.Text.Lazy (pack, unpack)
 import Network.BSD (getHostName)
 import System.Process
@@ -132,26 +133,30 @@
         ++ show contexts ++ "\n" ++ errorMessage
     A.Done _ lvinfoD -> return lvinfoD
 
--- | Get the list of instances on the current node along with their disks,
+-- | Get the list of real instances on the current node along with their disks,
 -- either from a provided file or by querying Confd.
-getInstDiskList :: Options -> IO [(Instance, [Disk])]
+getInstDiskList :: Options -> IO [(RealInstanceData, [Disk])]
 getInstDiskList opts = do
   instances <- maybe fromConfd fromFile $ optInstances opts
   exitIfBad "Unable to obtain the list of instances" instances
   where
-    fromConfdUnchecked :: IO (BT.Result [(Instance, [Disk])])
+    fromConfdUnchecked :: IO (BT.Result [(RealInstanceData, [Disk])])
     fromConfdUnchecked = do
       let srvAddr = optConfdAddr opts
           srvPort = optConfdPort opts
-      getHostName >>= \n -> BT.runResultT $ getInstanceDisks n srvAddr srvPort
+          toReal (RealInstance i, dsks) = Just (i, dsks)
+          toReal _ = Nothing
+      getHostName >>= \n -> BT.runResultT
+                            . liftM (mapMaybe toReal)
+                            $ getInstanceDisks n srvAddr srvPort
 
-    fromConfd :: IO (BT.Result [(Instance, [Disk])])
+    fromConfd :: IO (BT.Result [(RealInstanceData, [Disk])])
     fromConfd =
       liftM (either (BT.Bad . show) id)
       (E.try fromConfdUnchecked ::
-          IO (Either IOError (BT.Result [(Instance, [Disk])])))
+          IO (Either IOError (BT.Result [(RealInstanceData, [Disk])])))
 
-    fromFile :: FilePath -> IO (BT.Result [(Instance, [Disk])])
+    fromFile :: FilePath -> IO (BT.Result [(RealInstanceData, [Disk])])
     fromFile inputFile = do
       contents <-
         ((E.try $ readFile inputFile) :: IO (Either IOError String))
@@ -159,7 +164,7 @@
       return . fromJResult "Not a list of instances" $ J.decode contents
 
 -- | Adds the name of the instance to the information about one logical volume.
-addInstNameToOneLv :: [(Instance, [Disk])] -> LVInfo -> LVInfo
+addInstNameToOneLv :: [(RealInstanceData, [Disk])] -> LVInfo -> LVInfo
 addInstNameToOneLv instDiskList lvInfo =
  let lv = LogicalVolume (lviVgName lvInfo) (lviName lvInfo)
      instanceHasDisk = any (includesLogicalId lv) . snd
@@ -167,10 +172,10 @@
    in
      case rightInstance of
        Nothing -> lvInfo
-       Just (i, _) -> lvInfo { lviInstance = Just $ instName i }
+       Just (i, _) -> lvInfo { lviInstance = Just $ realInstName i }
 
 -- | Adds the name of the instance to the information about logical volumes.
-addInstNameToLv :: [(Instance, [Disk])] -> [LVInfo] -> [LVInfo]
+addInstNameToLv :: [(RealInstanceData, [Disk])] -> [LVInfo] -> [LVInfo]
 addInstNameToLv instDisksList = map (addInstNameToOneLv instDisksList)
 
 -- | This function computes the JSON representation of the LV status.
diff --git a/src/Ganeti/DataCollectors/Types.hs b/src/Ganeti/DataCollectors/Types.hs
index b1414c3..9170ad9 100644
--- a/src/Ganeti/DataCollectors/Types.hs
+++ b/src/Ganeti/DataCollectors/Types.hs
@@ -1,4 +1,4 @@
-{-# LANGUAGE TemplateHaskell #-}
+{-# LANGUAGE TemplateHaskell, CPP #-}
 {-# OPTIONS_GHC -fno-warn-orphans #-}
 
 {-| Implementation of the Ganeti data collector types.
@@ -53,7 +53,9 @@
   ) where
 
 import Control.DeepSeq (NFData, rnf)
+#if !MIN_VERSION_containers(0,5,0)
 import Control.Seq (using, seqFoldable, rdeepseq)
+#endif
 import Data.Char
 import Data.Ratio
 import qualified Data.Map as Map
@@ -85,8 +87,8 @@
     let s' = fromJSString s
     in case Map.lookup s' categoryNames of
          Just category -> Ok category
-         Nothing -> fail $ "Invalid category name " ++ s' ++ " for type\
-                           \ DCCategory"
+         Nothing -> fail $ "Invalid category name " ++ s' ++ " for type"
+                           ++ " DCCategory"
   readJSON v = fail $ "Invalid JSON value " ++ show v ++ " for type DCCategory"
 
 -- | The possible status codes of a data collector.
@@ -144,28 +146,29 @@
 -- | Type for the value field of the `CollectorMap` below.
 data CollectorData = CPULoadData (Seq.Seq (ClockTime, [Int]))
 
-{-
+instance NFData ClockTime where
+  rnf (TOD x y) = rnf x `seq` rnf y
 
-Naturally, we want to make CollectorData an instance of NFData as
-follows.
+#if MIN_VERSION_containers(0,5,0)
 
 instance NFData CollectorData where
   rnf (CPULoadData x) = rnf x
 
-However, Seq.Seq only became an instance of NFData in version 0.5.0.0
-of containers (Released 2012). So, for the moment, we use a generic
-way to reduce to normal form. In later versions of Ganeti, where we
-have the infra structure to do so, we will choose implementation depending
-on the version of the containers library available.
+#else
+
+{-
+
+In older versions of the containers library, Seq is not an
+instance of NFData, so use a generic way to reduce to normal
+form.
 
 -}
 
-instance NFData ClockTime where
-  rnf (TOD x y) = rnf x `seq` rnf y
-
 instance NFData CollectorData where
   rnf (CPULoadData x) =  (x `using` seqFoldable rdeepseq) `seq` ()
 
+#endif
+
 -- | Type for the map storing the data of the stateful DataCollectors.
 type CollectorMap = Map.Map String CollectorData
 
diff --git a/src/Ganeti/HTools/Backend/IAlloc.hs b/src/Ganeti/HTools/Backend/IAlloc.hs
index 7ef51a1..c4a8459 100644
--- a/src/Ganeti/HTools/Backend/IAlloc.hs
+++ b/src/Ganeti/HTools/Backend/IAlloc.hs
@@ -114,8 +114,12 @@
   let getRunSt AdminOffline = StatusOffline
       getRunSt AdminDown = StatusDown
       getRunSt AdminUp = Running
-  return (n, Instance.create n mem disk disks vcpus (getRunSt state) tags
-             True 0 0 dt su nics)
+  -- Not forthcoming by default.
+  forthcoming <- extract "forthcoming" `orElse` Ok False
+  return
+    (n,
+     Instance.create n mem disk disks vcpus (getRunSt state) tags
+                     True 0 0 dt su nics forthcoming)
 
 -- | Parses an instance as found in the cluster instance list.
 parseInstance :: NameAssoc -- ^ The node name-to-index association list
@@ -143,6 +147,7 @@
 parseNode ktg n a = do
   let desc = "invalid data for node '" ++ n ++ "'"
       extract x = tryFromObj desc a x
+      extractDef def key = fromObjWithDefault a key def
   offline <- extract "offline"
   drained <- extract "drained"
   guuid   <- extract "group"
@@ -150,6 +155,9 @@
   let vm_capable' = fromMaybe True vm_capable
   gidx <- lookupGroup ktg n guuid
   ndparams <- extract "ndparams" >>= asJSObject
+  -- Although the tags field is reported by iallocator.py, some test
+  -- inputs don't contain it, so fall back to an empty list.
+  tags <- extractDef [] "tags"
   excl_stor <- tryFromObj desc (fromJSObject ndparams) "exclusive_storage"
   let live = not offline && vm_capable'
       lvextract def = eitherLive live def . extract
@@ -164,7 +172,8 @@
   dfree  <- lvextract 0 "free_disk"
   ctotal <- lvextract 0.0 "total_cpus"
   cnos <- lvextract 0 "reserved_cpus"
-  let node = Node.create n mtotal mnode mfree dtotal dfree ctotal cnos
+  let node = flip Node.setNodeTags tags $
+             Node.create n mtotal mnode mfree dtotal dfree ctotal cnos
              (not live || drained) sptotal spfree gidx excl_stor
   return (n, node)
 
diff --git a/src/Ganeti/HTools/Backend/Luxi.hs b/src/Ganeti/HTools/Backend/Luxi.hs
index 9ee80ca..5a3cb1d 100644
--- a/src/Ganeti/HTools/Backend/Luxi.hs
+++ b/src/Ganeti/HTools/Backend/Luxi.hs
@@ -138,7 +138,8 @@
      ["name", "disk_usage", "be/memory", "be/vcpus",
       "status", "pnode", "snodes", "tags", "oper_ram",
       "be/auto_balance", "disk_template",
-      "be/spindle_use", "disk.sizes", "disk.spindles"] Qlang.EmptyFilter
+      "be/spindle_use", "disk.sizes", "disk.spindles",
+      "forthcoming"] Qlang.EmptyFilter
 
 -- | The input data for cluster query.
 queryClusterInfoMsg :: L.LuxiOp
@@ -180,7 +181,7 @@
 parseInstance ktn [ name, disk, mem, vcpus
                   , status, pnode, snodes, tags, oram
                   , auto_balance, disk_template, su
-                  , dsizes, dspindles ] = do
+                  , dsizes, dspindles, forthcoming ] = do
   xname <- annotateResult "Parsing new instance" (fromJValWithStatus name)
   let convert a = genericConvert "Instance" xname a
   xdisk <- convert "disk_usage" disk
@@ -200,9 +201,11 @@
   xsu <- convert "be/spindle_use" su
   xdsizes <- convert "disk.sizes" dsizes
   xdspindles <- convertArrayMaybe "Instance" xname "disk.spindles" dspindles
+  xforthcoming <- convert "forthcoming" forthcoming
   let disks = zipWith Instance.Disk xdsizes xdspindles
       inst = Instance.create xname xmem xdisk disks
              xvcpus xrunning xtags xauto_balance xpnode snode xdt xsu []
+             xforthcoming
   return (xname, inst)
 
 parseInstance _ v = fail ("Invalid instance query result: " ++ show v)
diff --git a/src/Ganeti/HTools/Backend/Rapi.hs b/src/Ganeti/HTools/Backend/Rapi.hs
index e829fc4..df93d6a 100644
--- a/src/Ganeti/HTools/Backend/Rapi.hs
+++ b/src/Ganeti/HTools/Backend/Rapi.hs
@@ -149,9 +149,11 @@
   auto_balance <- extract "auto_balance" beparams
   dt <- extract "disk_template" a
   su <- extract "spindle_use" beparams
+  -- Not forthcoming by default.
+  forthcoming <- extract "forthcoming" a `orElse` Ok False
   let disks = zipWith Instance.Disk dsizes dspindles
   let inst = Instance.create name mem disk disks vcpus running tags
-             auto_balance pnode snode dt su []
+             auto_balance pnode snode dt su [] forthcoming
   return (name, inst)
 
 -- | Construct a node from a JSON object.
diff --git a/src/Ganeti/HTools/Backend/Text.hs b/src/Ganeti/HTools/Backend/Text.hs
index 5dd8282..72b8ab2 100644
--- a/src/Ganeti/HTools/Backend/Text.hs
+++ b/src/Ganeti/HTools/Backend/Text.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE TupleSections #-}
+
 {-| Parsing data from text-files.
 
 This module holds the code for loading the cluster state from text
@@ -122,7 +124,7 @@
       snode = (if sidx == Node.noSecondary
                  then ""
                  else Container.nameOf nl sidx)
-  in printf "%s|%d|%d|%d|%s|%s|%s|%s|%s|%s|%d|%s"
+  in printf "%s|%d|%d|%d|%s|%s|%s|%s|%s|%s|%d|%s|%s"
        iname (Instance.mem inst) (Instance.dsk inst)
        (Instance.vcpus inst) (instanceStatusToRaw (Instance.runSt inst))
        (if Instance.autoBalance inst then "Y" else "N")
@@ -132,6 +134,7 @@
        (case Instance.getTotalSpindles inst of
           Nothing -> "-"
           Just x -> show x)
+       (if Instance.forthcoming inst then "Y" else "N")
 
 -- | Generate instance file data from instance objects.
 serializeInstances :: Node.List -> Instance.List -> String
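
The serialization above gains a trailing Y/N column for the forthcoming flag, and the loader hunk below accepts both the new and the old row length, defaulting old rows to N. A minimal sketch of that round trip, assuming a reduced row of just the name and the flag:

    -- Reduced instance row: name plus the forthcoming flag.
    data Inst = Inst { instName :: String, isForthcoming :: Bool }
      deriving (Show)

    serialize :: Inst -> String
    serialize i = instName i ++ "|" ++ (if isForthcoming i then "Y" else "N")

    -- Split a row on '|'.
    splitBar :: String -> [String]
    splitBar = foldr step [[]]
      where step '|' acc       = [] : acc
            step c  (cur:rest) = (c : cur) : rest
            step c  []         = [[c]]    -- not reached with the [[]] seed

    -- Parse a row; rows written by older versions lack the last column.
    parse :: String -> Either String Inst
    parse row = case splitBar row of
      [n, "Y"] -> Right (Inst n True)
      [n, "N"] -> Right (Inst n False)
      [n]      -> Right (Inst n False)   -- old format: never forthcoming
      _        -> Left ("invalid row: " ++ row)

    main :: IO ()
    main = do
      print (parse (serialize (Inst "inst1" True)))  -- Right (Inst "inst1" True)
      print (parse "inst2")                          -- old row, defaults to False
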
@@ -278,7 +281,7 @@
                                                -- instance name and
                                                -- the instance object
 loadInst ktn [ name, mem, dsk, vcpus, status, auto_bal, pnode, snode
-             , dt, tags, su, spindles ] = do
+             , dt, tags, su, spindles, forthcoming_yn ] = do
   pidx <- lookupNode ktn name pnode
   sidx <- if null snode
             then return Node.noSecondary
@@ -298,15 +301,26 @@
   vspindles <- case spindles of
                  "-" -> return Nothing
                  _ -> liftM Just (tryRead name spindles)
+  forthcoming <- case forthcoming_yn of
+                   "Y" -> return True
+                   "N" -> return False
+                   x -> fail $ "Invalid forthcoming value '"
+                               ++ x ++ "' for instance " ++ name
   let disk = Instance.Disk dsize vspindles
   let vtags = commaSplit tags
       newinst = Instance.create name vmem dsize [disk] vvcpus vstatus vtags
-                auto_balance pidx sidx disk_template spindle_use []
+                auto_balance pidx sidx disk_template spindle_use [] forthcoming
   when (Instance.hasSecondary newinst && sidx == pidx) . fail $
     "Instance " ++ name ++ " has same primary and secondary node - " ++ pnode
   return (name, newinst)
 
 loadInst ktn [ name, mem, dsk, vcpus, status, auto_bal, pnode, snode
+             , dt, tags, su, spindles ] =
+  loadInst ktn [ name, mem, dsk, vcpus, status, auto_bal, pnode, snode
+               , dt, tags, su, spindles, "N" ] -- older file formats carry no
+                                               -- forthcoming field
+
+loadInst ktn [ name, mem, dsk, vcpus, status, auto_bal, pnode, snode
              , dt, tags ] = loadInst ktn [ name, mem, dsk, vcpus, status,
                                            auto_bal, pnode, snode, dt, tags,
                                            "1" ]
diff --git a/src/Ganeti/HTools/Cluster.hs b/src/Ganeti/HTools/Cluster.hs
index bdf40a3..55cbb6b 100644
--- a/src/Ganeti/HTools/Cluster.hs
+++ b/src/Ganeti/HTools/Cluster.hs
@@ -62,6 +62,7 @@
   , printNodes
   , printInsts
   -- * Balancing functions
+  , setInstanceLocationScore
   , doNextBalance
   , tryBalance
   , compCV
@@ -95,8 +96,10 @@
 import Data.Maybe (fromJust, fromMaybe, isJust, isNothing)
 import Data.Ord (comparing)
 import Text.Printf (printf)
+import qualified Data.Set as Set
 
 import Ganeti.BasicTypes
+import qualified Ganeti.Constants as C
 import Ganeti.HTools.AlgorithmParams (AlgorithmOptions(..), defaultOptions)
 import qualified Ganeti.HTools.Container as Container
 import qualified Ganeti.HTools.Instance as Instance
@@ -108,7 +111,7 @@
 import qualified Ganeti.OpCodes as OpCodes
 import Ganeti.Utils
 import Ganeti.Utils.Statistics
-import Ganeti.Types (EvacMode(..), mkNonEmpty)
+import Ganeti.Types (EvacMode(..), mkNonEmpty, mkNonNegative)
 
 -- * Types
 
@@ -342,19 +345,27 @@
 -- with their statistical accumulation function and a bit to decide whether it
 -- is a statistics for online nodes.
 detailedCVInfoExt :: [((Double, String), ([Double] -> Statistics, Bool))]
-detailedCVInfoExt = [ ((1,  "free_mem_cv"), (getStdDevStatistics, True))
-                    , ((1,  "free_disk_cv"), (getStdDevStatistics, True))
+detailedCVInfoExt = [ ((0.5,  "free_mem_cv"), (getStdDevStatistics, True))
+                    , ((0.5,  "free_disk_cv"), (getStdDevStatistics, True))
                     , ((1,  "n1_cnt"), (getSumStatistics, True))
                     , ((1,  "reserved_mem_cv"), (getStdDevStatistics, True))
                     , ((4,  "offline_all_cnt"), (getSumStatistics, False))
                     , ((16, "offline_pri_cnt"), (getSumStatistics, False))
-                    , ((1,  "vcpu_ratio_cv"), (getStdDevStatistics, True))
+                    , ( (0.5,  "vcpu_ratio_cv")
+                      , (getStdDevStatistics, True))
                     , ((1,  "cpu_load_cv"), (getStdDevStatistics, True))
                     , ((1,  "mem_load_cv"), (getStdDevStatistics, True))
                     , ((1,  "disk_load_cv"), (getStdDevStatistics, True))
                     , ((1,  "net_load_cv"), (getStdDevStatistics, True))
                     , ((2,  "pri_tags_score"), (getSumStatistics, True))
-                    , ((1,  "spindles_cv"), (getStdDevStatistics, True))
+                    , ((0.5,  "spindles_cv"), (getStdDevStatistics, True))
+                    , ((0.5,  "free_mem_cv_forth"), (getStdDevStatistics, True))
+                    , ( (0.5,  "free_disk_cv_forth")
+                      , (getStdDevStatistics, True))
+                    , ( (0.5,  "vcpu_ratio_cv_forth")
+                      , (getStdDevStatistics, True))
+                    , ((0.5,  "spindles_cv_forth"), (getStdDevStatistics, True))
+                    , ((1,  "location_score"), (getSumStatistics, True))
                     ]
 
 -- | The names and weights of the individual elements in the CV list.
@@ -378,7 +389,9 @@
 compDetailedCVNode :: Node.Node -> [Double]
 compDetailedCVNode node =
   let mem = Node.pMem node
+      memF = Node.pMemForth node
       dsk = Node.pDsk node
+      dskF = Node.pDskForth node
       n1 = fromIntegral
            $ if Node.failN1 node
                then length (Node.sList node) + length (Node.pList node)
@@ -388,14 +401,19 @@
       isec = fromIntegral . length $ Node.sList node
       ioff = ipri + isec
       cpu = Node.pCpuEff node
+      cpuF = Node.pCpuEffForth node
       DynUtil c1 m1 d1 nn1 = Node.utilLoad node
       DynUtil c2 m2 d2 nn2 = Node.utilPool node
       (c_load, m_load, d_load, n_load) = (c1/c2, m1/m2, d1/d2, nn1/nn2)
       pri_tags = fromIntegral $ Node.conflictingPrimaries node
       spindles = Node.instSpindles node / Node.hiSpindles node
+      spindlesF = Node.instSpindlesForth node / Node.hiSpindles node
+      location_score = fromIntegral $ Node.locationScore node
   in [ mem, dsk, n1, res, ioff, ipri, cpu
      , c_load, m_load, d_load, n_load
      , pri_tags, spindles
+     , memF, dskF, cpuF, spindlesF
+     , location_score
      ]
 
 -- | Compute the statistics of a cluster.
@@ -452,6 +470,17 @@
 getOnline :: Node.List -> [Node.Node]
 getOnline = filter (not . Node.offline) . Container.elems
 
+-- | Sets the location score of an instance, given its primary
+-- and secondary node.
+setInstanceLocationScore :: Instance.Instance -- ^ the original instance
+                         -> Node.Node -- ^ the primary node of the instance
+                         -> Node.Node -- ^ the secondary node of the instance
+                         -> Instance.Instance -- ^ the instance with the
+                                              -- location score updated
+setInstanceLocationScore t p s =
+  t { Instance.locationScore =
+         Set.size $ Node.locationTags p `Set.intersection` Node.locationTags s }
+
 -- * Balancing functions
 
 -- | Compute best table. Note that the ordering of the arguments is important.
@@ -496,17 +525,18 @@
       tgt_n = Container.find new_pdx nl
       int_p = Node.removePri old_p inst
       int_s = Node.removeSec old_s inst
+      new_inst = Instance.setPri (setInstanceLocationScore inst tgt_n int_s)
+                 new_pdx
       force_p = Node.offline old_p || force
       new_nl = do -- OpResult
                   -- check that the current secondary can host the instance
                   -- during the migration
         Node.checkMigration old_p old_s
         Node.checkMigration old_s tgt_n
-        tmp_s <- Node.addPriEx force_p int_s inst
-        let tmp_s' = Node.removePri tmp_s inst
-        new_p <- Node.addPriEx force_p tgt_n inst
-        new_s <- Node.addSecEx force_p tmp_s' inst new_pdx
-        let new_inst = Instance.setPri inst new_pdx
+        tmp_s <- Node.addPriEx force_p int_s new_inst
+        let tmp_s' = Node.removePri tmp_s new_inst
+        new_p <- Node.addPriEx force_p tgt_n new_inst
+        new_s <- Node.addSecEx force_p tmp_s' new_inst new_pdx
         return (Container.add new_pdx new_p $
                 Container.addTwo old_pdx int_p old_sdx new_s nl,
                 new_inst, new_pdx, old_sdx)
@@ -518,13 +548,18 @@
       old_sdx = Instance.sNode inst
       old_s = Container.find old_sdx nl
       tgt_n = Container.find new_sdx nl
+      pnode = Container.find old_pdx nl
+      pnode' = Node.removePri pnode inst
       int_s = Node.removeSec old_s inst
       force_s = Node.offline old_s || force
-      new_inst = Instance.setSec inst new_sdx
-      new_nl = Node.addSecEx force_s tgt_n inst old_pdx >>=
-               \new_s -> return (Container.addTwo new_sdx
-                                 new_s old_sdx int_s nl,
-                                 new_inst, old_pdx, new_sdx)
+      new_inst = Instance.setSec (setInstanceLocationScore inst pnode tgt_n)
+                 new_sdx
+      new_nl = do
+        new_s <- Node.addSecEx force_s tgt_n new_inst old_pdx
+        pnode'' <- Node.addPriEx True pnode' new_inst
+        return (Container.add old_pdx pnode'' $
+                Container.addTwo new_sdx new_s old_sdx int_s nl,
+                new_inst, old_pdx, new_sdx)
   in new_nl
 
 -- Replace the secondary and failover (r:np, f)
@@ -533,12 +568,13 @@
       tgt_n = Container.find new_pdx nl
       int_p = Node.removePri old_p inst
       int_s = Node.removeSec old_s inst
+      new_inst = Instance.setBoth (setInstanceLocationScore inst tgt_n int_p)
+                 new_pdx old_pdx
       force_s = Node.offline old_s || force
       new_nl = do -- OpResult
         Node.checkMigration old_p tgt_n
-        new_p <- Node.addPriEx force tgt_n inst
-        new_s <- Node.addSecEx force_s int_p inst new_pdx
-        let new_inst = Instance.setBoth inst new_pdx old_pdx
+        new_p <- Node.addPriEx force tgt_n new_inst
+        new_s <- Node.addSecEx force_s int_p new_inst new_pdx
         return (Container.add new_pdx new_p $
                 Container.addTwo old_pdx new_s old_sdx int_s nl,
                 new_inst, new_pdx, old_pdx)
@@ -551,11 +587,12 @@
       int_p = Node.removePri old_p inst
       int_s = Node.removeSec old_s inst
       force_p = Node.offline old_p || force
+      new_inst = Instance.setBoth (setInstanceLocationScore inst int_s tgt_n)
+                 old_sdx new_sdx
       new_nl = do -- OpResult
         Node.checkMigration old_p old_s
-        new_p <- Node.addPriEx force_p int_s inst
-        new_s <- Node.addSecEx force_p tgt_n inst old_sdx
-        let new_inst = Instance.setBoth inst old_sdx new_sdx
+        new_p <- Node.addPriEx force_p int_s new_inst
+        new_s <- Node.addSecEx force_p tgt_n new_inst old_sdx
         return (Container.add new_sdx new_s $
                 Container.addTwo old_sdx new_p old_pdx int_p nl,
                 new_inst, old_sdx, new_sdx)
@@ -588,10 +625,11 @@
   in do
     Instance.instMatchesPolicy inst (Node.iPolicy tgt_p)
       (Node.exclStorage tgt_p)
-    new_p <- Node.addPriEx force tgt_p inst
-    new_s <- Node.addSec tgt_s inst new_pdx
-    let new_inst = Instance.setBoth inst new_pdx new_sdx
-        new_nl = Container.addTwo new_pdx new_p new_sdx new_s nl
+    let new_inst = Instance.setBoth (setInstanceLocationScore inst tgt_p tgt_s)
+                   new_pdx new_sdx
+    new_p <- Node.addPriEx force tgt_p new_inst
+    new_s <- Node.addSec tgt_s new_inst new_pdx
+    let new_nl = Container.addTwo new_pdx new_p new_sdx new_s nl
         new_stats = updateClusterStatisticsTwice stats
                       (tgt_p, new_p) (tgt_s, new_s)
     return (new_nl, new_inst, [new_p, new_s], compCVfromStats new_stats)
@@ -1689,7 +1727,7 @@
                       Bad msg -> error $ "Empty node name for idx " ++
                                  show n ++ ": " ++ msg ++ "??"
                       Ok ne -> Just ne
-      opF = OpCodes.OpInstanceMigrate
+      opF' = OpCodes.OpInstanceMigrate
               { OpCodes.opInstanceName        = iname
               , OpCodes.opInstanceUuid        = Nothing
               , OpCodes.opMigrationMode       = Nothing -- default
@@ -1704,6 +1742,20 @@
               , OpCodes.opIgnoreHvversions    = True
               }
       opFA n = opF { OpCodes.opTargetNode = lookNode n } -- not drbd
+      opFforced =
+        OpCodes.OpInstanceFailover
+          { OpCodes.opInstanceName        = iname
+          , OpCodes.opInstanceUuid        = Nothing
+          , OpCodes.opShutdownTimeout     =
+              fromJust $ mkNonNegative C.defaultShutdownTimeout
+          , OpCodes.opIgnoreConsistency = False
+          , OpCodes.opTargetNode = Nothing
+          , OpCodes.opTargetNodeUuid = Nothing
+          , OpCodes.opIgnoreIpolicy = False
+          , OpCodes.opIallocator = Nothing
+          , OpCodes.opMigrationCleanup = False
+          }
+      opF = if Instance.forthcoming inst then opFforced else opF'
       opR n = OpCodes.OpInstanceReplaceDisks
                 { OpCodes.opInstanceName     = iname
                 , OpCodes.opInstanceUuid     = Nothing
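
A node's location tags name the common-failure domains it sits in, and the new instance location score is simply how many of those domains its two nodes share; since the score enters the cluster metric as a sum, the balancer prefers placements with a lower score. A minimal sketch of the score with Data.Set, assuming location tags are plain strings:

    import qualified Data.Set as Set

    -- Number of common-failure domains (location tags) two nodes share.
    locationScore :: Set.Set String -> Set.Set String -> Int
    locationScore primaryTags secondaryTags =
      Set.size (primaryTags `Set.intersection` secondaryTags)

    main :: IO ()
    main = do
      let pri = Set.fromList ["rack:r1", "power:p1"]
          sec = Set.fromList ["rack:r2", "power:p1"]
      -- Both nodes hang off the same power feed, so the score is 1.
      print (locationScore pri sec)
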
diff --git a/src/Ganeti/HTools/Instance.hs b/src/Ganeti/HTools/Instance.hs
index d58f49b..6cf062e 100644
--- a/src/Ganeti/HTools/Instance.hs
+++ b/src/Ganeti/HTools/Instance.hs
@@ -66,6 +66,7 @@
   , allNodes
   , usesLocalStorage
   , mirrorType
+  , usesMemory
   ) where
 
 import Control.Monad (liftM2)
@@ -102,8 +103,11 @@
   , spindleUse   :: Int       -- ^ The numbers of used spindles
   , allTags      :: [String]  -- ^ List of all instance tags
   , exclTags     :: [String]  -- ^ List of instance exclusion tags
+  , locationScore :: Int      -- ^ The number of common-failure domains shared
+                              -- by the instance's primary and secondary node
   , arPolicy     :: T.AutoRepairPolicy -- ^ Instance's auto-repair policy
   , nics         :: [Nic]     -- ^ NICs of the instance
+  , forthcoming  :: Bool      -- ^ Whether the instance is forthcoming
   } deriving (Show, Eq)
 
 instance T.Element Instance where
@@ -185,9 +189,9 @@
 -- later (via 'setIdx' for example).
 create :: String -> Int -> Int -> [Disk] -> Int -> T.InstanceStatus
        -> [String] -> Bool -> T.Ndx -> T.Ndx -> T.DiskTemplate -> Int
-       -> [Nic] -> Instance
+       -> [Nic] -> Bool -> Instance
 create name_init mem_init dsk_init disks_init vcpus_init run_init tags_init
-       auto_balance_init pn sn dt su nics_init =
+       auto_balance_init pn sn dt su nics_init forthcoming_init =
   Instance { name = name_init
            , alias = name_init
            , mem = mem_init
@@ -205,8 +209,10 @@
            , spindleUse = su
            , allTags = tags_init
            , exclTags = []
+           , locationScore = 0
            , arPolicy = T.ArNotEnabled
            , nics = nics_init
+           , forthcoming = forthcoming_init
            }
 
 -- | Changes the index.
@@ -384,3 +390,21 @@
 -- | A simple wrapper over 'T.templateMirrorType'.
 mirrorType :: Instance -> T.MirrorType
 mirrorType = T.templateMirrorType . diskTemplate
+
+
+-- | Whether the instance uses memory on its host node.
+-- Depends on the `InstanceStatus` and on whether the instance is forthcoming;
+-- instances that aren't running, or that don't exist yet, don't use memory.
+usesMemory :: Instance -> Bool
+usesMemory inst
+  | forthcoming inst = False
+  | otherwise        = case runSt inst of
+      T.StatusDown    -> False
+      T.StatusOffline -> False
+      T.ErrorDown     -> False
+      T.ErrorUp       -> True
+      T.NodeDown      -> True -- value has little meaning when node is down
+      T.NodeOffline   -> True -- value has little meaning when node is offline
+      T.Running       -> True
+      T.UserDown      -> False
+      T.WrongNode     -> True
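
Free-memory accounting elsewhere (for example nodeImem in the Loader) now filters on this predicate instead of the plain offline check. A minimal sketch of that use, assuming a reduced instance record with just the fields the predicate needs:

    -- Reduced view: memory plus the two flags the predicate looks at.
    data Inst = Inst { mem :: Int, running :: Bool, forthcoming :: Bool }

    -- Simplified usesMemory: the real predicate also distinguishes the
    -- various error/offline statuses.
    usesMemory :: Inst -> Bool
    usesMemory i = not (forthcoming i) && running i

    -- Memory used by a node's primary instances, as nodeImem now computes it.
    nodeImem :: [Inst] -> Int
    nodeImem = sum . map mem . filter usesMemory

    main :: IO ()
    main = print $ nodeImem
      [ Inst 1024 True  False   -- running: counted
      , Inst 2048 False False   -- stopped: not counted
      , Inst 4096 True  True    -- forthcoming: not counted
      ]                         -- prints 1024
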
diff --git a/src/Ganeti/HTools/Loader.hs b/src/Ganeti/HTools/Loader.hs
index b588547..10df2c5 100644
--- a/src/Ganeti/HTools/Loader.hs
+++ b/src/Ganeti/HTools/Loader.hs
@@ -139,7 +139,7 @@
           . zip [0..] $ name_element
   in (M.fromList name_idx, Container.fromList idx_element)
 
--- | Given am indexed node list, and the name of the master, mark it as such. 
+-- | Given an indexed node list and the name of the master, mark it as such.
 setMaster :: (Monad m) => NameAssoc -> Node.List -> String -> m Node.List
 setMaster node_names node_idx master = do
   kmaster <- maybe (fail $ "Master node " ++ master ++ " unknown") return $
@@ -147,6 +147,14 @@
   let mnode = Container.find kmaster node_idx
   return $ Container.add kmaster (Node.setMaster mnode True) node_idx
 
+-- | Given the nodes with the location tags already set correctly, compute
+-- the location score for an instance.
+setLocationScore :: Node.List -> Instance.Instance -> Instance.Instance
+setLocationScore nl inst =
+  let pnode = Container.find (Instance.pNode inst) nl
+      snode = Container.find (Instance.sNode inst) nl
+  in Cluster.setInstanceLocationScore inst pnode snode
+
 -- | For each instance, add its index to its primary and secondary nodes.
 fixNodes :: Node.List
          -> Instance.Instance
@@ -269,6 +277,14 @@
       rmigTags = Tags.getRecvMigRestrictions ctags ntags
   in Node.setRecvMigrationTags (Node.setMigrationTags node migTags) rmigTags
 
+-- | Set the location tags on a node given the cluster tags;
+-- this assumes that the node tags are already set on that node.
+addLocationTags :: [String] -- ^ cluster tags
+                -> Node.Node -> Node.Node
+addLocationTags ctags node =
+  let ntags = Node.nTags node
+  in Node.setLocationTags node $ Tags.getLocations ctags ntags
+
 -- | Initializer function that loads the data from a node and instance
 -- list and massages it into the correct format.
 mergeData :: [(String, DynUtil)]  -- ^ Instance utilisation data
@@ -299,14 +315,16 @@
       il4 = Container.map (computeAlias common_suffix .
                            updateExclTags allextags .
                            updateMovable selinst_names exinst_names) il3
-      nl2 = foldl' fixNodes nl (Container.elems il4)
-      nl3 = Container.map (setNodePolicy gl .
+      nl2 = Container.map (addLocationTags ctags) nl
+      il5 = Container.map (setLocationScore nl2) il4
+      nl3 = foldl' fixNodes nl2 (Container.elems il5)
+      nl4 = Container.map (setNodePolicy gl .
                            computeAlias common_suffix .
-                           (`Node.buildPeers` il4)) nl2
-      il5 = Container.map (disableSplitMoves nl3) il4
-      nl4 = Container.map (addMigrationTags ctags) nl3
+                           (`Node.buildPeers` il4)) nl3
+      il6 = Container.map (disableSplitMoves nl3) il5
+      nl5 = Container.map (addMigrationTags ctags) nl4
   in if' (null lkp_unknown)
-         (Ok cdata { cdNodes = nl4, cdInstances = il5 })
+         (Ok cdata { cdNodes = nl5, cdInstances = il6 })
          (Bad $ "Unknown instance(s): " ++ show(map lrContent lkp_unknown))
 
 -- | In a cluster description, clear dynamic utilisation information.
@@ -322,19 +340,14 @@
     Container.mapAccum
         (\ msgs node ->
              let nname = Node.name node
-                 nilst = map (`Container.find` il) (Node.pList node)
-                 dilst = filter Instance.instanceDown nilst
-                 adj_mem = sum . map Instance.mem $ dilst
                  delta_mem = truncate (Node.tMem node)
                              - Node.nMem node
                              - Node.fMem node
                              - nodeImem node il
-                             + adj_mem
                  delta_dsk = truncate (Node.tDsk node)
                              - Node.fDsk node
                              - nodeIdsk node il
-                 newn = Node.setFmem (Node.setXmem node delta_mem)
-                        (Node.fMem node - adj_mem)
+                 newn = node `Node.setXmem` delta_mem
                  umsg1 =
                    if delta_mem > 512 || delta_dsk > 1024
                       then printf "node %s is missing %d MB ram \
@@ -349,7 +362,7 @@
 nodeImem node il =
   let rfind = flip Container.find il
       il' = map rfind $ Node.pList node
-      oil' = filter Instance.notOffline il'
+      oil' = filter Instance.usesMemory il'
   in sum . map Instance.mem $ oil'
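
With the down-instance adjustment gone, the consistency check above reduces to comparing total memory against node, free and instance memory, warning when the unexplained difference is large. A minimal sketch of that delta and its threshold, assuming the simplified fields below (the 512 MB limit matches the warning text in the hunk above):

    import Text.Printf (printf)

    data NodeMem = NodeMem
      { tMem :: Double  -- total memory (MiB)
      , nMem :: Int     -- memory used by the node itself (MiB)
      , fMem :: Int     -- free memory as reported (MiB)
      , iMem :: Int     -- memory of primary instances that use memory (MiB)
      }

    -- Memory that cannot be attributed to the node or its instances.
    unaccountedMem :: NodeMem -> Int
    unaccountedMem n = truncate (tMem n) - nMem n - fMem n - iMem n

    main :: IO ()
    main = do
      let n     = NodeMem { tMem = 16384, nMem = 1024, fMem = 6000, iMem = 8192 }
          delta = unaccountedMem n
      if delta > 512
        then printf "node is missing %d MB ram\n" delta
        else printf "memory accounting ok (delta %d MB)\n" delta
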
 
 
diff --git a/src/Ganeti/HTools/Node.hs b/src/Ganeti/HTools/Node.hs
index cb17f24..f47a45f 100644
--- a/src/Ganeti/HTools/Node.hs
+++ b/src/Ganeti/HTools/Node.hs
@@ -38,6 +38,7 @@
   ( Node(..)
   , List
   , pCpuEff
+  , pCpuEffForth
   -- * Constructor
   , create
   -- ** Finalization after data loading
@@ -46,8 +47,8 @@
   , setAlias
   , setOffline
   , setXmem
-  , setFmem
   , setPri
+  , calcFmemOfflineOrForthcoming
   , setSec
   , setMaster
   , setNodeTags
@@ -57,6 +58,7 @@
   , setCpuSpeed
   , setMigrationTags
   , setRecvMigrationTags
+  , setLocationTags
   -- * Tag maps
   , addTags
   , delTags
@@ -113,7 +115,9 @@
 import qualified Ganeti.Constants as C
 import qualified Ganeti.OpCodes as OpCodes
 import Ganeti.Types (OobCommand(..), TagKind(..), mkNonEmpty)
+import Ganeti.HTools.Container (Container)
 import qualified Ganeti.HTools.Container as Container
+import Ganeti.HTools.Instance (Instance)
 import qualified Ganeti.HTools.Instance as Instance
 import qualified Ganeti.HTools.PeerMap as P
 
@@ -132,27 +136,51 @@
   , tMem     :: Double    -- ^ Total memory (MiB)
   , nMem     :: Int       -- ^ Node memory (MiB)
   , fMem     :: Int       -- ^ Free memory (MiB)
+  , fMemForth :: Int      -- ^ Free memory (MiB) including forthcoming
+                          --   instances
   , xMem     :: Int       -- ^ Unaccounted memory (MiB)
   , tDsk     :: Double    -- ^ Total disk space (MiB)
   , fDsk     :: Int       -- ^ Free disk space (MiB)
+  , fDskForth :: Int      -- ^ Free disk space (MiB) including forthcoming
+                          --   instances
   , tCpu     :: Double    -- ^ Total CPU count
-  , tCpuSpeed :: Double    -- ^ Relative CPU speed
+  , tCpuSpeed :: Double   -- ^ Relative CPU speed
   , nCpu     :: Int       -- ^ VCPUs used by the node OS
   , uCpu     :: Int       -- ^ Used VCPU count
+  , uCpuForth :: Int      -- ^ Used VCPU count including forthcoming instances
   , tSpindles :: Int      -- ^ Node spindles (spindle_count node parameter,
                           -- or actual spindles, see note below)
   , fSpindles :: Int      -- ^ Free spindles (see note below)
+  , fSpindlesForth :: Int -- ^ Free spindles (see note below) including
+                          --   forthcoming instances
   , pList    :: [T.Idx]   -- ^ List of primary instance indices
+  , pListForth :: [T.Idx] -- ^ List of primary instance indices including
+                          --   forthcoming instances
   , sList    :: [T.Idx]   -- ^ List of secondary instance indices
+  , sListForth :: [T.Idx] -- ^ List of secondary instance indices including
+                          --   forthcoming instances
   , idx      :: T.Ndx     -- ^ Internal index for book-keeping
   , peers    :: P.PeerMap -- ^ Pnode to instance mapping
   , failN1   :: Bool      -- ^ Whether the node has failed n1
+  , failN1Forth :: Bool   -- ^ Whether the node has failed n1, including
+                          --   forthcoming instances
   , rMem     :: Int       -- ^ Maximum memory needed for failover by
                           -- primaries of this node
+  , rMemForth :: Int      -- ^ Maximum memory needed for failover by
+                          --   primaries of this node, including forthcoming
+                          --   instances
   , pMem     :: Double    -- ^ Percent of free memory
+  , pMemForth :: Double   -- ^ Percent of free memory including forthcoming
+                          --   instances
   , pDsk     :: Double    -- ^ Percent of free disk
+  , pDskForth :: Double   -- ^ Percent of free disk including forthcoming
+                          --   instances
   , pRem     :: Double    -- ^ Percent of reserved memory
+  , pRemForth :: Double   -- ^ Percent of reserved memory including
+                          --   forthcoming instances
   , pCpu     :: Double    -- ^ Ratio of virtual to physical CPUs
+  , pCpuForth :: Double   -- ^ Ratio of virtual to physical CPUs including
+                          --   forthcoming instances
   , mDsk     :: Double    -- ^ Minimum free disk ratio
   , loDsk    :: Int       -- ^ Autocomputed from mDsk low disk
                           -- threshold
@@ -161,6 +189,8 @@
   , hiSpindles :: Double  -- ^ Limit auto-computed from policy spindle_ratio
                           -- and the node spindle count (see note below)
   , instSpindles :: Double -- ^ Spindles used by instances (see note below)
+  , instSpindlesForth :: Double -- ^ Spindles used by instances (see note
+                                --   below) including forthcoming instances
   , offline  :: Bool      -- ^ Whether the node should not be used for
                           -- allocations and skipped from score
                           -- computations
@@ -168,12 +198,18 @@
   , nTags    :: [String]  -- ^ The node tags for this node
   , utilPool :: T.DynUtil -- ^ Total utilisation capacity
   , utilLoad :: T.DynUtil -- ^ Sum of instance utilisation
-  , pTags    :: TagMap    -- ^ Primary instance exclusion tags and their count
+  , utilLoadForth :: T.DynUtil -- ^ Sum of instance utilisation, including
+                               --   forthcoming instances
+  , pTags    :: TagMap    -- ^ Primary instance exclusion tags and their
+                          --   count, including forthcoming instances
   , group    :: T.Gdx     -- ^ The node's group (index)
   , iPolicy  :: T.IPolicy -- ^ The instance policy (of the node's group)
   , exclStorage :: Bool   -- ^ Effective value of exclusive_storage
   , migTags  :: Set.Set String -- ^ migration-relevant tags
   , rmigTags :: Set.Set String -- ^ migration tags able to receive
+  , locationTags :: Set.Set String -- ^ common-failure domains the node belongs
+                                   -- to
+  , locationScore :: Int -- ^ Sum of the location scores of the instances
+                         -- having this node as primary
   } deriving (Show, Eq)
 {- A note on how we handle spindles
 
@@ -193,11 +229,16 @@
   setIdx = setIdx
   allNames n = [name n, alias n]
 
--- | Derived parameter: ratio of virutal to pysical CPUs, weighted
+-- | Derived parameter: ratio of virtual to physical CPUs, weighted
 -- by CPU speed.
 pCpuEff :: Node -> Double
 pCpuEff n = pCpu n / tCpuSpeed n
 
+-- | Derived parameter: ratio of virtual to physical CPUs, weighted
+-- by CPU speed and taking forthcoming instances into account.
+pCpuEffForth :: Node -> Double
+pCpuEffForth n = pCpuForth n / tCpuSpeed n
+
 -- | A simple name for the int, node association list.
 type AssocList = [(T.Ndx, Node)]
 
@@ -280,26 +321,41 @@
        , tMem = mem_t_init
        , nMem = mem_n_init
        , fMem = mem_f_init
+       , fMemForth = mem_f_init
        , tDsk = dsk_t_init
        , fDsk = dsk_f_init
+       , fDskForth = dsk_f_init
        , tCpu = cpu_t_init
        , tCpuSpeed = 1
        , nCpu = cpu_n_init
        , uCpu = cpu_n_init
+       , uCpuForth = cpu_n_init
        , tSpindles = spindles_t_init
        , fSpindles = spindles_f_init
+       , fSpindlesForth = spindles_f_init
        , pList = []
+       , pListForth = []
        , sList = []
+       , sListForth = []
        , failN1 = True
+       , failN1Forth = True
        , idx = -1
        , peers = P.empty
        , rMem = 0
+       , rMemForth = 0
        , pMem = fromIntegral mem_f_init / mem_t_init
+       , pMemForth = fromIntegral mem_f_init / mem_t_init
        , pDsk = if excl_stor
                 then computePDsk spindles_f_init $ fromIntegral spindles_t_init
                 else computePDsk dsk_f_init dsk_t_init
+       , pDskForth =
+           if excl_stor
+             then computePDsk spindles_f_init $ fromIntegral spindles_t_init
+             else computePDsk dsk_f_init dsk_t_init
        , pRem = 0
+       , pRemForth = 0
        , pCpu = fromIntegral cpu_n_init / cpu_t_init
+       , pCpuForth = fromIntegral cpu_n_init / cpu_t_init
        , offline = offline_init
        , isMaster = False
        , nTags = []
@@ -310,14 +366,18 @@
        , hiSpindles = computeHiSpindles (T.iPolicySpindleRatio T.defIPolicy)
                       spindles_t_init
        , instSpindles = 0
+       , instSpindlesForth = 0
        , utilPool = T.baseUtil
        , utilLoad = T.zeroUtil
+       , utilLoadForth = T.zeroUtil
        , pTags = Map.empty
        , group = group_init
        , iPolicy = T.defIPolicy
        , exclStorage = excl_stor
        , migTags = Set.empty
        , rmigTags = Set.empty
+       , locationTags = Set.empty
+       , locationScore = 0
        }
 
 -- | Conversion formula from mDsk\/tDsk to loDsk.
@@ -364,6 +424,10 @@
 setRecvMigrationTags :: Node -> Set.Set String -> Node
 setRecvMigrationTags t val = t { rmigTags = val }
 
+-- | Set the location tags
+setLocationTags :: Node -> Set.Set String -> Node
+setLocationTags t val = t { locationTags = val }
+
 -- | Sets the unaccounted memory.
 setXmem :: Node -> Int -> Node
 setXmem t val = t { xMem = val }
@@ -398,15 +462,27 @@
   let mdata = map
               (\i_idx -> let inst = Container.find i_idx il
                              mem = if Instance.usesSecMem inst
+                                      -- TODO Use usesMemory here, or change
+                                      --      usesSecMem to return False on
+                                      --      forthcoming instances?
+                                      && not (Instance.forthcoming inst)
                                      then Instance.mem inst
                                      else 0
                          in (Instance.pNode inst, mem))
               (sList t)
       pmap = P.accumArray (+) mdata
       new_rmem = computeMaxRes pmap
-      new_failN1 = fMem t <= new_rmem
+      new_failN1 = fMem t < new_rmem
       new_prem = fromIntegral new_rmem / tMem t
-  in t {peers=pmap, failN1 = new_failN1, rMem = new_rmem, pRem = new_prem}
+  in t { peers = pmap
+       , failN1 = new_failN1
+       , rMem = new_rmem
+       , pRem = new_prem
+
+       -- TODO Set failN1Forth, rMemForth, pRemForth and peersForth.
+       --      Calculate it from an mdata_forth here that doesn't have the
+       --      `not (Instance.forthcoming inst)` filter.
+       }
 
 -- | Calculate the new spindle usage
 calcSpindleUse ::
@@ -420,6 +496,17 @@
       f :: Bool -> Double -> Double -> Double -- avoid monomorphism restriction
       f = if act then incIf else decIf
 
+-- | Calculate the new spindle usage including forthcoming instances.
+calcSpindleUseForth :: Bool -- Action: True = adding instance, False = removing
+                    -> Node -> Instance.Instance -> Double
+calcSpindleUseForth _ (Node {exclStorage = True}) _ = 0.0
+calcSpindleUseForth act n@(Node {exclStorage = False}) i =
+  f (Instance.usesLocalStorage i) (instSpindlesForth n)
+    (fromIntegral $ Instance.spindleUse i)
+    where
+      f :: Bool -> Double -> Double -> Double -- avoid monomorphism restriction
+      f = if act then incIf else decIf
+
 -- | Calculate the new number of free spindles
 calcNewFreeSpindles ::
                        Bool -- Action: True = adding instance, False = removing
@@ -432,27 +519,145 @@
                else fSpindles n -- No change, as we aren't sure
     Just s -> (if act then (-) else (+)) (fSpindles n) s
 
+-- | Calculate the new number of free spindles including forthcoming instances
+calcNewFreeSpindlesForth :: Bool -- Action: True = adding instance,
+                                 --         False = removing
+                         -> Node -> Instance.Instance -> Int
+calcNewFreeSpindlesForth _ (Node {exclStorage = False}) _ = 0
+calcNewFreeSpindlesForth act n@(Node {exclStorage = True}) i =
+  case Instance.getTotalSpindles i of
+    Nothing -> if act
+               then -1 -- Force a spindle error, so the instance won't go here
+               else fSpindlesForth n -- No change, as we aren't sure
+    Just s -> (if act then (-) else (+)) (fSpindlesForth n) s
+
+
+calcFmemOfflineOrForthcoming :: Node -> Container Instance -> Int
+calcFmemOfflineOrForthcoming node allInstances =
+  let nodeInstances = map (`Container.find` allInstances) (pList node)
+  in sum . map Instance.mem
+         . filter (not . Instance.usesMemory)
+         $ nodeInstances
+
 -- | Assigns an instance to a node as primary and update the used VCPU
 -- count, utilisation data and tags map.
 setPri :: Node -> Instance.Instance -> Node
-setPri t inst = t { pList = Instance.idx inst:pList t
-                  , uCpu = new_count
-                  , pCpu = fromIntegral new_count / tCpu t
-                  , utilLoad = utilLoad t `T.addUtil` Instance.util inst
-                  , pTags = addTags (pTags t) (Instance.exclTags inst)
-                  , instSpindles = calcSpindleUse True t inst
-                  }
-  where new_count = Instance.applyIfOnline inst (+ Instance.vcpus inst)
-                    (uCpu t )
+setPri t inst
+  -- Real instance, update real fields and forthcoming fields.
+  | not (Instance.forthcoming inst) =
+      updateForthcomingFields $
+        t { pList = Instance.idx inst:pList t
+          , uCpu = new_count
+          , pCpu = fromIntegral new_count / tCpu t
+          , utilLoad = utilLoad t `T.addUtil` Instance.util inst
+          , instSpindles = calcSpindleUse True t inst
+          , locationScore = locationScore t + Instance.locationScore inst
+          }
+
+  -- Forthcoming instance, update forthcoming fields only.
+  | otherwise = updateForthcomingOnlyFields $ updateForthcomingFields t
+
+  where
+    new_count = Instance.applyIfOnline inst (+ Instance.vcpus inst) (uCpu t)
+    new_count_forth = Instance.applyIfOnline inst (+ Instance.vcpus inst)
+                                             (uCpuForth t)
+
+    uses_disk = Instance.usesLocalStorage inst
+
+    -- Updates the *Forth fields that include real and forthcoming instances.
+    updateForthcomingFields node =
+
+      let new_fMemForth = decIf (not $ Instance.usesMemory inst)
+                                (fMemForth node)
+                                (Instance.mem inst)
+
+          new_pMemForth = fromIntegral new_fMemForth / tMem node
+
+      in node
+           { pTags = addTags (pTags node) (Instance.exclTags inst)
+
+           , pListForth = Instance.idx inst:pListForth node
+           , uCpuForth = new_count_forth
+           , pCpuForth = fromIntegral new_count_forth / tCpu node
+           , utilLoadForth = utilLoadForth node `T.addUtil` Instance.util inst
+
+           , fMemForth = new_fMemForth
+           , pMemForth = new_pMemForth
+
+           -- TODO Should this be in updateForthcomingOnlyFields?
+           , instSpindlesForth = calcSpindleUseForth True node inst
+
+           -- TODO Set failN1Forth, rMemForth, pRemForth
+           }
+
+    -- This updates the fields that we do not want to update if the instance
+    -- is real (not forthcoming), in contrast to `updateForthcomingFields`
+    -- which deals with the fields that have to be updated in either case.
+    updateForthcomingOnlyFields node =
+
+      let new_fDskForth = decIf uses_disk
+                                (fDskForth node)
+                                (Instance.dsk inst)
+
+          new_free_sp_forth = calcNewFreeSpindlesForth True node inst
+          new_pDskForth = computeNewPDsk node new_free_sp_forth new_fDskForth
+
+      in node
+           { fDskForth = new_fDskForth
+           , pDskForth = new_pDskForth
+           , fSpindlesForth = new_free_sp_forth
+           }
+
 
 -- | Assigns an instance to a node as secondary and updates disk utilisation.
 setSec :: Node -> Instance.Instance -> Node
-setSec t inst = t { sList = Instance.idx inst:sList t
-                  , utilLoad = old_load { T.dskWeight = T.dskWeight old_load +
-                                          T.dskWeight (Instance.util inst) }
-                  , instSpindles = calcSpindleUse True t inst
-                  }
-  where old_load = utilLoad t
+setSec t inst
+  -- Real instance, update real fields and forthcoming fields.
+  | not (Instance.forthcoming inst) =
+      updateForthcomingFields $
+        t { sList = Instance.idx inst:sList t
+          , utilLoad = old_load { T.dskWeight = T.dskWeight old_load +
+                                  T.dskWeight (Instance.util inst) }
+          , instSpindles = calcSpindleUse True t inst
+          }
+
+  -- Forthcoming instance, update forthcoming fields only.
+  | otherwise = updateForthcomingOnlyFields $ updateForthcomingFields t
+
+  where
+    old_load = utilLoad t
+    uses_disk = Instance.usesLocalStorage inst
+
+    -- Updates the *Forth fields that include real and forthcoming instances.
+    updateForthcomingFields node =
+
+      let old_load_forth = utilLoadForth node
+      in node
+           { sListForth = Instance.idx inst:sListForth node
+           , utilLoadForth = old_load_forth
+                               { T.dskWeight = T.dskWeight old_load_forth +
+                                               T.dskWeight (Instance.util inst)
+                               }
+
+           -- TODO Should this be in updateForthcomingOnlyFields?
+           , instSpindlesForth = calcSpindleUseForth True node inst
+
+           -- TODO Set failN1Forth, rMemForth, pRemForth and peersForth
+           }
+
+    updateForthcomingOnlyFields node =
+
+      let new_fDskForth = decIf uses_disk
+                                (fDskForth node)
+                                (Instance.dsk inst)
+          new_free_sp_forth = calcNewFreeSpindlesForth True node inst
+          new_pDskForth = computeNewPDsk node new_free_sp_forth new_fDskForth
+      in node
+           { fDskForth = new_fDskForth
+           , pDskForth = new_pDskForth
+           , fSpindlesForth = new_free_sp_forth
+           }
+
 
 -- | Computes the new 'pDsk' value, handling nodes without local disk
 -- storage (we consider all their disk unused).
@@ -484,69 +689,131 @@
 setCpuSpeed :: Node -> Double -> Node
 setCpuSpeed n f = n { tCpuSpeed = f }
 
--- | Sets the free memory.
-setFmem :: Node -> Int -> Node
-setFmem t new_mem =
-  let new_n1 = new_mem < rMem t
-      new_mp = fromIntegral new_mem / tMem t
-  in t { fMem = new_mem, failN1 = new_n1, pMem = new_mp }
-
 -- | Removes a primary instance.
 removePri :: Node -> Instance.Instance -> Node
 removePri t inst =
   let iname = Instance.idx inst
+      forthcoming = Instance.forthcoming inst
       i_online = Instance.notOffline inst
       uses_disk = Instance.usesLocalStorage inst
-      new_plist = delete iname (pList t)
-      new_mem = incIf i_online (fMem t) (Instance.mem inst)
-      new_dsk = incIf uses_disk (fDsk t) (Instance.dsk inst)
-      new_free_sp = calcNewFreeSpindles False t inst
-      new_inst_sp = calcSpindleUse False t inst
-      new_mp = fromIntegral new_mem / tMem t
-      new_dp = computeNewPDsk t new_free_sp new_dsk
-      new_failn1 = new_mem <= rMem t
-      new_ucpu = decIf i_online (uCpu t) (Instance.vcpus inst)
-      new_rcpu = fromIntegral new_ucpu / tCpu t
-      new_load = utilLoad t `T.subUtil` Instance.util inst
-  in t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
-       , failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
-       , uCpu = new_ucpu, pCpu = new_rcpu, utilLoad = new_load
-       , pTags = delTags (pTags t) (Instance.exclTags inst)
-       , instSpindles = new_inst_sp, fSpindles = new_free_sp
-       }
+
+      updateForthcomingFields n =
+        let
+            new_plist_forth = delete iname (pListForth n)
+            new_mem_forth = fMemForth n + Instance.mem inst
+            new_dsk_forth = incIf uses_disk (fDskForth n) (Instance.dsk inst)
+            new_free_sp_forth = calcNewFreeSpindlesForth False n inst
+            new_inst_sp_forth = calcSpindleUseForth False n inst
+            new_mp_forth = fromIntegral new_mem_forth / tMem n
+            new_dp_forth = computeNewPDsk n new_free_sp_forth new_dsk_forth
+            new_ucpu_forth = decIf i_online (uCpuForth n) (Instance.vcpus inst)
+            new_rcpu_forth = fromIntegral new_ucpu_forth / tCpu n
+            new_load_forth = utilLoadForth n `T.subUtil` Instance.util inst
+
+        in n { pTags = delTags (pTags t) (Instance.exclTags inst)
+
+             , pListForth = new_plist_forth
+             , fMemForth = new_mem_forth
+             , fDskForth = new_dsk_forth
+             , pMemForth = new_mp_forth
+             , pDskForth = new_dp_forth
+             , uCpuForth = new_ucpu_forth
+             , pCpuForth = new_rcpu_forth
+             , utilLoadForth = new_load_forth
+             , instSpindlesForth = new_inst_sp_forth
+             , fSpindlesForth = new_free_sp_forth
+
+             -- TODO Set failN1Forth, rMemForth, pRemForth
+             }
+
+  in if forthcoming
+       then updateForthcomingFields t
+       else let
+                new_plist = delete iname (pList t)
+                new_mem = incIf (Instance.usesMemory inst) (fMem t)
+                                (Instance.mem inst)
+                new_dsk = incIf uses_disk (fDsk t) (Instance.dsk inst)
+                new_free_sp = calcNewFreeSpindles False t inst
+                new_inst_sp = calcSpindleUse False t inst
+                new_mp = fromIntegral new_mem / tMem t
+                new_dp = computeNewPDsk t new_free_sp new_dsk
+                new_failn1 = new_mem <= rMem t
+                new_ucpu = decIf i_online (uCpu t) (Instance.vcpus inst)
+                new_rcpu = fromIntegral new_ucpu / tCpu t
+                new_load = utilLoad t `T.subUtil` Instance.util inst
+
+            in updateForthcomingFields $
+                 t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
+                   , failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
+                   , uCpu = new_ucpu, pCpu = new_rcpu, utilLoad = new_load
+                   , instSpindles = new_inst_sp, fSpindles = new_free_sp
+                   , locationScore = locationScore t
+                                     - Instance.locationScore inst
+                   }
 
 -- | Removes a secondary instance.
 removeSec :: Node -> Instance.Instance -> Node
 removeSec t inst =
   let iname = Instance.idx inst
+      forthcoming = Instance.forthcoming inst
       uses_disk = Instance.usesLocalStorage inst
       cur_dsk = fDsk t
       pnode = Instance.pNode inst
-      new_slist = delete iname (sList t)
-      new_dsk = incIf uses_disk cur_dsk (Instance.dsk inst)
-      new_free_sp = calcNewFreeSpindles False t inst
-      new_inst_sp = calcSpindleUse False t inst
-      old_peers = peers t
-      old_peem = P.find pnode old_peers
-      new_peem = decIf (Instance.usesSecMem inst) old_peem (Instance.mem inst)
-      new_peers = if new_peem > 0
-                    then P.add pnode new_peem old_peers
-                    else P.remove pnode old_peers
-      old_rmem = rMem t
-      new_rmem = if old_peem < old_rmem
-                   then old_rmem
-                   else computeMaxRes new_peers
-      new_prem = fromIntegral new_rmem / tMem t
-      new_failn1 = fMem t <= new_rmem
-      new_dp = computeNewPDsk t new_free_sp new_dsk
-      old_load = utilLoad t
-      new_load = old_load { T.dskWeight = T.dskWeight old_load -
-                                          T.dskWeight (Instance.util inst) }
-  in t { sList = new_slist, fDsk = new_dsk, peers = new_peers
-       , failN1 = new_failn1, rMem = new_rmem, pDsk = new_dp
-       , pRem = new_prem, utilLoad = new_load
-       , instSpindles = new_inst_sp, fSpindles = new_free_sp
-       }
+
+      updateForthcomingFields n =
+        let
+            new_slist_forth = delete iname (sListForth n)
+            new_dsk_forth = incIf uses_disk (fDskForth n) (Instance.dsk inst)
+            new_free_sp_forth = calcNewFreeSpindlesForth False n inst
+            new_inst_sp_forth = calcSpindleUseForth False n inst
+            new_dp_forth = computeNewPDsk n new_free_sp_forth new_dsk_forth
+            old_load_forth = utilLoadForth n
+            new_load_forth = old_load_forth
+                               { T.dskWeight = T.dskWeight old_load_forth -
+                                               T.dskWeight (Instance.util inst)
+                               }
+        in n { sListForth = new_slist_forth
+             , fDskForth = new_dsk_forth
+             , pDskForth = new_dp_forth
+             , utilLoadForth = new_load_forth
+             , instSpindlesForth = new_inst_sp_forth
+             , fSpindlesForth = new_free_sp_forth
+
+             -- TODO Set failN1Forth, rMemForth, pRemForth
+             }
+
+  in if forthcoming
+       then updateForthcomingFields t
+       else let
+                new_slist = delete iname (sList t)
+                new_dsk = incIf uses_disk cur_dsk (Instance.dsk inst)
+                new_free_sp = calcNewFreeSpindles False t inst
+                new_inst_sp = calcSpindleUse False t inst
+                old_peers = peers t
+                old_peem = P.find pnode old_peers
+                new_peem = decIf (Instance.usesSecMem inst) old_peem
+                                 (Instance.mem inst)
+                new_peers = if new_peem > 0
+                              then P.add pnode new_peem old_peers
+                              else P.remove pnode old_peers
+                old_rmem = rMem t
+                new_rmem = if old_peem < old_rmem
+                             then old_rmem
+                             else computeMaxRes new_peers
+                new_prem = fromIntegral new_rmem / tMem t
+                new_failn1 = fMem t <= new_rmem
+                new_dp = computeNewPDsk t new_free_sp new_dsk
+                old_load = utilLoad t
+                new_load = old_load
+                  { T.dskWeight = T.dskWeight old_load
+                                    - T.dskWeight (Instance.util inst)
+                  }
+            in updateForthcomingFields $
+                 t { sList = new_slist, fDsk = new_dsk, peers = new_peers
+                   , failN1 = new_failn1, rMem = new_rmem, pDsk = new_dp
+                   , pRem = new_prem, utilLoad = new_load
+                   , instSpindles = new_inst_sp, fSpindles = new_free_sp
+                   }
 
 -- | Adds a primary instance (basic version).
 addPri :: Node -> Instance.Instance -> T.OpResult Node
@@ -555,8 +822,9 @@
 -- | Adds a primary instance (extended version).
 addPriEx :: Bool               -- ^ Whether to override the N+1 and
                                -- other /soft/ checks, useful if we
-                               -- come from a worse status
-                               -- (e.g. offline)
+                               -- come from a worse status (e.g. offline).
+                               -- If this is True, forthcoming instances
+                               -- may exceed available Node resources.
          -> Node               -- ^ The target node
          -> Instance.Instance  -- ^ The instance to add
          -> T.OpResult Node    -- ^ The result of the operation,
@@ -564,43 +832,104 @@
                                -- or a failure mode
 addPriEx force t inst =
   let iname = Instance.idx inst
+      forthcoming = Instance.forthcoming inst
       i_online = Instance.notOffline inst
       uses_disk = Instance.usesLocalStorage inst
-      cur_dsk = fDsk t
-      new_mem = decIf i_online (fMem t) (Instance.mem inst)
-      new_dsk = decIf uses_disk cur_dsk (Instance.dsk inst)
-      new_free_sp = calcNewFreeSpindles True t inst
-      new_inst_sp = calcSpindleUse True t inst
-      new_failn1 = new_mem <= rMem t
-      new_ucpu = incIf i_online (uCpu t) (Instance.vcpus inst)
-      new_pcpu = fromIntegral new_ucpu / tCpu t
-      new_dp = computeNewPDsk t new_free_sp new_dsk
       l_cpu = T.iPolicyVcpuRatio $ iPolicy t
-      new_load = utilLoad t `T.addUtil` Instance.util inst
-      inst_tags = Instance.exclTags inst
       old_tags = pTags t
       strict = not force
-  in case () of
-       _ | new_mem <= 0 -> Bad T.FailMem
-         | uses_disk && new_dsk <= 0 -> Bad T.FailDisk
-         | uses_disk && new_dsk < loDsk t && strict -> Bad T.FailDisk
-         | uses_disk && exclStorage t && new_free_sp < 0 -> Bad T.FailSpindles
-         | uses_disk && new_inst_sp > hiSpindles t && strict -> Bad T.FailDisk
-         | new_failn1 && not (failN1 t) && strict -> Bad T.FailMem
-         | l_cpu >= 0 && l_cpu < new_pcpu && strict -> Bad T.FailCPU
-         | strict && rejectAddTags old_tags inst_tags -> Bad T.FailTags
-         | otherwise ->
-           let new_plist = iname:pList t
+
+      inst_tags = Instance.exclTags inst
+
+      new_mem_forth = fMemForth t - Instance.mem inst
+      new_mp_forth = fromIntegral new_mem_forth / tMem t
+      new_dsk_forth = decIf uses_disk (fDskForth t) (Instance.dsk inst)
+      new_free_sp_forth = calcNewFreeSpindlesForth True t inst
+      new_inst_sp_forth = calcSpindleUseForth True t inst
+      new_ucpu_forth = incIf i_online (uCpuForth t) (Instance.vcpus inst)
+      new_pcpu_forth = fromIntegral new_ucpu_forth / tCpu t
+      new_dp_forth = computeNewPDsk t new_free_sp_forth new_dsk_forth
+      new_load_forth = utilLoadForth t `T.addUtil` Instance.util inst
+      new_plist_forth = iname:pListForth t
+
+      updateForthcomingFields n =
+        n { pTags = addTags old_tags inst_tags
+
+          , pListForth = new_plist_forth
+          , fMemForth = new_mem_forth
+          , fDskForth = new_dsk_forth
+          , pMemForth = new_mp_forth
+          , pDskForth = new_dp_forth
+          , uCpuForth = new_ucpu_forth
+          , pCpuForth = new_pcpu_forth
+          , utilLoadForth = new_load_forth
+          , instSpindlesForth = new_inst_sp_forth
+          , fSpindlesForth = new_free_sp_forth
+
+          -- TODO Set failN1Forth, rMemForth, pRemForth
+          }
+
+      checkForthcomingViolation
+        | new_mem_forth <= 0                            = Bad T.FailMem
+        | uses_disk && new_dsk_forth <= 0               = Bad T.FailDisk
+        | uses_disk && new_dsk_forth < loDsk t          = Bad T.FailDisk
+        | uses_disk && exclStorage t
+                    && new_free_sp_forth < 0            = Bad T.FailSpindles
+        | uses_disk && new_inst_sp_forth > hiSpindles t = Bad T.FailDisk
+        -- TODO Check failN1 including forthcoming instances
+        | l_cpu >= 0 && l_cpu < new_pcpu_forth          = Bad T.FailCPU
+        | otherwise                                     = Ok ()
+
+  in
+    if forthcoming
+      then case strict of
+             True | Bad err <- checkForthcomingViolation -> Bad err
+
+             _ -> Ok $ updateForthcomingFields t
+
+      else let
+               new_mem = decIf (Instance.usesMemory inst) (fMem t)
+                               (Instance.mem inst)
+               new_dsk = decIf uses_disk (fDsk t) (Instance.dsk inst)
+               new_free_sp = calcNewFreeSpindles True t inst
+               new_inst_sp = calcSpindleUse True t inst
+               new_failn1 = new_mem <= rMem t
+               new_ucpu = incIf i_online (uCpu t) (Instance.vcpus inst)
+               new_pcpu = fromIntegral new_ucpu / tCpu t
+               new_dp = computeNewPDsk t new_free_sp new_dsk
+               new_load = utilLoad t `T.addUtil` Instance.util inst
+
+               new_plist = iname:pList t
                new_mp = fromIntegral new_mem / tMem t
-               r = t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
-                     , failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
-                     , uCpu = new_ucpu, pCpu = new_pcpu
-                     , utilLoad = new_load
-                     , pTags = addTags old_tags inst_tags
-                     , instSpindles = new_inst_sp
-                     , fSpindles = new_free_sp
-                     }
-           in Ok r
+      in case () of
+        _ | new_mem <= 0 -> Bad T.FailMem
+          | uses_disk && new_dsk <= 0 -> Bad T.FailDisk
+          | strict && uses_disk && new_dsk < loDsk t -> Bad T.FailDisk
+          | uses_disk && exclStorage t && new_free_sp < 0 -> Bad T.FailSpindles
+          | strict && uses_disk && new_inst_sp > hiSpindles t -> Bad T.FailDisk
+          | strict && new_failn1 && not (failN1 t) -> Bad T.FailMem
+          | strict && l_cpu >= 0 && l_cpu < new_pcpu -> Bad T.FailCPU
+          | strict && rejectAddTags old_tags inst_tags -> Bad T.FailTags
+
+          -- When strict, check forthcoming limits too, but after normal checks
+          | strict, Bad err <- checkForthcomingViolation -> Bad err
+
+          | otherwise ->
+              Ok . updateForthcomingFields $
+                t { pList = new_plist
+                  , fMem = new_mem
+                  , fDsk = new_dsk
+                  , failN1 = new_failn1
+                  , pMem = new_mp
+                  , pDsk = new_dp
+                  , uCpu = new_ucpu
+                  , pCpu = new_pcpu
+                  , utilLoad = new_load
+                  , instSpindles = new_inst_sp
+                  , fSpindles = new_free_sp
+                  , locationScore = locationScore t
+                                    + Instance.locationScore inst
+                  }
 
 -- | Adds a secondary instance (basic version).
 addSec :: Node -> Instance.Instance -> T.Ndx -> T.OpResult Node
@@ -618,42 +947,91 @@
            -> Bool -> Node -> Instance.Instance -> T.Ndx -> T.OpResult Node
 addSecExEx ignore_disks force t inst pdx =
   let iname = Instance.idx inst
+      forthcoming = Instance.forthcoming inst
       old_peers = peers t
-      old_mem = fMem t
-      new_dsk = fDsk t - Instance.dsk inst
-      new_free_sp = calcNewFreeSpindles True t inst
-      new_inst_sp = calcSpindleUse True t inst
+      strict = not force
+
       secondary_needed_mem = if Instance.usesSecMem inst
                                then Instance.mem inst
                                else 0
       new_peem = P.find pdx old_peers + secondary_needed_mem
       new_peers = P.add pdx new_peem old_peers
-      new_rmem = max (rMem t) new_peem
-      new_prem = fromIntegral new_rmem / tMem t
-      new_failn1 = old_mem <= new_rmem
-      new_dp = computeNewPDsk t new_free_sp new_dsk
-      old_load = utilLoad t
-      new_load = old_load { T.dskWeight = T.dskWeight old_load +
-                                          T.dskWeight (Instance.util inst) }
-      strict = not force
-  in case () of
-       _ | not (Instance.hasSecondary inst) -> Bad T.FailDisk
-         | not ignore_disks && new_dsk <= 0 -> Bad T.FailDisk
-         | new_dsk < loDsk t && strict -> Bad T.FailDisk
-         | exclStorage t && new_free_sp < 0 -> Bad T.FailSpindles
-         | new_inst_sp > hiSpindles t && strict -> Bad T.FailDisk
-         | secondary_needed_mem >= old_mem && strict -> Bad T.FailMem
-         | new_failn1 && not (failN1 t) && strict -> Bad T.FailMem
-         | otherwise ->
-           let new_slist = iname:sList t
-               r = t { sList = new_slist, fDsk = new_dsk
-                     , peers = new_peers, failN1 = new_failn1
-                     , rMem = new_rmem, pDsk = new_dp
-                     , pRem = new_prem, utilLoad = new_load
-                     , instSpindles = new_inst_sp
-                     , fSpindles = new_free_sp
-                     }
-           in Ok r
+
+      old_mem_forth = fMemForth t
+      new_dsk_forth = fDskForth t - Instance.dsk inst
+      new_free_sp_forth = calcNewFreeSpindlesForth True t inst
+      new_inst_sp_forth = calcSpindleUseForth True t inst
+      new_dp_forth = computeNewPDsk t new_free_sp_forth new_dsk_forth
+      old_load_forth = utilLoadForth t
+      new_load_forth = old_load_forth
+                         { T.dskWeight = T.dskWeight old_load_forth +
+                                         T.dskWeight (Instance.util inst)
+                         }
+      new_slist_forth = iname:sListForth t
+
+      updateForthcomingFields n =
+        n { sListForth = new_slist_forth
+          , fDskForth = new_dsk_forth
+          , pDskForth = new_dp_forth
+          , utilLoadForth = new_load_forth
+          , instSpindlesForth = new_inst_sp_forth
+          , fSpindlesForth = new_free_sp_forth
+
+          -- TODO Set failN1Forth, rMemForth, pRemForth
+          }
+
+      checkForthcomingViolation
+        | not (Instance.hasSecondary inst)       = Bad T.FailDisk
+        | new_dsk_forth <= 0                     = Bad T.FailDisk
+        | new_dsk_forth < loDsk t                = Bad T.FailDisk
+        | exclStorage t && new_free_sp_forth < 0 = Bad T.FailSpindles
+        | new_inst_sp_forth > hiSpindles t       = Bad T.FailDisk
+        | secondary_needed_mem >= old_mem_forth  = Bad T.FailMem
+        -- TODO Check failN1 including forthcoming instances
+        | otherwise                              = Ok ()
+
+  in if forthcoming
+      then case strict of
+             True | Bad err <- checkForthcomingViolation -> Bad err
+
+             _ -> Ok $ updateForthcomingFields t
+      else let
+               old_mem = fMem t
+               new_dsk = fDsk t - Instance.dsk inst
+               new_free_sp = calcNewFreeSpindles True t inst
+               new_inst_sp = calcSpindleUse True t inst
+               new_rmem = max (rMem t) new_peem
+               new_prem = fromIntegral new_rmem / tMem t
+               new_failn1 = old_mem <= new_rmem
+               new_dp = computeNewPDsk t new_free_sp new_dsk
+               old_load = utilLoad t
+               new_load = old_load
+                            { T.dskWeight = T.dskWeight old_load +
+                                            T.dskWeight (Instance.util inst)
+                            }
+               new_slist = iname:sList t
+      in case () of
+        _ | not (Instance.hasSecondary inst) -> Bad T.FailDisk
+          | not ignore_disks && new_dsk <= 0 -> Bad T.FailDisk
+          | strict && new_dsk < loDsk t -> Bad T.FailDisk
+          | exclStorage t && new_free_sp < 0 -> Bad T.FailSpindles
+          | strict && new_inst_sp > hiSpindles t -> Bad T.FailDisk
+          | strict && secondary_needed_mem >= old_mem -> Bad T.FailMem
+          | strict && new_failn1 && not (failN1 t) -> Bad T.FailMem
+
+          -- When strict, check forthcoming limits too, but after normal checks
+          | strict, Bad err <- checkForthcomingViolation -> Bad err
+
+          | otherwise ->
+              Ok . updateForthcomingFields $
+                t { sList = new_slist, fDsk = new_dsk
+                  , peers = new_peers, failN1 = new_failn1
+                  , rMem = new_rmem, pDsk = new_dp
+                  , pRem = new_prem, utilLoad = new_load
+                  , instSpindles = new_inst_sp
+                  , fSpindles = new_free_sp
+                  }
+
 
 -- | Predicate on whether migration is supported between two nodes.
 checkMigration :: Node -> Node -> T.OpResult ()
@@ -667,7 +1045,7 @@
 -- | Computes the amount of available disk on a given node.
 availDisk :: Node -> Int
 availDisk t =
-  let _f = fDsk t
+  let _f = fDsk t -- TODO Shall we use fDskForth here?
       _l = loDsk t
   in if _f < _l
        then 0
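
The additions to addPri and addSecExEx above share one idiom: the capacity
checks for forthcoming instances live in a local guard
(checkForthcomingViolation) returning an OpResult-style value, and pattern
guards consult it only when strict (i.e. not force) holds. A minimal,
self-contained sketch of that idiom follows; the names (Res, checkViolation,
admit) are hypothetical and only illustrate the pattern-guard style, not the
real Node fields.

    -- Hypothetical sketch of the guard idiom used in addPri/addSecExEx above.
    data Res a = Bad String | Ok a deriving Show

    -- Collect all checks in one place, succeeding with unit.
    checkViolation :: Int -> Res ()
    checkViolation freeMem
      | freeMem <= 0 = Bad "FailMem"
      | otherwise    = Ok ()

    -- Consult the checks only in strict mode, mirroring the
    -- '| strict, Bad err <- checkForthcomingViolation -> Bad err' guards.
    admit :: Bool -> Int -> Res Int
    admit strict freeMem
      | strict, Bad err <- checkViolation freeMem = Bad err
      | otherwise                                 = Ok freeMem
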
diff --git a/src/Ganeti/HTools/Program/Hbal.hs b/src/Ganeti/HTools/Program/Hbal.hs
index 12d50dc..f8fa48d 100644
--- a/src/Ganeti/HTools/Program/Hbal.hs
+++ b/src/Ganeti/HTools/Program/Hbal.hs
@@ -327,13 +327,13 @@
       ini_tbl = Cluster.Table nl il ini_cv []
       min_cv = optMinScore opts
 
-  checkNeedRebalance opts ini_cv
-
   if verbose > 2
     then printf "Initial coefficients: overall %.8f\n%s"
            ini_cv (Cluster.printStats "  " nl)::IO ()
     else printf "Initial score: %.8f\n" ini_cv
 
+  checkNeedRebalance opts ini_cv
+
   putStrLn "Trying to minimize the CV..."
   let imlen = maximum . map (length . Instance.alias) $ Container.elems il
       nmlen = maximum . map (length . Node.alias) $ Container.elems nl
diff --git a/src/Ganeti/HTools/Program/Hspace.hs b/src/Ganeti/HTools/Program/Hspace.hs
index 5a1b8b6..9db1441 100644
--- a/src/Ganeti/HTools/Program/Hspace.hs
+++ b/src/Ganeti/HTools/Program/Hspace.hs
@@ -441,7 +441,7 @@
 instFromSpec spx dt su =
   Instance.create "new" (rspecMem spx) (rspecDsk spx)
     [Instance.Disk (rspecDsk spx) (Just $ rspecSpn spx)]
-    (rspecCpu spx) Running [] True (-1) (-1) dt su []
+    (rspecCpu spx) Running [] True (-1) (-1) dt su [] False
 
 combineTiered :: AlgorithmOptions
               -> Maybe Int
diff --git a/src/Ganeti/HTools/Program/Hsqueeze.hs b/src/Ganeti/HTools/Program/Hsqueeze.hs
index 3fd8816..db6c473 100644
--- a/src/Ganeti/HTools/Program/Hsqueeze.hs
+++ b/src/Ganeti/HTools/Program/Hsqueeze.hs
@@ -219,6 +219,7 @@
     Running [] False Node.noSecondary Node.noSecondary DTExt
     (floor (f * fromIntegral (iSpecSpindleUse spec)))
     []
+    False
 
 -- | Get opcodes for the given move job.
 getMoveOpCodes :: Node.List
diff --git a/src/Ganeti/HTools/Tags.hs b/src/Ganeti/HTools/Tags.hs
index 18d378b..8e466a3 100644
--- a/src/Ganeti/HTools/Tags.hs
+++ b/src/Ganeti/HTools/Tags.hs
@@ -45,6 +45,7 @@
   , autoRepairTagSuspended
   , getMigRestrictions
   , getRecvMigRestrictions
+  , getLocations
   ) where
 
 import Control.Monad (guard, (>=>))
@@ -75,6 +76,10 @@
 allowMigrationPrefix :: String
 allowMigrationPrefix = "htools:allowmigration:"
 
+-- | The prefix for location tags.
+locationPrefix :: String
+locationPrefix = "htools:nlocation:"
+
 -- | The tag to be added to nodes that were shutdown by hsqueeze.
 standbyAuto :: String
 standbyAuto = "htools:standby:auto"
@@ -101,19 +106,23 @@
 hasStandbyTag :: Node.Node -> Bool
 hasStandbyTag = any (standbyPrefix `isPrefixOf`) . Node.nTags
 
--- * Migration restriction tags
+-- * Utility functions
 
--- | Given the cluster tags extract the migration restrictions
--- from a node tag, as a list.
-getMigRestrictionsList :: [String] -> [String] -> [String]
-getMigRestrictionsList ctags ntags =
-  mapMaybe (stripPrefix migrationPrefix) ctags >>= \ prefix ->
-  filter ((prefix ++ ":") `isPrefixOf`) ntags
+-- | Htools standard tag extraction. Given a set of cluster tags,
+-- take those starting with a specific prefix, strip the prefix
+-- and append a colon, and then take those node tags starting with
+-- one of those strings.
+getTags :: String -> [String] -> [String] -> S.Set String
+getTags prefix ctags ntags = S.fromList
+  (mapMaybe (stripPrefix prefix) ctags >>= \ p ->
+    filter ((p ++ ":") `isPrefixOf`) ntags)
+
+-- * Migration restriction tags
 
 -- | Given the cluster tags extract the migration restrictions
 -- from a node tag.
 getMigRestrictions :: [String] -> [String] -> S.Set String
-getMigRestrictions ctags = S.fromList . getMigRestrictionsList ctags
+getMigRestrictions = getTags migrationPrefix
 
 -- | Maybe split a string on the first single occurence of "::" return
 -- the parts before and after.
@@ -139,4 +148,11 @@
 getRecvMigRestrictions ctags ntags =
   let migs = migrations ctags
       closure tag = (:) tag . map fst $ filter ((==) tag . snd) migs
-  in S.fromList $ getMigRestrictionsList ctags ntags >>= closure
+  in S.fromList $ S.elems (getMigRestrictions ctags ntags) >>= closure
+
+-- * Location tags
+
+-- | Given the cluster tags, extract the node location tags
+-- from the node tags.
+getLocations :: [String] -> [String] -> S.Set String
+getLocations = getTags locationPrefix
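
The refactored extraction treats migration restrictions and the new location
tags uniformly: a cluster tag declares a namespace under the htools prefix,
and only node tags inside a declared namespace are picked up. A sketch with
made-up tag values:

    -- Illustrative values only; getLocations is the exported entry point.
    --   getLocations ["htools:nlocation:power"] ["power:a", "rack:1"]
    --     == fromList ["power:a"]
    -- The cluster tag declares the "power" namespace; "rack:1" is ignored
    -- because no cluster tag declares a "rack" namespace.
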
diff --git a/src/Ganeti/JSON.hs b/src/Ganeti/JSON.hs
index c97c8c0..18fddb4 100644
--- a/src/Ganeti/JSON.hs
+++ b/src/Ganeti/JSON.hs
@@ -77,6 +77,8 @@
   , Tuple5(..)
   , nestedAccessByKey
   , nestedAccessByKeyDotted
+  , branchOnField
+  , addField
   )
   where
 
@@ -94,6 +96,7 @@
 import Text.Printf (printf)
 
 import qualified Text.JSON as J
+import qualified Text.JSON.Types as JT
 import Text.JSON.Pretty (pp_value)
 
 -- Note: this module should not import any Ganeti-specific modules
@@ -512,3 +515,25 @@
 nestedAccessByKeyDotted :: String -> J.JSValue -> J.Result J.JSValue
 nestedAccessByKeyDotted s =
   nestedAccessByKey (map T.unpack . T.splitOn (T.pack ".") . T.pack $ s)
+
+
+-- | Branch decoding on a field in a JSON object.
+branchOnField :: String -- ^ fieldname to branch on
+              -> (J.JSValue -> J.Result a)
+                  -- ^ decoding function if field is present and @true@; field
+                  -- will already be removed in the input
+              -> (J.JSValue -> J.Result a)
+                 -- ^ decoding function otherwise
+              -> J.JSValue -> J.Result a
+branchOnField k ifTrue ifFalse (J.JSObject jobj) =
+  let fields = J.fromJSObject jobj
+      jobj' = J.JSObject . J.toJSObject $ filter ((/=) k . fst) fields
+  in if lookup k fields == Just (J.JSBool True)
+       then ifTrue jobj'
+       else ifFalse jobj'
+branchOnField k _ _ _ = J.Error $ "Need an object to branch on key " ++ k
+
+-- | Add a field to a JSON object; do nothing if the argument is not an object.
+addField :: (String, J.JSValue) -> J.JSValue -> J.JSValue
+addField (n,v) (J.JSObject obj) = J.JSObject $ JT.set_field obj n v
+addField _ jsval = jsval
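
branchOnField and addField appear intended for the forthcoming-object
encoding used later in this patch: the former picks a decoder depending on a
boolean field and strips that field before decoding, the latter re-inserts
such a field when serialising. An informal sketch with abbreviated object
literals (values hypothetical):

    -- branchOnField "forthcoming" ifTrue ifFalse, applied to
    --   {"forthcoming": true, "name": "inst1"}
    -- strips the key and runs ifTrue on {"name": "inst1"}; anything other
    -- than a literal true (including absence) routes the stripped object to
    -- ifFalse, and non-objects yield an Error.
    --
    -- addField ("forthcoming", JSBool True), applied to {"name": "inst1"},
    -- yields {"name": "inst1", "forthcoming": true}; non-objects are
    -- returned unchanged.
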
diff --git a/src/Ganeti/Lens.hs b/src/Ganeti/Lens.hs
index f53d4d0..c7951e6 100644
--- a/src/Ganeti/Lens.hs
+++ b/src/Ganeti/Lens.hs
@@ -44,14 +44,6 @@
   , atSet
   ) where
 
-
--- The following macro is just a temporary solution for 2.12 and 2.13.
--- Since 2.14 cabal creates proper macros for all dependencies.
-#define MIN_VERSION_lens(maj,min,rev) \
-  (((maj)<LENS_MAJOR)|| \
-   (((maj)==LENS_MAJOR)&&((min)<=LENS_MINOR))|| \
-   (((maj)==LENS_MAJOR)&&((min)==LENS_MINOR)&&((rev)<=LENS_REV)))
-
 import Control.Applicative ((<$>), WrappedMonad(..))
 import Control.Lens
 import Control.Monad
diff --git a/src/Ganeti/Locking/Locks.hs b/src/Ganeti/Locking/Locks.hs
index 248b83d..e5bf524 100644
--- a/src/Ganeti/Locking/Locks.hs
+++ b/src/Ganeti/Locking/Locks.hs
@@ -61,8 +61,6 @@
                  | BGL
                  | InstanceLockSet
                  | Instance String
-                 | NodeAllocLockSet
-                 | NAL
                  | NodeGroupLockSet
                  | NodeGroup String
                  | NodeLockSet
@@ -82,8 +80,6 @@
 lockName BGL = "cluster/BGL"
 lockName ClusterLockSet = "cluster/[lockset]"
 lockName InstanceLockSet = "instance/[lockset]"
-lockName NodeAllocLockSet = "node-alloc/[lockset]"
-lockName NAL = "node-alloc/NAL"
 lockName (Instance uuid) = "instance/" ++ uuid
 lockName NodeGroupLockSet = "nodegroup/[lockset]"
 lockName (NodeGroup uuid) = "nodegroup/" ++ uuid
@@ -103,8 +99,6 @@
 lockFromName (stripPrefix "instance/" -> Just uuid) = return $ Instance uuid
 lockFromName "nodegroup/[lockset]" = return NodeGroupLockSet
 lockFromName (stripPrefix "nodegroup/" -> Just uuid) = return $ NodeGroup uuid
-lockFromName "node-alloc/[lockset]" = return NodeAllocLockSet
-lockFromName "node-alloc/NAL" = return NAL
 lockFromName "node-res/[lockset]" = return NodeResLockSet
 lockFromName (stripPrefix "node-res/" -> Just uuid) = return $ NodeRes uuid
 lockFromName "node/[lockset]" = return NodeLockSet
@@ -121,7 +115,6 @@
 -- | The levels, the locks belong to.
 data LockLevel = LevelCluster
                | LevelInstance
-               | LevelNodeAlloc
                | LevelNodeGroup
                | LevelNode
                | LevelNodeRes
@@ -134,7 +127,6 @@
 lockLevelName :: LockLevel -> String
 lockLevelName LevelCluster = "cluster"
 lockLevelName LevelInstance = "instance"
-lockLevelName LevelNodeAlloc = "node-alloc"
 lockLevelName LevelNodeGroup = "nodegroup"
 lockLevelName LevelNode = "node"
 lockLevelName LevelNodeRes = "node-res"
@@ -145,7 +137,6 @@
 lockLevelFromName :: String -> J.Result LockLevel
 lockLevelFromName "cluster" = return LevelCluster
 lockLevelFromName "instance" = return LevelInstance
-lockLevelFromName "node-alloc" = return LevelNodeAlloc
 lockLevelFromName "nodegroup" = return LevelNodeGroup
 lockLevelFromName "node" = return LevelNode
 lockLevelFromName "node-res" = return LevelNodeRes
@@ -162,8 +153,6 @@
 lockLevel BGL = LevelCluster
 lockLevel ClusterLockSet = LevelCluster
 lockLevel InstanceLockSet = LevelInstance
-lockLevel NodeAllocLockSet = LevelNodeAlloc
-lockLevel NAL = LevelNodeAlloc
 lockLevel (Instance _) = LevelInstance
 lockLevel NodeGroupLockSet = LevelNodeGroup
 lockLevel (NodeGroup _) = LevelNodeGroup
@@ -179,7 +168,6 @@
   lockImplications BGL = [ClusterLockSet]
   lockImplications (Instance _) = [InstanceLockSet]
   lockImplications (NodeGroup _) = [NodeGroupLockSet]
-  lockImplications NAL = [NodeAllocLockSet]
   lockImplications (NodeRes _) = [NodeResLockSet]
   lockImplications (Node _) = [NodeLockSet]
   lockImplications (Network _) = [NetworkLockSet]
diff --git a/src/Ganeti/Logging/WriterLog.hs b/src/Ganeti/Logging/WriterLog.hs
index f718210..25310bc 100644
--- a/src/Ganeti/Logging/WriterLog.hs
+++ b/src/Ganeti/Logging/WriterLog.hs
@@ -47,13 +47,6 @@
   , execWriterLog
   ) where
 
--- The following macro is just a temporary solution for 2.12 and 2.13.
--- Since 2.14 cabal creates proper macros for all dependencies.
-#define MIN_VERSION_monad_control(maj,min,rev) \
-  (((maj)<MONAD_CONTROL_MAJOR)|| \
-   (((maj)==MONAD_CONTROL_MAJOR)&&((min)<=MONAD_CONTROL_MINOR))|| \
-   (((maj)==MONAD_CONTROL_MAJOR)&&((min)==MONAD_CONTROL_MINOR)&& \
-    ((rev)<=MONAD_CONTROL_REV)))
 
 import Control.Applicative
 import Control.Monad
diff --git a/src/Ganeti/Objects.hs b/src/Ganeti/Objects.hs
index 23060c5..0fa59a8 100644
--- a/src/Ganeti/Objects.hs
+++ b/src/Ganeti/Objects.hs
@@ -1,10 +1,7 @@
-{-# LANGUAGE TemplateHaskell, StandaloneDeriving #-}
+{-# LANGUAGE TemplateHaskell, FunctionalDependencies #-}
 
 {-| Implementation of the Ganeti config objects.
 
-Some object fields are not implemented yet, and as such they are
-commented out below.
-
 -}
 
 {-
@@ -43,36 +40,24 @@
   , OsParamsPrivate
   , PartialNicParams(..)
   , FilledNicParams(..)
-  , fillNicParams
   , allNicParamFields
   , PartialNic(..)
   , FileDriver(..)
-  , DRBDSecret
   , DataCollectorConfig(..)
-  , LogicalVolume(..)
-  , DiskLogicalId(..)
-  , Disk(..)
-  , includesLogicalId
   , DiskTemplate(..)
   , PartialBeParams(..)
   , FilledBeParams(..)
-  , fillBeParams
-  , allBeParamFields
-  , Instance(..)
   , PartialNDParams(..)
   , FilledNDParams(..)
-  , fillNDParams
   , allNDParamFields
   , Node(..)
   , AllocPolicy(..)
   , FilledISpecParams(..)
   , PartialISpecParams(..)
-  , fillISpecParams
   , allISpecParamFields
   , MinMaxISpecs(..)
   , FilledIPolicy(..)
   , PartialIPolicy(..)
-  , fillIPolicy
   , GroupDiskParams
   , NodeGroup(..)
   , FilterAction(..)
@@ -94,10 +79,10 @@
   , UidRange
   , Cluster(..)
   , ConfigData(..)
-  , TimeStampObject(..)
-  , UuidObject(..)
-  , SerialNoObject(..)
-  , TagsObject(..)
+  , TimeStampObject(..) -- re-exported from Types
+  , UuidObject(..) -- re-exported from Types
+  , SerialNoObject(..) -- re-exported from Types
+  , TagsObject(..) -- re-exported from Types
   , DictObject(..) -- re-exported from THH
   , TagSet -- re-exported from THH
   , Network(..)
@@ -115,22 +100,22 @@
   , nextIp4Address
   , IAllocatorParams
   , MasterNetworkParameters(..)
+  , module Ganeti.PartialParams
+  , module Ganeti.Objects.Disk
+  , module Ganeti.Objects.Instance
   ) where
 
 import Control.Applicative
 import Control.Arrow (first)
 import Control.Monad.State
-import Data.Char
-import Data.List (foldl', isPrefixOf, isInfixOf, intercalate)
+import Data.List (foldl', intercalate)
 import Data.Maybe
 import qualified Data.Map as Map
 import Data.Monoid
 import Data.Ord (comparing)
 import Data.Ratio (numerator, denominator)
-import qualified Data.Set as Set
 import Data.Tuple (swap)
 import Data.Word
-import System.Time (ClockTime(..))
 import Text.JSON (showJSON, readJSON, JSON, JSValue(..), fromJSString,
                   toJSString)
 import qualified Text.JSON as J
@@ -140,12 +125,15 @@
 import qualified Ganeti.ConstantUtils as ConstantUtils
 import Ganeti.JSON
 import Ganeti.Objects.BitArray (BitArray)
+import Ganeti.Objects.Disk
+import Ganeti.Objects.Nic
+import Ganeti.Objects.Instance
 import Ganeti.Query.Language
+import Ganeti.PartialParams
 import Ganeti.Types
 import Ganeti.THH
 import Ganeti.THH.Field
-import Ganeti.Utils (sepSplit, tryRead, parseUnitAssumeBinary)
-import Ganeti.Utils.Validate
+import Ganeti.Utils (sepSplit, tryRead)
 
 -- * Generic definitions
 
@@ -156,32 +144,6 @@
   let updated = Map.union custom defaults
   in foldl' (flip Map.delete) updated skip_keys
 
--- | The hypervisor parameter type. This is currently a simple map,
--- without type checking on key/value pairs.
-type HvParams = Container JSValue
-
--- | The OS parameters type. This is, and will remain, a string
--- container, since the keys are dynamically declared by the OSes, and
--- the values are always strings.
-type OsParams = Container String
-type OsParamsPrivate = Container (Private String)
-
--- | Class of objects that have timestamps.
-class TimeStampObject a where
-  cTimeOf :: a -> ClockTime
-  mTimeOf :: a -> ClockTime
-
--- | Class of objects that have an UUID.
-class UuidObject a where
-  uuidOf :: a -> String
-
--- | Class of object that have a serial number.
-class SerialNoObject a where
-  serialOf :: a -> Int
-
--- | Class of objects that have tags.
-class TagsObject a where
-  tagsOf :: a -> Set.Set String
 
 -- * Network definitions
 
@@ -312,28 +274,10 @@
   cTimeOf = networkCtime
   mTimeOf = networkMtime
 
--- * NIC definitions
-
-$(buildParam "Nic" "nicp"
-  [ simpleField "mode" [t| NICMode |]
-  , simpleField "link" [t| String  |]
-  , simpleField "vlan" [t| String |]
-  ])
-
-$(buildObject "PartialNic" "nic" $
-  [ simpleField "mac" [t| String |]
-  , optionalField $ simpleField "ip" [t| String |]
-  , simpleField "nicparams" [t| PartialNicParams |]
-  , optionalField $ simpleField "network" [t| String |]
-  , optionalField $ simpleField "name" [t| String |]
-  ] ++ uuidFields)
-
-instance UuidObject PartialNic where
-  uuidOf = nicUuid
-
-type MicroSeconds = Integer
 
 -- * Datacollector definitions
+type MicroSeconds = Integer
+
 -- | The configuration regarding a single data collector.
 $(buildObject "DataCollectorConfig" "dataCollector" [
   simpleField "active" [t| Bool|],
@@ -348,293 +292,6 @@
     }
   mappend _ a = a
 
--- * Disk definitions
-
--- | Constant for the dev_type key entry in the disk config.
-devType :: String
-devType = "dev_type"
-
--- | The disk parameters type.
-type DiskParams = Container JSValue
-
--- | An alias for DRBD secrets
-type DRBDSecret = String
-
--- Represents a group name and a volume name.
---
--- From @man lvm@:
---
--- The following characters are valid for VG and LV names: a-z A-Z 0-9 + _ . -
---
--- VG  and LV names cannot begin with a hyphen.  There are also various reserved
--- names that are used internally by lvm that can not be used as LV or VG names.
--- A VG cannot be  called  anything  that exists in /dev/ at the time of
--- creation, nor can it be called '.' or '..'.  A LV cannot be called '.' '..'
--- 'snapshot' or 'pvmove'. The LV name may also not contain the strings '_mlog'
--- or '_mimage'
-data LogicalVolume = LogicalVolume { lvGroup :: String
-                                   , lvVolume :: String
-                                   }
-  deriving (Eq, Ord)
-
-instance Show LogicalVolume where
-  showsPrec _ (LogicalVolume g v) =
-    showString g . showString "/" . showString v
-
--- | Check the constraints for a VG/LV names (except the @\/dev\/@ check).
-instance Validatable LogicalVolume where
-  validate (LogicalVolume g v) = do
-      let vgn = "Volume group name"
-      -- Group name checks
-      nonEmpty vgn g
-      validChars vgn g
-      notStartsDash vgn g
-      notIn vgn g [".", ".."]
-      -- Volume name checks
-      let lvn = "Volume name"
-      nonEmpty lvn v
-      validChars lvn v
-      notStartsDash lvn v
-      notIn lvn v [".", "..", "snapshot", "pvmove"]
-      reportIf ("_mlog" `isInfixOf` v) $ lvn ++ " must not contain '_mlog'."
-      reportIf ("_mimage" `isInfixOf` v) $ lvn ++ "must not contain '_mimage'."
-    where
-      nonEmpty prefix x = reportIf (null x) $ prefix ++ " must be non-empty"
-      notIn prefix x =
-        mapM_ (\y -> reportIf (x == y)
-                              $ prefix ++ " must not be '" ++ y ++ "'")
-      notStartsDash prefix x = reportIf ("-" `isPrefixOf` x)
-                                 $ prefix ++ " must not start with '-'"
-      validChars prefix x =
-        reportIf (not . all validChar $ x)
-                 $ prefix ++ " must consist only of [a-z][A-Z][0-9][+_.-]"
-      validChar c = isAsciiLower c || isAsciiUpper c || isDigit c
-                    || (c `elem` "+_.-")
-
-instance J.JSON LogicalVolume where
-  showJSON = J.showJSON . show
-  readJSON (J.JSString s) | (g, _ : l) <- break (== '/') (J.fromJSString s) =
-    either fail return . evalValidate . validate' $ LogicalVolume g l
-  readJSON v = fail $ "Invalid JSON value " ++ show v
-                      ++ " for a logical volume"
-
--- | The disk configuration type. This includes the disk type itself,
--- for a more complete consistency. Note that since in the Python
--- code-base there's no authoritative place where we document the
--- logical id, this is probably a good reference point.
-data DiskLogicalId
-  = LIDPlain LogicalVolume  -- ^ Volume group, logical volume
-  | LIDDrbd8 String String Int Int Int (Private DRBDSecret)
-  -- ^ NodeA, NodeB, Port, MinorA, MinorB, Secret
-  | LIDFile FileDriver String -- ^ Driver, path
-  | LIDSharedFile FileDriver String -- ^ Driver, path
-  | LIDGluster FileDriver String -- ^ Driver, path
-  | LIDBlockDev BlockDriver String -- ^ Driver, path (must be under /dev)
-  | LIDRados String String -- ^ Unused, path
-  | LIDExt String String -- ^ ExtProvider, unique name
-    deriving (Show, Eq)
-
--- | Mapping from a logical id to a disk type.
-lidDiskType :: DiskLogicalId -> DiskTemplate
-lidDiskType (LIDPlain {}) = DTPlain
-lidDiskType (LIDDrbd8 {}) = DTDrbd8
-lidDiskType (LIDFile  {}) = DTFile
-lidDiskType (LIDSharedFile  {}) = DTSharedFile
-lidDiskType (LIDGluster  {}) = DTGluster
-lidDiskType (LIDBlockDev {}) = DTBlock
-lidDiskType (LIDRados {}) = DTRbd
-lidDiskType (LIDExt {}) = DTExt
-
--- | Builds the extra disk_type field for a given logical id.
-lidEncodeType :: DiskLogicalId -> [(String, JSValue)]
-lidEncodeType v = [(devType, showJSON . lidDiskType $ v)]
-
--- | Custom encoder for DiskLogicalId (logical id only).
-encodeDLId :: DiskLogicalId -> JSValue
-encodeDLId (LIDPlain (LogicalVolume vg lv)) =
-  JSArray [showJSON vg, showJSON lv]
-encodeDLId (LIDDrbd8 nodeA nodeB port minorA minorB (Private key)) =
-  JSArray [ showJSON nodeA, showJSON nodeB, showJSON port
-          , showJSON minorA, showJSON minorB, showJSON key ]
-encodeDLId (LIDRados pool name) = JSArray [showJSON pool, showJSON name]
-encodeDLId (LIDFile driver name) = JSArray [showJSON driver, showJSON name]
-encodeDLId (LIDSharedFile driver name) =
-  JSArray [showJSON driver, showJSON name]
-encodeDLId (LIDGluster driver name) = JSArray [showJSON driver, showJSON name]
-encodeDLId (LIDBlockDev driver name) = JSArray [showJSON driver, showJSON name]
-encodeDLId (LIDExt extprovider name) =
-  JSArray [showJSON extprovider, showJSON name]
-
--- | Custom encoder for DiskLogicalId, composing both the logical id
--- and the extra disk_type field.
-encodeFullDLId :: DiskLogicalId -> (JSValue, [(String, JSValue)])
-encodeFullDLId v = (encodeDLId v, lidEncodeType v)
-
--- | Custom decoder for DiskLogicalId. This is manual for now, since
--- we don't have yet automation for separate-key style fields.
-decodeDLId :: [(String, JSValue)] -> JSValue -> J.Result DiskLogicalId
-decodeDLId obj lid = do
-  dtype <- fromObj obj devType
-  case dtype of
-    DTDrbd8 ->
-      case lid of
-        JSArray [nA, nB, p, mA, mB, k] -> do
-          nA' <- readJSON nA
-          nB' <- readJSON nB
-          p'  <- readJSON p
-          mA' <- readJSON mA
-          mB' <- readJSON mB
-          k'  <- readJSON k
-          return . LIDDrbd8 nA' nB' p' mA' mB' $ Private k'
-        _ -> fail "Can't read logical_id for DRBD8 type"
-    DTPlain ->
-      case lid of
-        JSArray [vg, lv] -> do
-          vg' <- readJSON vg
-          lv' <- readJSON lv
-          return $ LIDPlain (LogicalVolume vg' lv')
-        _ -> fail "Can't read logical_id for plain type"
-    DTFile ->
-      case lid of
-        JSArray [driver, path] -> do
-          driver' <- readJSON driver
-          path'   <- readJSON path
-          return $ LIDFile driver' path'
-        _ -> fail "Can't read logical_id for file type"
-    DTSharedFile ->
-      case lid of
-        JSArray [driver, path] -> do
-          driver' <- readJSON driver
-          path'   <- readJSON path
-          return $ LIDSharedFile driver' path'
-        _ -> fail "Can't read logical_id for shared file type"
-    DTGluster ->
-      case lid of
-        JSArray [driver, path] -> do
-          driver' <- readJSON driver
-          path'   <- readJSON path
-          return $ LIDGluster driver' path'
-        _ -> fail "Can't read logical_id for shared file type"
-    DTBlock ->
-      case lid of
-        JSArray [driver, path] -> do
-          driver' <- readJSON driver
-          path'   <- readJSON path
-          return $ LIDBlockDev driver' path'
-        _ -> fail "Can't read logical_id for blockdev type"
-    DTRbd ->
-      case lid of
-        JSArray [driver, path] -> do
-          driver' <- readJSON driver
-          path'   <- readJSON path
-          return $ LIDRados driver' path'
-        _ -> fail "Can't read logical_id for rdb type"
-    DTExt ->
-      case lid of
-        JSArray [extprovider, name] -> do
-          extprovider' <- readJSON extprovider
-          name'   <- readJSON name
-          return $ LIDExt extprovider' name'
-        _ -> fail "Can't read logical_id for extstorage type"
-    DTDiskless ->
-      fail "Retrieved 'diskless' disk."
-
--- | Disk data structure.
---
--- This is declared manually as it's a recursive structure, and our TH
--- code currently can't build it.
-data Disk = Disk
-  { diskLogicalId  :: DiskLogicalId
-  , diskChildren   :: [Disk]
-  , diskIvName     :: String
-  , diskSize       :: Int
-  , diskMode       :: DiskMode
-  , diskName       :: Maybe String
-  , diskSpindles   :: Maybe Int
-  , diskParams     :: Maybe DiskParams
-  , diskUuid       :: String
-  , diskSerial     :: Int
-  , diskCtime      :: ClockTime
-  , diskMtime      :: ClockTime
-  } deriving (Show, Eq)
-
-$(buildObjectSerialisation "Disk" $
-  [ customField 'decodeDLId 'encodeFullDLId ["dev_type"] $
-      simpleField "logical_id"    [t| DiskLogicalId   |]
-  , defaultField  [| [] |] $ simpleField "children" [t| [Disk] |]
-  , defaultField [| "" |] $ simpleField "iv_name" [t| String |]
-  , simpleField "size" [t| Int |]
-  , defaultField [| DiskRdWr |] $ simpleField "mode" [t| DiskMode |]
-  , optionalField $ simpleField "name" [t| String |]
-  , optionalField $ simpleField "spindles" [t| Int |]
-  , optionalField $ simpleField "params" [t| DiskParams |]
-  ]
-  ++ uuidFields
-  ++ serialFields
-  ++ timeStampFields)
-
-instance UuidObject Disk where
-  uuidOf = diskUuid
-
--- | Determines whether a disk or one of his children has the given logical id
--- (determined by the volume group name and by the logical volume name).
--- This can be true only for DRBD or LVM disks.
-includesLogicalId :: LogicalVolume -> Disk -> Bool
-includesLogicalId lv disk =
-  case diskLogicalId disk of
-    LIDPlain lv' -> lv' == lv
-    LIDDrbd8 {} ->
-      any (includesLogicalId lv) $ diskChildren disk
-    _ -> False
-
--- * Instance definitions
-
-$(buildParam "Be" "bep"
-  [ specialNumericalField 'parseUnitAssumeBinary
-      $ simpleField "minmem"      [t| Int  |]
-  , specialNumericalField 'parseUnitAssumeBinary
-      $ simpleField "maxmem"      [t| Int  |]
-  , simpleField "vcpus"           [t| Int  |]
-  , simpleField "auto_balance"    [t| Bool |]
-  , simpleField "always_failover" [t| Bool |]
-  , simpleField "spindle_use"     [t| Int  |]
-  ])
-
-$(buildObject "Instance" "inst" $
-  [ simpleField "name"             [t| String             |]
-  , simpleField "primary_node"     [t| String             |]
-  , simpleField "os"               [t| String             |]
-  , simpleField "hypervisor"       [t| Hypervisor         |]
-  , simpleField "hvparams"         [t| HvParams           |]
-  , simpleField "beparams"         [t| PartialBeParams    |]
-  , simpleField "osparams"         [t| OsParams           |]
-  , simpleField "osparams_private" [t| OsParamsPrivate    |]
-  , simpleField "admin_state"      [t| AdminState         |]
-  , simpleField "admin_state_source" [t| AdminStateSource   |]
-  , simpleField "nics"             [t| [PartialNic]       |]
-  , simpleField "disks"            [t| [String]           |]
-  , simpleField "disk_template"    [t| DiskTemplate       |]
-  , simpleField "disks_active"     [t| Bool               |]
-  , optionalField $ simpleField "network_port" [t| Int  |]
-  ]
-  ++ timeStampFields
-  ++ uuidFields
-  ++ serialFields
-  ++ tagsFields)
-
-instance TimeStampObject Instance where
-  cTimeOf = instCtime
-  mTimeOf = instMtime
-
-instance UuidObject Instance where
-  uuidOf = instUuid
-
-instance SerialNoObject Instance where
-  serialOf = instSerial
-
-instance TagsObject Instance where
-  tagsOf = instTags
-
 -- * IPolicy definitions
 
 $(buildParam "ISpec" "ispec"
@@ -678,8 +335,9 @@
   ])
 
 -- | Custom filler for the ipolicy types.
-fillIPolicy :: FilledIPolicy -> PartialIPolicy -> FilledIPolicy
-fillIPolicy (FilledIPolicy { ipolicyMinMaxISpecs  = fminmax
+instance PartialParams FilledIPolicy PartialIPolicy where
+  fillParams
+            (FilledIPolicy { ipolicyMinMaxISpecs  = fminmax
                            , ipolicyStdSpec       = fstd
                            , ipolicySpindleRatio  = fspindleRatio
                            , ipolicyVcpuRatio     = fvcpuRatio
@@ -689,15 +347,34 @@
                             , ipolicySpindleRatioP  = pspindleRatio
                             , ipolicyVcpuRatioP     = pvcpuRatio
                             , ipolicyDiskTemplatesP = pdiskTemplates}) =
-  FilledIPolicy { ipolicyMinMaxISpecs  = fromMaybe fminmax pminmax
-                , ipolicyStdSpec       = case pstd of
-                                         Nothing -> fstd
-                                         Just p -> fillISpecParams fstd p
+    FilledIPolicy
+                { ipolicyMinMaxISpecs  = fromMaybe fminmax pminmax
+                , ipolicyStdSpec       = maybe fstd (fillParams fstd) pstd
                 , ipolicySpindleRatio  = fromMaybe fspindleRatio pspindleRatio
                 , ipolicyVcpuRatio     = fromMaybe fvcpuRatio pvcpuRatio
                 , ipolicyDiskTemplates = fromMaybe fdiskTemplates
                                          pdiskTemplates
                 }
+  toPartial (FilledIPolicy { ipolicyMinMaxISpecs  = fminmax
+                           , ipolicyStdSpec       = fstd
+                           , ipolicySpindleRatio  = fspindleRatio
+                           , ipolicyVcpuRatio     = fvcpuRatio
+                           , ipolicyDiskTemplates = fdiskTemplates}) =
+    PartialIPolicy
+                { ipolicyMinMaxISpecsP  = Just fminmax
+                , ipolicyStdSpecP       = Just $ toPartial fstd
+                , ipolicySpindleRatioP  = Just fspindleRatio
+                , ipolicyVcpuRatioP     = Just fvcpuRatio
+                , ipolicyDiskTemplatesP = Just fdiskTemplates
+                }
+  toFilled (PartialIPolicy { ipolicyMinMaxISpecsP  = pminmax
+                           , ipolicyStdSpecP       = pstd
+                           , ipolicySpindleRatioP  = pspindleRatio
+                           , ipolicyVcpuRatioP     = pvcpuRatio
+                           , ipolicyDiskTemplatesP = pdiskTemplates}) =
+    FilledIPolicy <$> pminmax <*> (toFilled =<< pstd) <*>  pspindleRatio
+                  <*> pvcpuRatio <*> pdiskTemplates
+
 -- * Node definitions
 
 $(buildParam "ND" "ndp"
diff --git a/src/Ganeti/Objects/Disk.hs b/src/Ganeti/Objects/Disk.hs
new file mode 100644
index 0000000..181bf51
--- /dev/null
+++ b/src/Ganeti/Objects/Disk.hs
@@ -0,0 +1,274 @@
+{-# LANGUAGE TemplateHaskell, FunctionalDependencies #-}
+
+{-| Implementation of the Ganeti Disk config object.
+
+-}
+
+{-
+
+Copyright (C) 2014 Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-}
+
+module Ganeti.Objects.Disk where
+
+import Control.Applicative ((<*>), (<$>))
+import Data.Char (isAsciiLower, isAsciiUpper, isDigit)
+import Data.List (isPrefixOf, isInfixOf)
+import Language.Haskell.TH.Syntax
+import Text.JSON (showJSON, readJSON, JSValue(..))
+import qualified Text.JSON as J
+
+import Ganeti.JSON (Container, fromObj)
+import Ganeti.THH
+import Ganeti.THH.Field
+import Ganeti.Types
+import Ganeti.Utils.Validate
+
+-- | Constant for the dev_type key entry in the disk config.
+devType :: String
+devType = "dev_type"
+
+-- | The disk parameters type.
+type DiskParams = Container JSValue
+
+-- | An alias for DRBD secrets
+type DRBDSecret = String
+
+-- Represents a group name and a volume name.
+--
+-- From @man lvm@:
+--
+-- The following characters are valid for VG and LV names: a-z A-Z 0-9 + _ . -
+--
+-- VG  and LV names cannot begin with a hyphen.  There are also various reserved
+-- names that are used internally by lvm that can not be used as LV or VG names.
+-- A VG cannot be  called  anything  that exists in /dev/ at the time of
+-- creation, nor can it be called '.' or '..'.  A LV cannot be called '.' '..'
+-- 'snapshot' or 'pvmove'. The LV name may also not contain the strings '_mlog'
+-- or '_mimage'
+data LogicalVolume = LogicalVolume { lvGroup :: String
+                                   , lvVolume :: String
+                                   }
+  deriving (Eq, Ord)
+
+instance Show LogicalVolume where
+  showsPrec _ (LogicalVolume g v) =
+    showString g . showString "/" . showString v
+
+-- | Check the constraints for a VG/LV names (except the @\/dev\/@ check).
+instance Validatable LogicalVolume where
+  validate (LogicalVolume g v) = do
+      let vgn = "Volume group name"
+      -- Group name checks
+      nonEmpty vgn g
+      validChars vgn g
+      notStartsDash vgn g
+      notIn vgn g [".", ".."]
+      -- Volume name checks
+      let lvn = "Volume name"
+      nonEmpty lvn v
+      validChars lvn v
+      notStartsDash lvn v
+      notIn lvn v [".", "..", "snapshot", "pvmove"]
+      reportIf ("_mlog" `isInfixOf` v) $ lvn ++ " must not contain '_mlog'."
+      reportIf ("_mimage" `isInfixOf` v) $ lvn ++ "must not contain '_mimage'."
+    where
+      nonEmpty prefix x = reportIf (null x) $ prefix ++ " must be non-empty"
+      notIn prefix x =
+        mapM_ (\y -> reportIf (x == y)
+                              $ prefix ++ " must not be '" ++ y ++ "'")
+      notStartsDash prefix x = reportIf ("-" `isPrefixOf` x)
+                                 $ prefix ++ " must not start with '-'"
+      validChars prefix x =
+        reportIf (not . all validChar $ x)
+                 $ prefix ++ " must consist only of [a-z][A-Z][0-9][+_.-]"
+      validChar c = isAsciiLower c || isAsciiUpper c || isDigit c
+                    || (c `elem` "+_.-")
+
+instance J.JSON LogicalVolume where
+  showJSON = J.showJSON . show
+  readJSON (J.JSString s) | (g, _ : l) <- break (== '/') (J.fromJSString s) =
+    either fail return . evalValidate . validate' $ LogicalVolume g l
+  readJSON v = fail $ "Invalid JSON value " ++ show v
+                      ++ " for a logical volume"
+
+-- | The disk configuration type. This includes the disk type itself,
+-- for a more complete consistency. Note that since in the Python
+-- code-base there's no authoritative place where we document the
+-- logical id, this is probably a good reference point. There is a bijective
+-- correspondence between the 'DiskLogicalId' constructors and 'DiskTemplate'.
+data DiskLogicalId
+  = LIDPlain LogicalVolume  -- ^ Volume group, logical volume
+  | LIDDrbd8 String String Int Int Int (Private DRBDSecret)
+  -- ^ NodeA, NodeB, Port, MinorA, MinorB, Secret
+  | LIDFile FileDriver String -- ^ Driver, path
+  | LIDSharedFile FileDriver String -- ^ Driver, path
+  | LIDGluster FileDriver String -- ^ Driver, path
+  | LIDBlockDev BlockDriver String -- ^ Driver, path (must be under /dev)
+  | LIDRados String String -- ^ Unused, path
+  | LIDExt String String -- ^ ExtProvider, unique name
+    deriving (Show, Eq)
+
+-- | Mapping from a logical id to a disk type.
+lidDiskType :: DiskLogicalId -> DiskTemplate
+lidDiskType (LIDPlain {}) = DTPlain
+lidDiskType (LIDDrbd8 {}) = DTDrbd8
+lidDiskType (LIDFile  {}) = DTFile
+lidDiskType (LIDSharedFile  {}) = DTSharedFile
+lidDiskType (LIDGluster  {}) = DTGluster
+lidDiskType (LIDBlockDev {}) = DTBlock
+lidDiskType (LIDRados {}) = DTRbd
+lidDiskType (LIDExt {}) = DTExt
+
+-- | Builds the extra disk_type field for a given logical id.
+lidEncodeType :: DiskLogicalId -> [(String, JSValue)]
+lidEncodeType v = [(devType, showJSON . lidDiskType $ v)]
+
+-- | Custom encoder for DiskLogicalId (logical id only).
+encodeDLId :: DiskLogicalId -> JSValue
+encodeDLId (LIDPlain (LogicalVolume vg lv)) =
+  JSArray [showJSON vg, showJSON lv]
+encodeDLId (LIDDrbd8 nodeA nodeB port minorA minorB key) =
+  JSArray [ showJSON nodeA, showJSON nodeB, showJSON port
+          , showJSON minorA, showJSON minorB, showJSON key ]
+encodeDLId (LIDRados pool name) = JSArray [showJSON pool, showJSON name]
+encodeDLId (LIDFile driver name) = JSArray [showJSON driver, showJSON name]
+encodeDLId (LIDSharedFile driver name) =
+  JSArray [showJSON driver, showJSON name]
+encodeDLId (LIDGluster driver name) = JSArray [showJSON driver, showJSON name]
+encodeDLId (LIDBlockDev driver name) = JSArray [showJSON driver, showJSON name]
+encodeDLId (LIDExt extprovider name) =
+  JSArray [showJSON extprovider, showJSON name]
+
+-- | Custom encoder for DiskLogicalId, composing both the logical id
+-- and the extra disk_type field.
+encodeFullDLId :: DiskLogicalId -> (JSValue, [(String, JSValue)])
+encodeFullDLId v = (encodeDLId v, lidEncodeType v)
+
+-- | Custom decoder for DiskLogicalId. This is manual for now, since
+-- we don't have yet automation for separate-key style fields.
+decodeDLId :: [(String, JSValue)] -> JSValue -> J.Result DiskLogicalId
+decodeDLId obj lid = do
+  dtype <- fromObj obj devType
+  case dtype of
+    DTDrbd8 ->
+      case lid of
+        JSArray [nA, nB, p, mA, mB, k] ->
+          LIDDrbd8
+            <$> readJSON nA
+            <*> readJSON nB
+            <*> readJSON p
+            <*> readJSON mA
+            <*> readJSON mB
+            <*> readJSON k
+        _ -> fail "Can't read logical_id for DRBD8 type"
+    DTPlain ->
+      case lid of
+        JSArray [vg, lv] -> LIDPlain <$>
+          (LogicalVolume <$> readJSON vg <*> readJSON lv)
+        _ -> fail "Can't read logical_id for plain type"
+    DTFile ->
+      case lid of
+        JSArray [driver, path] ->
+          LIDFile
+            <$> readJSON driver
+            <*> readJSON path
+        _ -> fail "Can't read logical_id for file type"
+    DTSharedFile ->
+      case lid of
+        JSArray [driver, path] ->
+          LIDSharedFile
+            <$> readJSON driver
+            <*> readJSON path
+        _ -> fail "Can't read logical_id for shared file type"
+    DTGluster ->
+      case lid of
+        JSArray [driver, path] ->
+          LIDGluster
+            <$> readJSON driver
+            <*> readJSON path
+        _ -> fail "Can't read logical_id for shared file type"
+    DTBlock ->
+      case lid of
+        JSArray [driver, path] ->
+          LIDBlockDev
+            <$> readJSON driver
+            <*> readJSON path
+        _ -> fail "Can't read logical_id for blockdev type"
+    DTRbd ->
+      case lid of
+        JSArray [driver, path] ->
+          LIDRados
+            <$> readJSON driver
+            <*> readJSON path
+        _ -> fail "Can't read logical_id for rdb type"
+    DTExt ->
+      case lid of
+        JSArray [extprovider, name] ->
+          LIDExt
+            <$> readJSON extprovider
+            <*> readJSON name
+        _ -> fail "Can't read logical_id for extstorage type"
+    DTDiskless ->
+      fail "Retrieved 'diskless' disk."
+
+-- | Disk data structure.
+
+$(buildObjectWithForthcoming "Disk" "disk" $
+  [ customField 'decodeDLId 'encodeFullDLId ["dev_type"] $
+      simpleField "logical_id"    [t| DiskLogicalId   |]
+  , defaultField  [| [] |]
+      $ simpleField "children" (return . AppT ListT . ConT $ mkName "Disk")
+  , defaultField  [| [] |] $ simpleField "nodes" [t| [String] |]
+  , defaultField [| "" |] $ simpleField "iv_name" [t| String |]
+  , simpleField "size" [t| Int |]
+  , defaultField [| DiskRdWr |] $ simpleField "mode" [t| DiskMode |]
+  , optionalField $ simpleField "name" [t| String |]
+  , optionalField $ simpleField "spindles" [t| Int |]
+  , optionalField $ simpleField "params" [t| DiskParams |]
+  ]
+  ++ uuidFields
+  ++ serialFields
+  ++ timeStampFields)
+
+instance UuidObject Disk where
+  uuidOf = diskUuid
+
+instance ForthcomingObject Disk where
+  isForthcoming = diskForthcoming
+
+-- | Determines whether a disk or one of its children has the given logical id
+-- (determined by the volume group name and by the logical volume name).
+-- This can be true only for DRBD or LVM disks.
+includesLogicalId :: LogicalVolume -> Disk -> Bool
+includesLogicalId lv disk =
+  case diskLogicalId disk of
+    Just (LIDPlain lv') -> lv' == lv
+    Just (LIDDrbd8 {}) ->
+      any (includesLogicalId lv) $ diskChildren disk
+    _ -> False
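
As before, the logical id is serialised as a bare array plus a separate
dev_type discriminator, which decodeDLId reads back first. For a plain LVM
disk, for example (values illustrative):

    -- encodeFullDLId (LIDPlain (LogicalVolume "xenvg" "disk0"))
    --   == ( JSArray [showJSON "xenvg", showJSON "disk0"]
    --      , [("dev_type", showJSON DTPlain)] )
    -- i.e. a logical_id of ["xenvg", "disk0"] plus the plain disk template
    -- as dev_type. The forthcoming variant apparently allows logical_id to
    -- be absent, which is why includesLogicalId now matches on Just.
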
diff --git a/src/Ganeti/Objects/Instance.hs b/src/Ganeti/Objects/Instance.hs
new file mode 100644
index 0000000..238898f
--- /dev/null
+++ b/src/Ganeti/Objects/Instance.hs
@@ -0,0 +1,100 @@
+{-# LANGUAGE TemplateHaskell, FunctionalDependencies #-}
+
+{-| Implementation of the Ganeti Instance config object.
+
+-}
+
+{-
+
+Copyright (C) 2014 Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-}
+
+module Ganeti.Objects.Instance where
+
+import Data.Monoid
+
+import Ganeti.JSON (emptyContainer)
+import Ganeti.Objects.Nic
+import Ganeti.THH
+import Ganeti.THH.Field
+import Ganeti.Types
+import Ganeti.Utils (parseUnitAssumeBinary)
+
+$(buildParam "Be" "bep"
+  [ specialNumericalField 'parseUnitAssumeBinary
+      $ simpleField "minmem"      [t| Int  |]
+  , specialNumericalField 'parseUnitAssumeBinary
+      $ simpleField "maxmem"      [t| Int  |]
+  , simpleField "vcpus"           [t| Int  |]
+  , simpleField "auto_balance"    [t| Bool |]
+  , simpleField "always_failover" [t| Bool |]
+  , simpleField "spindle_use"     [t| Int  |]
+  ])
+
+$(buildObjectWithForthcoming "Instance" "inst" $
+  [ simpleField "name"             [t| String             |]
+  , simpleField "primary_node"     [t| String             |]
+  , simpleField "os"               [t| String             |]
+  , simpleField "hypervisor"       [t| Hypervisor         |]
+  , defaultField [| emptyContainer |]
+      $ simpleField "hvparams"     [t| HvParams           |]
+  , defaultField [| mempty |]
+      $ simpleField "beparams"     [t| PartialBeParams    |]
+  , defaultField [| emptyContainer |]
+      $ simpleField "osparams"     [t| OsParams           |]
+  , defaultField [| emptyContainer |]
+      $ simpleField "osparams_private" [t| OsParamsPrivate |]
+  , simpleField "admin_state"      [t| AdminState         |]
+  , simpleField "admin_state_source" [t| AdminStateSource   |]
+  , defaultField [| [] |]
+      $ simpleField "nics"         [t| [PartialNic]       |]
+  , defaultField [| [] |]
+      $ simpleField "disks"        [t| [String]           |]
+  , simpleField "disks_active"     [t| Bool               |]
+  , optionalField $ simpleField "network_port" [t| Int  |]
+  ]
+  ++ timeStampFields
+  ++ uuidFields
+  ++ serialFields
+  ++ tagsFields)
+
+instance TimeStampObject Instance where
+  cTimeOf = instCtime
+  mTimeOf = instMtime
+
+instance UuidObject Instance where
+  uuidOf = instUuid
+
+instance SerialNoObject Instance where
+  serialOf = instSerial
+
+instance TagsObject Instance where
+  tagsOf = instTags
+
+instance ForthcomingObject Instance where
+  isForthcoming = instForthcoming
diff --git a/src/Ganeti/Objects/Nic.hs b/src/Ganeti/Objects/Nic.hs
new file mode 100644
index 0000000..1c6f9bb
--- /dev/null
+++ b/src/Ganeti/Objects/Nic.hs
@@ -0,0 +1,59 @@
+{-# LANGUAGE TemplateHaskell, FunctionalDependencies #-}
+
+{-| Implementation of the Ganeti NIC config object.
+
+-}
+
+{-
+
+Copyright (C) 2014 Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-}
+
+module Ganeti.Objects.Nic where
+
+import Ganeti.THH
+import Ganeti.THH.Field
+import Ganeti.Types
+
+$(buildParam "Nic" "nicp"
+  [ simpleField "mode" [t| NICMode |]
+  , simpleField "link" [t| String  |]
+  , simpleField "vlan" [t| String |]
+  ])
+
+$(buildObject "PartialNic" "nic" $
+  [ simpleField "mac" [t| String |]
+  , optionalField $ simpleField "ip" [t| String |]
+  , simpleField "nicparams" [t| PartialNicParams |]
+  , optionalField $ simpleField "network" [t| String |]
+  , optionalField $ simpleField "name" [t| String |]
+  ] ++ uuidFields)
+
+instance UuidObject PartialNic where
+  uuidOf = nicUuid
+
diff --git a/src/Ganeti/OpCodes.hs b/src/Ganeti/OpCodes.hs
index d318829..37b645e 100644
--- a/src/Ganeti/OpCodes.hs
+++ b/src/Ganeti/OpCodes.hs
@@ -437,6 +437,7 @@
      , pRemoteNodeUuid
      , pIallocator
      , pEvacMode
+     , pIgnoreSoftErrors
      ],
      "node_name")
   , ("OpInstanceCreate",
@@ -480,6 +481,8 @@
      , pSrcPath
      , pBackupCompress
      , pStartInstance
+     , pForthcoming
+     , pCommit
      , pInstTags
      , pInstanceCommunication
      , pHelperStartupTimeout
@@ -628,7 +631,7 @@
      ],
      "instance_name")
   , ("OpInstanceActivateDisks",
-     [t| [(NonEmptyString, NonEmptyString, NonEmptyString)] |],
+     [t| [(NonEmptyString, NonEmptyString, Maybe NonEmptyString)] |],
      OpDoc.opInstanceActivateDisks,
      [ pInstanceName
      , pInstanceUuid
@@ -816,6 +819,7 @@
      , pZeroFreeSpace
      , pZeroingTimeoutFixed
      , pZeroingTimeoutPerMiB
+     , pLongSleep
      ],
      "instance_name")
   , ("OpBackupRemove",
@@ -881,7 +885,7 @@
      , pIAllocatorMemory
      , pIAllocatorVCpus
      , pIAllocatorOs
-     , pDiskTemplate
+     , pOptDiskTemplate
      , pIAllocatorInstances
      , pIAllocatorEvacMode
      , pTargetGroups
diff --git a/src/Ganeti/OpParams.hs b/src/Ganeti/OpParams.hs
index ee5aa27..79d476e 100644
--- a/src/Ganeti/OpParams.hs
+++ b/src/Ganeti/OpParams.hs
@@ -190,6 +190,7 @@
   , pRemoteNode
   , pRemoteNodeUuid
   , pEvacMode
+  , pIgnoreSoftErrors
   , pInstCreateMode
   , pNoInstall
   , pInstOs
@@ -205,6 +206,8 @@
   , pSrcNodeUuid
   , pSrcPath
   , pStartInstance
+  , pForthcoming
+  , pCommit
   , pInstTags
   , pMultiAllocInstances
   , pTempOsParams
@@ -299,6 +302,7 @@
   , pSshKeys
   , pNodeSetup
   , pVerifyClutter
+  , pLongSleep
   ) where
 
 import Control.Monad (liftM, mplus)
@@ -1085,6 +1089,12 @@
   withDoc "Node evacuation mode" .
   renameField "EvacMode" $ simpleField "mode" [t| EvacMode |]
 
+pIgnoreSoftErrors :: Field
+pIgnoreSoftErrors =
+  withDoc "Ignore soft htools errors" .
+  optionalField $
+  booleanField "ignore_soft_errors"
+
 pInstanceName :: Field
 pInstanceName =
   withDoc "A required instance name (for single-instance LUs)" $
@@ -1281,6 +1291,16 @@
   withDoc "Whether to start instance after creation" $
   defaultTrue "start"
 
+pForthcoming :: Field
+pForthcoming =
+  withDoc "Whether to only reserve resources" $
+  defaultFalse "forthcoming"
+
+pCommit :: Field
+pCommit =
+  withDoc "Commit the already reserved instance" $
+  defaultFalse "commit"
+
 -- FIXME: unify/simplify with pTags, once that migrates to NonEmpty String"
 pInstTags :: Field
 pInstTags =
@@ -1892,3 +1912,9 @@
   withDoc "Whether to check for clutter in the 'authorized_keys' file." .
   defaultField [| False |] $
   simpleField "verify_clutter" [t| Bool |]
+
+pLongSleep :: Field
+pLongSleep =
+  withDoc "Whether to allow long instance shutdowns during exports" .
+  defaultField [| False |] $
+  simpleField "long_sleep" [t| Bool |]
\ No newline at end of file
diff --git a/src/Ganeti/PartialParams.hs b/src/Ganeti/PartialParams.hs
new file mode 100644
index 0000000..3c79810
--- /dev/null
+++ b/src/Ganeti/PartialParams.hs
@@ -0,0 +1,72 @@
+{-# LANGUAGE FunctionalDependencies #-}
+
+{-| Common functions for partial parameters -}
+
+{-
+
+Copyright (C) 2012 Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-}
+
+module Ganeti.PartialParams
+  ( PartialParams(..)
+  , isComplete
+  ) where
+
+import Data.Maybe (isJust)
+
+-- | Represents that data type @p@ provides partial values for
+-- data type @f@.
+--
+-- Note: To avoid needless type annotations, the functional dependencies
+-- currently include @f -> p@. However, in theory it'd be possible for one
+-- filled data type to have several partially filled ones.
+--
+-- Laws:
+--
+-- 1. @fillParams (fillParams f p) p = fillParams f p@.
+-- 2. @fillParams _ (toPartial x) = x@.
+-- 3. @toFilled (toPartial x) = Just x@.
+--
+-- If @p@ is also a 'Monoid' (or just 'Semigroup'), 'fillParams' is a monoid
+-- (semigroup) action on @f@, therefore it should additionally satisfy:
+--
+-- - @fillParams f mempty = f@
+-- - @fillParams f (p1 <> p2) = fillParams (fillParams f p1) p2@
+class PartialParams f p | p -> f, f -> p where
+  -- | Fill @f@ with any data that are set in @p@.
+  -- Leave other parts of @f@ unchanged.
+  fillParams :: f -> p -> f
+  -- | Fill all fields of @p@ from @f@.
+  toPartial :: f -> p
+  -- | If all fields of @p@ are filled, convert it into @f@.
+  toFilled :: p -> Maybe f
+
+-- | Returns 'True' if the given partial parameters are complete.
+-- See 'toFilled'.
+isComplete :: (PartialParams f p) => p -> Bool
+isComplete = isJust . toFilled
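As a quick illustration of the laws above, an instance for a hypothetical two-field record (not part of this patch; 'fromMaybe' and the Applicative operators are assumed to be in scope) could look like:

    data FilledFoo  = FilledFoo  { fooA  :: Int,       fooB  :: String }
    data PartialFoo = PartialFoo { fooAP :: Maybe Int, fooBP :: Maybe String }

    instance PartialParams FilledFoo PartialFoo where
      fillParams (FilledFoo a b) (PartialFoo ma mb) =
        FilledFoo (fromMaybe a ma) (fromMaybe b mb)
      toPartial (FilledFoo a b)   = PartialFoo (Just a) (Just b)
      toFilled (PartialFoo ma mb) = FilledFoo <$> ma <*> mb

    -- isComplete (PartialFoo (Just 1) Nothing)  == False
    -- isComplete (toPartial (FilledFoo 1 "x"))  == True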
diff --git a/src/Ganeti/Query/Common.hs b/src/Ganeti/Query/Common.hs
index 62bd96f..6f8e126 100644
--- a/src/Ganeti/Query/Common.hs
+++ b/src/Ganeti/Query/Common.hs
@@ -47,6 +47,7 @@
   , timeStampFields
   , uuidFields
   , serialFields
+  , forthcomingFields
   , tagsFields
   , dictFieldGetter
   , buildNdParamField
@@ -161,6 +162,15 @@
      FieldSimple (rsNormal . TimeAsDoubleJSON . mTimeOf), QffNormal)
   ]
 
+-- | The field list for the property of being forthcoming.
+forthcomingFields :: (ForthcomingObject a) => String -> FieldList a b
+forthcomingFields name =
+  [ ( FieldDefinition "forthcoming" "Forthcoming" QFTBool
+      $ "whether the " ++ name ++ " is forthcoming"
+    , FieldSimple (rsNormal . isForthcoming), QffNormal
+    )
+  ]
+
 -- | The list of UUID fields.
 uuidFields :: (UuidObject a) => String -> FieldList a b
 uuidFields name =
diff --git a/src/Ganeti/Query/Group.hs b/src/Ganeti/Query/Group.hs
index a224aff..c15906c 100644
--- a/src/Ganeti/Query/Group.hs
+++ b/src/Ganeti/Query/Group.hs
@@ -35,6 +35,8 @@
 module Ganeti.Query.Group
   (fieldsMap) where
 
+import Data.Maybe (mapMaybe)
+
 import Ganeti.Config
 import Ganeti.Objects
 import Ganeti.Query.Language
@@ -79,7 +81,7 @@
      QffNormal)
   , (FieldDefinition "pinst_list" "InstanceList" QFTOther
        "List of primary instances",
-     FieldConfig (\cfg -> rsNormal . niceSort . map instName . fst .
+     FieldConfig (\cfg -> rsNormal . niceSort . mapMaybe instName . fst .
                           getGroupInstances cfg . groupUuid), QffNormal)
   ] ++
   map buildNdParamField allNDParamFields ++
diff --git a/src/Ganeti/Query/Instance.hs b/src/Ganeti/Query/Instance.hs
index 132b31b..328bb9b 100644
--- a/src/Ganeti/Query/Instance.hs
+++ b/src/Ganeti/Query/Instance.hs
@@ -42,6 +42,7 @@
   ) where
 
 import Control.Applicative
+import Control.Monad (liftM, (>=>))
 import Data.Either
 import Data.List
 import Data.Maybe
@@ -108,32 +109,32 @@
   -- Simple fields
   [ (FieldDefinition "admin_state" "InstanceState" QFTText
      "Desired state of instance",
-     FieldSimple (rsNormal . adminStateToRaw . instAdminState), QffNormal)
+     FieldSimple (rsMaybeNoData . liftM adminStateToRaw . instAdminState),
+     QffNormal)
   , (FieldDefinition "admin_state_source" "InstanceStateSource" QFTText
      "Who last changed the desired state of the instance",
-     FieldSimple (rsNormal . adminStateSourceToRaw . instAdminStateSource),
+     FieldSimple (rsMaybeNoData . liftM adminStateSourceToRaw
+                  . instAdminStateSource),
      QffNormal)
   , (FieldDefinition "admin_up" "Autostart" QFTBool
      "Desired state of instance",
-     FieldSimple (rsNormal . (== AdminUp) . instAdminState), QffNormal)
-  , (FieldDefinition "disk_template" "Disk_template" QFTText
-     "Instance disk template",
-     FieldSimple (rsNormal . instDiskTemplate), QffNormal)
+     FieldSimple (rsMaybeNoData . liftM (== AdminUp) . instAdminState),
+     QffNormal)
   , (FieldDefinition "disks_active" "DisksActive" QFTBool
      "Desired state of instance disks",
-     FieldSimple (rsNormal . instDisksActive), QffNormal)
+     FieldSimple (rsMaybeNoData . instDisksActive), QffNormal)
   , (FieldDefinition "name" "Instance" QFTText
      "Instance name",
-     FieldSimple (rsNormal . instName), QffHostname)
+     FieldSimple (rsMaybeNoData . instName), QffHostname)
   , (FieldDefinition "hypervisor" "Hypervisor" QFTText
      "Hypervisor name",
-     FieldSimple (rsNormal . instHypervisor), QffNormal)
+     FieldSimple (rsMaybeNoData . instHypervisor), QffNormal)
   , (FieldDefinition "network_port" "Network_port" QFTOther
      "Instance network port if available (e.g. for VNC console)",
      FieldSimple (rsMaybeUnavail . instNetworkPort), QffNormal)
   , (FieldDefinition "os" "OS" QFTText
      "Operating system",
-     FieldSimple (rsNormal . instOs), QffNormal)
+     FieldSimple (rsMaybeNoData . instOs), QffNormal)
   , (FieldDefinition "pnode" "Primary_node" QFTText
      "Primary node",
      FieldConfig getPrimaryNodeName, QffHostname)
@@ -186,7 +187,7 @@
   map (buildHvParamField hvParamGetter)
       (C.toList C.hvsParameters \\ C.toList C.hvcGlobals) ++
 
-  -- Aggregate disk parameter fields
+  -- disk parameter fields
   [ (FieldDefinition "disk_usage" "DiskUsage" QFTUnit
      "Total disk space used by instance on each of its nodes; this is not the\
      \ disk size visible to the instance, but the usage on the node",
@@ -195,8 +196,7 @@
      "Number of disks",
      FieldSimple (rsNormal . length . instDisks), QffNormal)
   , (FieldDefinition "disk.sizes" "Disk_sizes" QFTOther
-     "List of disk sizes",
-     FieldConfig getDiskSizes, QffNormal)
+     "List of disk sizes", FieldConfig getDiskSizes, QffNormal)
   , (FieldDefinition "disk.spindles" "Disk_spindles" QFTOther
      "List of disk spindles",
      FieldConfig getDiskSpindles, QffNormal)
@@ -206,16 +206,22 @@
   , (FieldDefinition "disk.uuids" "Disk_UUIDs" QFTOther
      "List of disk UUIDs",
      FieldConfig getDiskUuids, QffNormal)
+    -- For pre-2.14 backwards compatibility
+  , (FieldDefinition "disk_template" "Disk_template" QFTText
+     "Instance disk template",
+     FieldConfig getDiskTemplate, QffNormal)
   ] ++
 
   -- Per-disk parameter fields
   instantiateIndexedFields C.maxDisks
   [ (fieldDefinitionCompleter "disk.size/%d" "Disk/%d" QFTUnit
     "Disk size of %s disk",
-    getIndexedConfField getInstDisksFromObj diskSize, QffNormal)
+    getIndexedOptionalConfField getInstDisksFromObj diskSize,
+    QffNormal)
   , (fieldDefinitionCompleter "disk.spindles/%d" "DiskSpindles/%d" QFTNumber
     "Spindles of %s disk",
-    getIndexedOptionalConfField getInstDisksFromObj diskSpindles, QffNormal)
+    getIndexedOptionalConfField getInstDisksFromObj diskSpindles,
+    QffNormal)
   , (fieldDefinitionCompleter "disk.name/%d" "DiskName/%d" QFTText
     "Name of %s disk",
     getIndexedOptionalConfField getInstDisksFromObj diskName, QffNormal)
@@ -262,8 +268,7 @@
      (nicAggDescPrefix ++ "link"),
      FieldConfig (\cfg -> rsNormal . map
        (nicpLink . fillNicParamsFromConfig cfg . nicNicparams)
-       . instNics),
-     QffNormal)
+       . instNics), QffNormal)
   , (FieldDefinition "nic.networks" "NIC_networks" QFTOther
      "List containing each interface's network",
      FieldSimple (rsNormal . map (MaybeForJSON . nicNetwork) . instNics),
@@ -330,6 +335,7 @@
   timeStampFields ++
   serialFields "Instance" ++
   uuidFields "Instance" ++
+  forthcomingFields "Instance" ++
   tagsFields
 
 -- * Helper functions for node property retrieval
@@ -360,7 +366,7 @@
 
 -- | Fill partial NIC params by using the defaults from the configuration.
 fillNicParamsFromConfig :: ConfigData -> PartialNicParams -> FilledNicParams
-fillNicParamsFromConfig cfg = fillNicParams (getDefaultNicParams cfg)
+fillNicParamsFromConfig cfg = fillParams (getDefaultNicParams cfg)
 
 -- | Retrieves the default network interface parameters.
 getDefaultNicParams :: ConfigData -> FilledNicParams
@@ -372,20 +378,23 @@
 -- visible to the instance.
 getDiskSizeRequirements :: ConfigData -> Instance -> ResultEntry
 getDiskSizeRequirements cfg inst =
-  rsErrorNoData . liftA (sum . map getSizes) . getInstDisksFromObj cfg $ inst
+  rsErrorNoData . liftA (sum . map getSize) . getInstDisksFromObj cfg $ inst
  where
-  getSizes :: Disk -> Int
-  getSizes disk =
-    case instDiskTemplate inst of
-      DTDrbd8 -> diskSize disk + C.drbdMetaSize
-      DTDiskless -> 0
-      DTBlock    -> 0
-      _          -> diskSize disk
+  diskType x = lidDiskType <$> diskLogicalId x
+  getSize :: Disk -> Int
+  getSize disk =
+    let dt = diskType disk
+    in case dt of
+         Just DTDrbd8    -> fromMaybe 0 (diskSize disk) + C.drbdMetaSize
+         Just DTDiskless -> 0
+         Just DTBlock    -> 0
+         _               -> fromMaybe 0 (diskSize disk)
 
 -- | Get a list of disk sizes for an instance
 getDiskSizes :: ConfigData -> Instance -> ResultEntry
 getDiskSizes cfg =
-  rsErrorNoData . liftA (map diskSize) . getInstDisksFromObj cfg
+  rsErrorNoData . liftA (map $ MaybeForJSON . diskSize)
+  . getInstDisksFromObj cfg
 
 -- | Get a list of disk spindles
 getDiskSpindles :: ConfigData -> Instance -> ResultEntry
@@ -454,7 +463,7 @@
                            -> FieldGetter Instance Runtime
 getOptionalIndexedNicField =
   getIndexedFieldWithDefault
-    (map nicNicparams . instNics) (\x _ -> getDefaultNicParams x) fillNicParams
+    (map nicNicparams . instNics) (\x _ -> getDefaultNicParams x) fillParams
 
 -- | Creates a function which produces a 'FieldGetter' when fed an index. Works
 -- for fields that should be filled out through the use of a default.
@@ -550,7 +559,9 @@
 
 -- | Helper function for primary node retrieval
 getPrimaryNode :: ConfigData -> Instance -> ErrorResult Node
-getPrimaryNode cfg = getInstPrimaryNode cfg . instName
+getPrimaryNode cfg = maybe (Bad $ ParameterError "no primary node") return
+                       . instName
+                     >=> getInstPrimaryNode cfg
 
 -- | Get primary node hostname
 getPrimaryNodeName :: ConfigData -> Instance -> ResultEntry
@@ -577,7 +588,8 @@
 getSecondaryNodes :: ConfigData -> Instance -> ErrorResult [Node]
 getSecondaryNodes cfg inst = do
   pNode <- getPrimaryNode cfg inst
-  allNodes <- getInstAllNodes cfg $ instName inst
+  iname <- maybe (Bad $ ParameterError "no name") return $ instName inst
+  allNodes <- getInstAllNodes cfg iname
   return $ delete pNode allNodes
 
 -- | Get attributes of the secondary nodes
@@ -688,7 +700,9 @@
 -- | Checks if the primary node of an instance is offline
 isPrimaryOffline :: ConfigData -> Instance -> Bool
 isPrimaryOffline cfg inst =
-  let pNodeResult = getNode cfg $ instPrimaryNode inst
+  let pNodeResult = maybe (Bad $ ParameterError "no primary node") return
+                          (instPrimaryNode inst)
+                    >>= getNode cfg
   in case pNodeResult of
      Ok pNode -> nodeOffline pNode
      Bad    _ -> error "Programmer error - result assumed to be OK is Bad!"
@@ -707,11 +721,11 @@
   | otherwise =
     case instanceState of
       InstanceStateRunning
-        | adminState == AdminUp -> Running
+        | adminState == Just AdminUp -> Running
         | otherwise -> ErrorUp
       InstanceStateShutdown
-        | adminState == AdminUp && allowDown -> UserDown
-        | adminState == AdminUp -> ErrorDown
+        | adminState == Just AdminUp && allowDown -> UserDown
+        | adminState == Just AdminUp -> ErrorDown
         | otherwise -> StatusDown
   where adminState = instAdminState inst
         instanceState = instInfoState instInfo
@@ -721,7 +735,7 @@
 
         allowDown =
           userShutdownEnabled cfg &&
-          (instHypervisor inst /= Kvm ||
+          (instHypervisor inst /= Just Kvm ||
            (Map.member C.hvKvmUserShutdown hvparams &&
             hvparams Map.! C.hvKvmUserShutdown == J.JSBool True))
 
@@ -729,11 +743,12 @@
 deadInstanceStatus :: ConfigData -> Instance -> InstanceStatus
 deadInstanceStatus cfg inst =
   case instAdminState inst of
-    AdminUp -> ErrorDown
-    AdminDown | wasCleanedUp && userShutdownEnabled cfg -> UserDown
-              | otherwise -> StatusDown
-    AdminOffline -> StatusOffline
-  where wasCleanedUp = instAdminStateSource inst == UserSource
+    Just AdminUp -> ErrorDown
+    Just AdminDown | wasCleanedUp && userShutdownEnabled cfg -> UserDown
+                   | otherwise -> StatusDown
+    Just AdminOffline -> StatusOffline
+    Nothing -> StatusDown
+  where wasCleanedUp = instAdminStateSource inst == Just UserSource
 
 -- | Determines the status of the instance, depending on whether it is possible
 -- to communicate with its primary node, on which node it is, and its
@@ -793,7 +808,7 @@
     Left  _err    -> Nothing
     Right allInfo ->
       let instances = rpcResAllInstInfoInstances allInfo
-          maybeMatch = pickPairUnique (instName inst) instances
+          maybeMatch = instName inst >>= (`pickPairUnique` instances)
       in snd <$> maybeMatch
 
 -- | Retrieves the instance information if it is present anywhere in the all
@@ -803,21 +818,24 @@
                 -> Instance
                 -> ERpcError (Maybe (InstanceInfo, Bool))
 getInstanceInfo uuidList inst =
-  let pNodeUuid = instPrimaryNode inst
-      primarySearchResult =
-        pickPairUnique pNodeUuid uuidList >>= findInfoInNodeResult inst . snd
-  in case primarySearchResult of
-       Just instInfo -> Right . Just $ (instInfo, True)
-       Nothing       ->
-         let allSearchResult =
-               getFirst . mconcat $ map
-               (First . findInfoInNodeResult inst . snd) uuidList
-         in case allSearchResult of
-              Just instInfo -> Right . Just $ (instInfo, False)
-              Nothing       ->
-                case checkForNodeError uuidList pNodeUuid of
-                  Just err -> Left err
-                  Nothing  -> Right Nothing
+  case instPrimaryNode inst of
+    Nothing -> Right Nothing
+    Just pNodeUuid ->
+      let primarySearchResult =
+            pickPairUnique pNodeUuid uuidList >>= findInfoInNodeResult inst
+                                                    . snd
+      in case primarySearchResult of
+           Just instInfo -> Right . Just $ (instInfo, True)
+           Nothing       ->
+             let allSearchResult =
+                   getFirst . mconcat $ map
+                   (First . findInfoInNodeResult inst . snd) uuidList
+             in case allSearchResult of
+                  Just instInfo -> Right . Just $ (instInfo, False)
+                  Nothing       ->
+                    case checkForNodeError uuidList pNodeUuid of
+                      Just err -> Left err
+                      Nothing  -> Right Nothing
 
 -- | Retrieves the console information if present anywhere in the given results
 getConsoleInfo :: [(String, ERpcError RpcResultInstanceConsoleInfo)]
@@ -826,7 +844,7 @@
 getConsoleInfo uuidList inst =
   let allValidResults = concatMap rpcResInstConsInfoInstancesInfo .
                         rights . map snd $ uuidList
-  in snd <$> pickPairUnique (instName inst) allValidResults
+  in snd <$> (instName inst >>= flip pickPairUnique allValidResults)
 
 -- | Extracts all the live information that can be extracted.
 extractLiveInfo :: [(Node, ERpcError RpcResultAllInstancesInfo)]
@@ -871,15 +889,18 @@
             [] -> error "Programmer error: group must have one or more members"
             paramGroup@(y:_) ->
               let node = instConsInfoParamsNode y
-                  packer z = (instName $ instConsInfoParamsInstance z, z)
-              in (node, RpcCallInstanceConsoleInfo . map packer $ paramGroup)
+                  packer z = do
+                              name <- instName $ instConsInfoParamsInstance z
+                              return (name, z)
+              in (node, RpcCallInstanceConsoleInfo . mapMaybe packer
+                          $ paramGroup)
          ) groupedParams
 
 -- | Retrieves a list of all the hypervisors and params used by the given
 -- instances.
 getHypervisorSpecs :: ConfigData -> [Instance] -> [(Hypervisor, HvParams)]
 getHypervisorSpecs cfg instances =
-  let hvs = nub . map instHypervisor $ instances
+  let hvs = nub . mapMaybe instHypervisor $ instances
       hvParamMap = (fromContainer . clusterHvparams . configCluster $ cfg)
   in zip hvs . map ((Map.!) hvParamMap) $ hvs
 
@@ -894,8 +915,11 @@
                             RpcResultError $ "Live data disabled"
   | otherwise = do
       let hvSpecs = getHypervisorSpecs cfg instances
-          instanceNodes = nub . justOk $
-                            map (getNode cfg . instPrimaryNode) instances
+          instanceNodes =
+            nub . justOk
+                $ map ( maybe (Bad $ ParameterError "no primary node") return
+                       . instPrimaryNode
+                       >=> getNode cfg) instances
           goodNodes = nodesWithValidConfig cfg instanceNodes
       instInfoRes <- executeRpcCall goodNodes (RpcCallAllInstancesInfo hvSpecs)
       consInfoRes <-
@@ -907,3 +931,18 @@
           else return [] -- The information is not necessary
       return . zip instances .
         map (extractLiveInfo instInfoRes consInfoRes) $ instances
+
+-- | An aggregate disk attribute for backward compatibility.
+getDiskTemplate :: ConfigData -> Instance -> ResultEntry
+getDiskTemplate cfg inst =
+  let disks = getInstDisksFromObj cfg inst
+      getDt x = lidDiskType <$> diskLogicalId x
+      disk_types :: ErrorResult [DiskTemplate]
+      disk_types = nub <$> catMaybes <$> map getDt <$> disks
+      mix :: [DiskTemplate] -> J.JSValue
+      mix []  = J.showJSON C.dtDiskless
+      mix [t] = J.showJSON t
+      mix _   = J.showJSON C.dtMixed
+  in case mix <$> disk_types of
+       Ok t -> rsNormal t
+       Bad _ -> rsNoData
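The local 'mix' above collapses the per-disk templates into the single legacy value; as a sketch of that dispatch (constructors and constants as used elsewhere in this patch):

    -- mix []                 -> J.showJSON C.dtDiskless   (no disks at all)
    -- mix [DTFile]           -> J.showJSON DTFile         (one template, e.g. "file")
    -- mix [DTFile, DTDrbd8]  -> J.showJSON C.dtMixed      (several templates in use)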
diff --git a/src/Ganeti/Query/Language.hs b/src/Ganeti/Query/Language.hs
index aac2d06..882a9da 100644
--- a/src/Ganeti/Query/Language.hs
+++ b/src/Ganeti/Query/Language.hs
@@ -73,7 +73,7 @@
 import Text.JSON.Pretty (pp_value)
 import Text.JSON.Types
 import Text.JSON
-#ifndef NO_REGEX_PCRE
+#ifdef VERSION_regex_pcre
 import qualified Text.Regex.PCRE as PCRE
 #endif
 
@@ -168,7 +168,7 @@
 -- * Sub data types for query2 queries and responses.
 
 -- | Internal type of a regex expression (not exported).
-#ifndef NO_REGEX_PCRE
+#ifdef VERSION_regex_pcre
 type RegexType = PCRE.Regex
 #else
 type RegexType = ()
@@ -339,7 +339,7 @@
 -- regular expression on the initialisation of the data structure;
 -- this might fail, if the RE is not well-formed.
 mkRegex :: (Monad m) => String -> m FilterRegex
-#ifndef NO_REGEX_PCRE
+#ifdef VERSION_regex_pcre
 mkRegex str = do
   compiled <- case PCRE.getVersion of
                 Nothing -> fail $ "regex-pcre library compiled without" ++
diff --git a/src/Ganeti/Query/Network.hs b/src/Ganeti/Query/Network.hs
index 68dc925..46b6a61 100644
--- a/src/Ganeti/Query/Network.hs
+++ b/src/Ganeti/Query/Network.hs
@@ -148,7 +148,7 @@
 -- | Retrieves the network's instances' names.
 getInstances :: ConfigData -> String -> [String]
 getInstances cfg network_uuid =
-  map instName (filter (instIsConnected network_uuid)
+  mapMaybe instName (filter (instIsConnected network_uuid)
     ((Map.elems . fromContainer . configInstances) cfg))
 
 -- | Helper function that checks if an instance is linked to the given network.
diff --git a/src/Ganeti/Query/Node.hs b/src/Ganeti/Query/Node.hs
index c76dff8..17c3469 100644
--- a/src/Ganeti/Query/Node.hs
+++ b/src/Ganeti/Query/Node.hs
@@ -233,11 +233,11 @@
      FieldConfig (\cfg -> rsNormal . getNumInstances snd cfg), QffNormal)
   , (FieldDefinition "pinst_list" "PriInstances" QFTOther
        "List of instances with this node as primary",
-     FieldConfig (\cfg -> rsNormal . niceSort . map instName . fst .
+     FieldConfig (\cfg -> rsNormal . niceSort . mapMaybe instName . fst .
                           getNodeInstances cfg . nodeUuid), QffNormal)
   , (FieldDefinition "sinst_list" "SecInstances" QFTOther
        "List of instances with this node as secondary",
-     FieldConfig (\cfg -> rsNormal . niceSort . map instName . snd .
+     FieldConfig (\cfg -> rsNormal . niceSort . mapMaybe instName . snd .
                           getNodeInstances cfg . nodeUuid), QffNormal)
   , (FieldDefinition "role" "Role" QFTText nodeRoleDoc,
      FieldConfig ((rsNormal .) . getNodeRole), QffNormal)
diff --git a/src/Ganeti/Query/Query.hs b/src/Ganeti/Query/Query.hs
index 51ab108..31aff65 100644
--- a/src/Ganeti/Query/Query.hs
+++ b/src/Ganeti/Query/Query.hs
@@ -317,7 +317,8 @@
 
 queryInner cfg live (Query (ItemTypeOpCode QRInstance) fields qfilter) wanted =
   genericQuery Instance.fieldsMap (CollectorFieldAware Instance.collectLiveData)
-               instName configInstances getInstance cfg live fields qfilter
+               (fromMaybe "" . instName) configInstances getInstance cfg live
+               fields qfilter
                wanted
 
 queryInner cfg live (Query (ItemTypeOpCode QRGroup) fields qfilter) wanted =
diff --git a/src/Ganeti/THH.hs b/src/Ganeti/THH.hs
index e716aca..91f4c53 100644
--- a/src/Ganeti/THH.hs
+++ b/src/Ganeti/THH.hs
@@ -62,19 +62,24 @@
                   , withDoc
                   , defaultField
                   , notSerializeDefaultField
+                  , presentInForthcoming
                   , optionalField
                   , optionalNullSerField
+                  , makeOptional
                   , renameField
                   , customField
                   , buildObject
+                  , buildObjectWithForthcoming
                   , buildObjectSerialisation
                   , buildParam
                   , genException
                   , excErrMsg
                   ) where
 
-import Control.Arrow ((&&&))
+import Control.Arrow ((&&&), second)
 import Control.Applicative
+import Control.Lens.Type (Lens')
+import Control.Lens (lens, set, element)
 import Control.Monad
 import Control.Monad.Base () -- Needed to prevent spurious GHC linking errors.
 import Control.Monad.Writer (tell)
@@ -87,6 +92,7 @@
 import Data.List
 import Data.Maybe
 import qualified Data.Map as M
+import Data.Monoid
 import qualified Data.Set as S
 import Language.Haskell.TH
 import Language.Haskell.TH.Syntax (lift)
@@ -95,6 +101,7 @@
 import Text.JSON.Pretty (pp_value)
 
 import Ganeti.JSON
+import Ganeti.PartialParams
 import Ganeti.PyValue
 import Ganeti.THH.PyType
 
@@ -185,6 +192,7 @@
                      -- ^ determines if a field is optional, and if yes,
                      -- how
                    , fieldDoc         :: String
+                   , fieldPresentInForthcoming :: Bool
                    }
 
 -- | Generates a simple field.
@@ -200,6 +208,7 @@
         , fieldConstr      = Nothing
         , fieldIsOptional  = NotOptional
         , fieldDoc         = ""
+        , fieldPresentInForthcoming = False
         }
 
 -- | Generate an AndRestArguments catch-all field.
@@ -215,6 +224,7 @@
         , fieldConstr      = Nothing
         , fieldIsOptional  = AndRestArguments
         , fieldDoc         = ""
+        , fieldPresentInForthcoming = True
         }
 
 withDoc :: String -> Field -> Field
@@ -237,6 +247,10 @@
   field { fieldDefault = Just defval
         , fieldSerializeDefault = False }
 
+-- | Mark a field as present in the forthcoming variant.
+presentInForthcoming :: Field -> Field
+presentInForthcoming field = field { fieldPresentInForthcoming = True }
+
 -- | Marks a field optional (turning its base type into a Maybe).
 optionalField :: Field -> Field
 optionalField field = field { fieldIsOptional = OptionalOmitNull }
@@ -246,6 +260,15 @@
 optionalNullSerField :: Field -> Field
 optionalNullSerField field = field { fieldIsOptional = OptionalSerializeNull }
 
+-- | Make a field optional, if it isn't already.
+makeOptional :: Field -> Field
+makeOptional field = if  and [ fieldIsOptional field == NotOptional
+                             , isNothing $ fieldDefault field
+                             , not $ fieldPresentInForthcoming field
+                             ]
+                        then optionalField field
+                        else field
+
 -- | Sets custom functions on a field.
 customField :: Name      -- ^ The name of the read function
             -> Name      -- ^ The name of the show function
@@ -364,10 +387,6 @@
 ensureUpper [] = []
 ensureUpper (x:xs) = toUpper x:xs
 
--- | Helper for quoted expressions.
-varNameE :: String -> Q Exp
-varNameE = varE . mkName
-
 -- | fromObj (Ganeti specific) as an expression, for reuse.
 fromObjE :: Q Exp
 fromObjE = varE 'fromObj
@@ -392,7 +411,7 @@
 appConsApp :: Name -> [Exp] -> Exp
 appConsApp cname =
   foldl (\accu e -> InfixE (Just accu) (VarE '(<*>)) (Just e))
-          (AppE (VarE 'pure) (ConE cname))
+        (AppE (VarE 'pure) (ConE cname))
 
 -- | Builds a field for a normal constructor.
 buildConsField :: Q Type -> StrictTypeQ
@@ -505,7 +524,7 @@
   let name = mkName sname
       ddecl = strADTDecl name (map fst cons)
       -- process cons in the format expected by genToRaw
-      cons' = map (\(a, b) -> (a, fn b)) cons
+      cons' = map (second fn) cons
   toraw <- genToRaw traw (toRawName sname) name cons'
   fromraw <- genFromRaw traw (fromRawName sname) name cons'
   return $ ddecl:toraw ++ fromraw
@@ -946,6 +965,158 @@
   ser_decls <- buildObjectSerialisation sname fields
   return $ declD:ser_decls
 
+-- | Build an accessor function for a field of an object
+-- that can have a forthcoming variant.
+buildAccessor :: Name -- ^ name of the forthcoming constructor
+              -> String -- ^ prefix for the forthcoming field
+              -> Name -- ^ name of the real constructor
+              -> String -- ^ prefix for the real field
+              -> Name -- ^ name of the generated accessor
+              -> String -- ^ prefix of the generated accessor
+              -> Field -- ^ field description
+              -> Q [Dec]
+buildAccessor fnm fpfx rnm rpfx nm pfx field = do
+  let optField = makeOptional field
+  x <- newName "x"
+  (rpfx_name, _, _) <- fieldTypeInfo rpfx field
+  (fpfx_name, _, ftype) <- fieldTypeInfo fpfx optField
+  (pfx_name, _, _) <- fieldTypeInfo pfx field
+  let r_body_core = AppE (VarE rpfx_name) $ VarE x
+      r_body = if fieldIsOptional field == fieldIsOptional optField
+                 then r_body_core
+                 else AppE (VarE 'return) r_body_core
+      f_body = AppE (VarE fpfx_name) $ VarE x
+  return $ [ SigD pfx_name $ ArrowT `AppT` ConT nm `AppT` ftype
+           , FunD pfx_name
+             [ Clause [ConP rnm [VarP x]] (NormalB r_body) []
+             , Clause [ConP fnm [VarP x]] (NormalB f_body) []
+             ]]
+
+-- | Build lens declarations for a field, if the type of the field
+-- is the same in the forthcoming and the real variant.
+buildLens :: (Name, Name) -- ^ names of the forthcoming constructors
+          -> (Name, Name) -- ^ names of the real constructors
+          -> Name -- ^ name of the type
+          -> String -- ^ the field prefix
+          -> Int -- ^ arity
+          -> (Field, Int) -- ^ the Field to generate the lens for, and its
+                          -- position
+          -> Q [Dec]
+buildLens (fnm, fdnm) (rnm, rdnm) nm pfx ar (field, i) = do
+  let optField = makeOptional field
+  if fieldIsOptional field /= fieldIsOptional optField
+     then return []
+     else do
+       let lensnm = mkName $ pfx ++ fieldRecordName  field ++ "L"
+       (accnm, _, ftype) <- fieldTypeInfo pfx field
+       vars <- replicateM ar (newName "x")
+       var <- newName "val"
+       context <- newName "val"
+       let body cn cdn = NormalB
+                           . (ConE cn `AppE`)
+                           . foldl (\e (j, x) -> AppE e . VarE
+                                                   $ if i == j then var else x)
+                             (ConE cdn)
+                          $ zip [0..] vars
+       let setterE = LamE [VarP context, VarP var] $ CaseE (VarE context)
+                        [ Match (ConP fnm [ConP fdnm . set (element i) WildP
+                                             $ map VarP vars])
+                                (body fnm fdnm) []
+                        , Match (ConP rnm [ConP rdnm . set (element i) WildP
+                                             $ map VarP vars])
+                                (body rnm rdnm) []
+                        ]
+       return [ SigD lensnm $ ConT ''Lens' `AppT` ConT nm `AppT` ftype
+              , ValD (VarP lensnm)
+                     (NormalB  $ VarE 'lens `AppE` VarE accnm `AppE` setterE) []
+              ]
+
+-- | Build an object that can have a forthcoming variant.
+-- This will create 3 data types: two objects, prefixed by
+-- "Real" and "Forthcoming", respectively, and a sum type
+-- of those. The JSON representation of the latter will
+-- be a JSON object, dispatching on the "forthcoming" key.
+buildObjectWithForthcoming ::
+  String -- ^ Name of the newly defined type
+  -> String -- ^ base prefix for field names; for the real and forthcoming
+            -- variants, the base prefix will be prefixed with "real"
+            -- and "forthcoming", respectively.
+  -> [Field] -- ^ List of fields in the real version
+  -> Q [Dec]
+buildObjectWithForthcoming sname field_pfx fields = do
+  let capitalPrefix = ensureUpper field_pfx
+      forth_nm = "Forthcoming" ++ sname
+      forth_data_nm = forth_nm ++ "Data"
+      forth_pfx = "forthcoming" ++ capitalPrefix
+      real_nm =  "Real" ++ sname
+      real_data_nm = real_nm ++ "Data"
+      real_pfx = "real" ++ capitalPrefix
+  concreteDecls <- buildObject real_data_nm real_pfx fields
+  forthcomingDecls <- buildObject forth_data_nm forth_pfx
+                      (map makeOptional fields)
+  let name = mkName sname
+      real_d = NormalC (mkName real_nm)
+                 [(NotStrict, ConT (mkName real_data_nm))]
+      forth_d = NormalC (mkName forth_nm)
+                  [(NotStrict, ConT (mkName forth_data_nm))]
+      declD = DataD [] name [] [real_d, forth_d] [''Show, ''Eq]
+
+  read_body <- [| branchOnField "forthcoming"
+                  (liftM $(conE $ mkName forth_nm) . JSON.readJSON)
+                  (liftM $(conE $ mkName real_nm) . JSON.readJSON) |]
+  x <- newName "x"
+  show_real_body <- [| JSON.showJSON $(varE x) |]
+  show_forth_body <- [| addField ("forthcoming", JSON.JSBool True)
+                          $ JSON.showJSON $(varE x) |]
+  let rdjson = FunD 'JSON.readJSON [Clause [] (NormalB read_body) []]
+      shjson = FunD 'JSON.showJSON
+                 [ Clause [ConP (mkName real_nm) [VarP x]]
+                    (NormalB show_real_body) []
+                 , Clause [ConP (mkName forth_nm) [VarP x]]
+                    (NormalB show_forth_body) []
+                 ]
+      instJSONdecl = InstanceD [] (AppT (ConT ''JSON.JSON) (ConT name))
+                     [rdjson, shjson]
+  accessors <- liftM concat . flip mapM fields
+                 $ buildAccessor (mkName forth_nm) forth_pfx
+                                 (mkName real_nm) real_pfx
+                                 name field_pfx
+  lenses <- liftM concat . flip mapM (zip fields [0..])
+              $ buildLens (mkName forth_nm, mkName forth_data_nm)
+                          (mkName real_nm, mkName real_data_nm)
+                          name field_pfx (length fields)
+  xs <- newName "xs"
+  fromDictWKeysbody <- [| if ("forthcoming", JSON.JSBool True) `elem` $(varE xs)
+                            then liftM $(conE $ mkName forth_nm)
+                                   (fromDictWKeys $(varE xs))
+                            else liftM $(conE $ mkName real_nm)
+                                   (fromDictWKeys $(varE xs)) |]
+  todictx_r <- [| toDict $(varE x) |]
+  todictx_f <- [| ("forthcoming", JSON.JSBool True) : toDict $(varE x) |]
+  let todict = FunD 'toDict [ Clause [ConP (mkName real_nm) [VarP x]]
+                               (NormalB todictx_r) []
+                            , Clause [ConP (mkName forth_nm) [VarP x]]
+                               (NormalB todictx_f) []
+                            ]
+      fromdict = FunD 'fromDictWKeys [ Clause [VarP xs]
+                                       (NormalB fromDictWKeysbody) [] ]
+      instDict = InstanceD [] (AppT (ConT ''DictObject) (ConT name))
+                 [todict, fromdict]
+  instArray <- genArrayObjectInstance name
+                 (simpleField "forthcoming" [t| Bool |] : fields)
+  let forthPredName = mkName $ field_pfx ++ "Forthcoming"
+  let forthPredDecls = [ SigD forthPredName
+                           $ ArrowT `AppT` ConT name `AppT` ConT ''Bool
+                       , FunD forthPredName
+                         [ Clause [ConP (mkName real_nm) [WildP]]
+                                   (NormalB $ ConE 'False) []
+                         , Clause [ConP (mkName forth_nm) [WildP]]
+                                   (NormalB $ ConE 'True) []
+                         ]
+                       ]
+  return $ concreteDecls ++ forthcomingDecls ++ [declD, instJSONdecl]
+           ++ forthPredDecls ++ accessors ++ lenses ++ [instDict, instArray]
+
 -- | Generates an object definition: data type and its JSON instance.
 buildObjectSerialisation :: String -> [Field] -> Q [Dec]
 buildObjectSerialisation sname fields = do
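To make the generated API concrete, here is a rough sketch (names purely illustrative, following the prefixing rules documented above) of what a call like

    $(buildObjectWithForthcoming "Thing" "thing"
      [ simpleField "name" [t| String |] ])

would produce:

    data RealThingData        = RealThingData { realThingName :: String }
    data ForthcomingThingData =
      ForthcomingThingData { forthcomingThingName :: Maybe String }
    data Thing = RealThing RealThingData | ForthcomingThing ForthcomingThingData

    thingName        :: Thing -> Maybe String  -- accessor over both variants
    thingForthcoming :: Thing -> Bool          -- True only for the forthcoming variant

plus JSON and DictObject instances that add a ("forthcoming", true) key when serialising the forthcoming variant, and lenses for those fields whose type is the same in both variants.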
@@ -1138,13 +1309,17 @@
 paramTypeNames root = ("Filled"  ++ root ++ "Params",
                        "Partial" ++ root ++ "Params")
 
+-- | Compute the name of a full and a partial parameter field.
+paramFieldNames :: String -> Field -> (Name, Name)
+paramFieldNames field_pfx fd =
+  let base = field_pfx ++ fieldRecordName fd
+   in (mkName base, mkName (base ++ "P"))
+
 -- | Compute information about the type of a parameter field.
-paramFieldTypeInfo :: String -> Field -> Q (Name, Strict, Type)
+paramFieldTypeInfo :: String -> Field -> VarStrictTypeQ
 paramFieldTypeInfo field_pfx fd = do
   t <- actualFieldType fd
-  let n = mkName . (++ "P") . (field_pfx ++) .
-          fieldRecordName $ fd
-  return (n, NotStrict, AppT (ConT ''Maybe) t)
+  return (snd $ paramFieldNames field_pfx fd, NotStrict, AppT (ConT ''Maybe) t)
 
 -- | Build a parameter declaration.
 --
@@ -1153,6 +1328,7 @@
 -- fields are optional. Due to the current record syntax issues, the
 -- fields need to be named differently for the two structures, so the
 -- partial ones get a /P/ suffix.
+-- Also generate a default value for the partial parameters.
 buildParam :: String -> String -> [Field] -> Q [Dec]
 buildParam sname field_pfx fields = do
   let (sname_f, sname_p) = paramTypeNames sname
@@ -1216,37 +1392,56 @@
       loadexp = [| $(varE 'maybeFromObj) $objvar $objfield |]
   loadFnOpt field loadexp objvar
 
--- | Builds a simple declaration of type @n_x = fromMaybe f_x p_x@.
-buildFromMaybe :: String -> Q Dec
-buildFromMaybe fname =
-  valD (varP (mkName $ "n_" ++ fname))
-         (normalB [| $(varE 'fromMaybe)
-                        $(varNameE $ "f_" ++ fname)
-                        $(varNameE $ "p_" ++ fname) |]) []
-
 -- | Builds a function that executes the filling of partial parameter
 -- from a full copy (similar to Python's fillDict).
 fillParam :: String -> String -> [Field] -> Q [Dec]
 fillParam sname field_pfx fields = do
-  let fnames = map (\fd -> field_pfx ++ fieldRecordName fd) fields
-      (sname_f, sname_p) = paramTypeNames sname
-      oname_f = "fobj"
-      oname_p = "pobj"
+  let (sname_f, sname_p) = paramTypeNames sname
       name_f = mkName sname_f
       name_p = mkName sname_p
-      fun_name = mkName $ "fill" ++ sname ++ "Params"
-      le_full = ValD (ConP name_f (map (VarP . mkName . ("f_" ++)) fnames))
-                (NormalB . VarE . mkName $ oname_f) []
-      le_part = ValD (ConP name_p (map (VarP . mkName . ("p_" ++)) fnames))
-                (NormalB . VarE . mkName $ oname_p) []
-      obj_new = appCons name_f $ map (VarE . mkName . ("n_" ++)) fnames
-  le_new <- mapM buildFromMaybe fnames
-  funt <- [t| $(conT name_f) -> $(conT name_p) -> $(conT name_f) |]
-  let sig = SigD fun_name funt
-      fclause = Clause [VarP (mkName oname_f), VarP (mkName oname_p)]
-                (NormalB $ LetE (le_full:le_part:le_new) obj_new) []
-      fun = FunD fun_name [fclause]
-  return [sig, fun]
+  let (fnames, pnames) = unzip $ map (paramFieldNames field_pfx) fields
+  -- due to apparent bugs in some older GHC versions, we need to add these
+  -- prefixes to avoid "binding shadows ..." errors
+  fbinds <- mapM (newName . ("f_" ++) . nameBase) fnames
+  let fConP = ConP name_f (map VarP fbinds)
+  pbinds <- mapM (newName . ("p_" ++) . nameBase) pnames
+  let pConP = ConP name_p (map VarP pbinds)
+  -- PartialParams instance --------
+  -- fillParams
+  let fromMaybeExp fn pn = AppE (AppE (VarE 'fromMaybe) (VarE fn)) (VarE pn)
+      fupdates = appCons name_f $ zipWith fromMaybeExp fbinds pbinds
+      fclause = Clause [fConP, pConP] (NormalB fupdates) []
+  -- toPartial
+  let tpupdates = appCons name_p $ map (AppE (ConE 'Just) . VarE) fbinds
+      tpclause = Clause [fConP] (NormalB tpupdates) []
+  -- toFilled
+  let tfupdates = appConsApp name_f $ map VarE pbinds
+      tfclause = Clause [pConP] (NormalB tfupdates) []
+  -- the instance
+  let instType = AppT (AppT (ConT ''PartialParams) (ConT name_f)) (ConT name_p)
+  -- Monoid instance for the partial part ----
+  -- mempty
+  let memptyExp = appCons name_p $ map (const $ VarE 'empty) fields
+      memptyClause = Clause [] (NormalB memptyExp) []
+  -- mappend
+  pbinds2 <- mapM (newName . ("p2_" ++) . nameBase) pnames
+  let pConP2 = ConP name_p (map VarP pbinds2)
+  -- note the reversal of 'l' and 'r' in the call to <|>
+  -- as we want the result to be the rightmost value
+  let altExp = zipWith (\l r -> AppE (AppE (VarE '(<|>)) (VarE r)) (VarE l))
+      mappendExp = appCons name_p $ altExp pbinds pbinds2
+      mappendClause = Clause [pConP, pConP2] (NormalB mappendExp) []
+  let monoidType = AppT (ConT ''Monoid) (ConT name_p)
+  -- the instances combined
+  return [ InstanceD [] instType
+                     [ FunD 'fillParams [fclause]
+                     , FunD 'toPartial [tpclause]
+                     , FunD 'toFilled [tfclause]
+                     ]
+         , InstanceD [] monoidType
+                     [ FunD 'mempty [memptyClause]
+                     , FunD 'mappend [mappendClause]
+                     ]]
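Tying this back to the $(buildParam "Nic" "nicp" ...) splice earlier in this patch, the instances generated here are roughly equivalent to the following sketch:

    instance PartialParams FilledNicParams PartialNicParams where
      fillParams (FilledNicParams m l v) (PartialNicParams mm ml mv) =
        FilledNicParams (fromMaybe m mm) (fromMaybe l ml) (fromMaybe v mv)
      toPartial (FilledNicParams m l v) =
        PartialNicParams (Just m) (Just l) (Just v)
      toFilled (PartialNicParams mm ml mv) =
        FilledNicParams <$> mm <*> ml <*> mv

    instance Monoid PartialNicParams where
      mempty = PartialNicParams Nothing Nothing Nothing
      -- per field, the right-hand (newer) value wins
      mappend (PartialNicParams a b c) (PartialNicParams a' b' c') =
        PartialNicParams (a' <|> a) (b' <|> b) (c' <|> c)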
 
 -- * Template code for exceptions
 
diff --git a/src/Ganeti/THH/Field.hs b/src/Ganeti/THH/Field.hs
index b1a8b9f..42c8fc1 100644
--- a/src/Ganeti/THH/Field.hs
+++ b/src/Ganeti/THH/Field.hs
@@ -113,11 +113,12 @@
 -- | Serial number fields description.
 serialFields :: [Field]
 serialFields =
-    [ renameField  "Serial" $ simpleField "serial_no" [t| Int |] ]
+    [ presentInForthcoming . renameField  "Serial"
+        $ simpleField "serial_no" [t| Int |] ]
 
 -- | UUID fields description.
 uuidFields :: [Field]
-uuidFields = [ simpleField "uuid" [t| String |] ]
+uuidFields = [ presentInForthcoming $ simpleField "uuid" [t| String |] ]
 
 -- | Tag set type alias.
 type TagSet = Set.Set String
diff --git a/src/Ganeti/THH/HsRPC.hs b/src/Ganeti/THH/HsRPC.hs
index 20c7089..89a30d7 100644
--- a/src/Ganeti/THH/HsRPC.hs
+++ b/src/Ganeti/THH/HsRPC.hs
@@ -42,14 +42,6 @@
   , mkRpcCalls
   ) where
 
--- The following macro is just a temporary solution for 2.12 and 2.13.
--- Since 2.14 cabal creates proper macros for all dependencies.
-#define MIN_VERSION_monad_control(maj,min,rev) \
-  (((maj)<MONAD_CONTROL_MAJOR)|| \
-   (((maj)==MONAD_CONTROL_MAJOR)&&((min)<=MONAD_CONTROL_MINOR))|| \
-   (((maj)==MONAD_CONTROL_MAJOR)&&((min)==MONAD_CONTROL_MINOR)&& \
-    ((rev)<=MONAD_CONTROL_REV)))
-
 import Control.Applicative
 import Control.Monad
 import Control.Monad.Base
diff --git a/src/Ganeti/Types.hs b/src/Ganeti/Types.hs
index 9ceb8e8..4d430cb 100644
--- a/src/Ganeti/Types.hs
+++ b/src/Ganeti/Types.hs
@@ -173,6 +173,14 @@
   , hotplugActionToRaw
   , Private(..)
   , showPrivateJSObject
+  , HvParams
+  , OsParams
+  , OsParamsPrivate
+  , TimeStampObject(..)
+  , UuidObject(..)
+  , ForthcomingObject(..)
+  , SerialNoObject(..)
+  , TagsObject(..)
   ) where
 
 import Control.Applicative
@@ -180,6 +188,8 @@
 import qualified Text.JSON as JSON
 import Text.JSON (JSON, readJSON, showJSON)
 import Data.Ratio (numerator, denominator)
+import qualified Data.Set as Set
+import System.Time (ClockTime)
 
 import qualified Ganeti.ConstantUtils as ConstantUtils
 import Ganeti.JSON
@@ -298,7 +308,9 @@
 
 -- * Ganeti types
 
--- | Instance disk template type.
+-- | Instance disk template type. The disk template is a name for the
+-- constructor of the disk configuration 'DiskLogicalId' used for
+-- serialization, configuration values, etc.
 $(THH.declareLADT ''String "DiskTemplate"
        [ ("DTDiskless",   "diskless")
        , ("DTFile",       "file")
@@ -419,7 +431,9 @@
 -- | Dynamic device modification, just add/remove version.
 $(THH.declareLADT ''String "DdmSimple"
      [ ("DdmSimpleAdd",    "add")
+     , ("DdmSimpleAttach", "attach")
      , ("DdmSimpleRemove", "remove")
+     , ("DdmSimpleDetach", "detach")
      ])
 $(THH.makeJSONInstance ''DdmSimple)
 
@@ -428,7 +442,9 @@
 -- TODO: DDM_SWAP, DDM_MOVE?
 $(THH.declareLADT ''String "DdmFull"
      [ ("DdmFullAdd",    "add")
+     , ("DdmFullAttach", "attach")
      , ("DdmFullRemove", "remove")
+     , ("DdmFullDetach", "detach")
      , ("DdmFullModify", "modify")
      ])
 $(THH.makeJSONInstance ''DdmFull)
@@ -540,10 +556,7 @@
 showSULvm :: StorageType -> StorageKey -> SPExclusiveStorage -> String
 showSULvm st sk es = show (storageTypeToRaw st, sk, [es])
 
--- | Mapping from disk templates to storage types
--- FIXME: This is semantically the same as the constant
--- C.diskTemplatesStorageType, remove this when python constants
--- are generated from haskell constants
+-- | Mapping from disk templates to storage types.
 diskTemplateToStorageType :: DiskTemplate -> StorageType
 diskTemplateToStorageType DTExt = StorageExt
 diskTemplateToStorageType DTFile = StorageFile
@@ -950,3 +963,37 @@
                        [(String, a)] -> JSON.JSObject (Private JSON.JSValue)
 showPrivateJSObject value = JSON.toJSObject $ map f value
   where f (k, v) = (k, Private $ JSON.showJSON v)
+
+
+-- | The hypervisor parameter type. This is currently a simple map,
+-- without type checking on key/value pairs.
+type HvParams = Container JSON.JSValue
+
+-- | The OS parameters type. This is, and will remain, a string
+-- container, since the keys are dynamically declared by the OSes, and
+-- the values are always strings.
+type OsParams = Container String
+type OsParamsPrivate = Container (Private String)
+
+
+-- | Class of objects that have timestamps.
+class TimeStampObject a where
+  cTimeOf :: a -> ClockTime
+  mTimeOf :: a -> ClockTime
+
+-- | Class of objects that have an UUID.
+class UuidObject a where
+  uuidOf :: a -> String
+
+-- | Class of objects that can be forthcoming.
+class ForthcomingObject a where
+  isForthcoming :: a -> Bool
+
+-- | Class of object that have a serial number.
+class SerialNoObject a where
+  serialOf :: a -> Int
+
+-- | Class of objects that have tags.
+class TagsObject a where
+  tagsOf :: a -> Set.Set String
+
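These classes only name common selectors; a minimal sketch of typical instances (the 'Job' record here is hypothetical, not part of this patch):

    data Job = Job
      { jobSerial :: Int
      , jobUuid   :: String
      , jobTags   :: Set.Set String
      }

    instance SerialNoObject Job where serialOf = jobSerial
    instance UuidObject     Job where uuidOf   = jobUuid
    instance TagsObject     Job where tagsOf   = jobTags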
diff --git a/src/Ganeti/Utils.hs b/src/Ganeti/Utils.hs
index 63373cc..c6ddb0d 100644
--- a/src/Ganeti/Utils.hs
+++ b/src/Ganeti/Utils.hs
@@ -102,7 +102,7 @@
 import Control.Exception (try, bracket)
 import Control.Monad
 import Control.Monad.Error
-import qualified Data.Attoparsec as A
+import qualified Data.Attoparsec.ByteString as A
 import qualified Data.ByteString.UTF8 as UTF8
 import Data.Char (toUpper, isAlphaNum, isDigit, isSpace)
 import qualified Data.Either as E
diff --git a/src/Ganeti/Utils/AsyncWorker.hs b/src/Ganeti/Utils/AsyncWorker.hs
index a0b2624..a21b2ad 100644
--- a/src/Ganeti/Utils/AsyncWorker.hs
+++ b/src/Ganeti/Utils/AsyncWorker.hs
@@ -19,6 +19,10 @@
 - If the caller uses 'triggerAndWait', the call will return just after the
   earliest action following the trigger is finished.
 
+- If the caller uses 'triggerWithResult', it will receive an 'Async' value that
+  can be used to wait for the result (which will be available once the earliest
+  action following the trigger finishes).
+
 - If the worker finishes an action and there are no pending triggers since the
   start of the last action, it becomes idle and waits for a new trigger.
 
@@ -60,10 +64,17 @@
   , mkAsyncWorker_
   , trigger
   , trigger_
+  , triggerWithResult
+  , triggerWithResult_
+  , triggerWithResultMany
+  , triggerWithResultMany_
   , triggerAndWait
   , triggerAndWait_
   , triggerAndWaitMany
   , triggerAndWaitMany_
+  , Async
+  , wait
+  , waitMany
   ) where
 
 import Control.Monad
@@ -72,11 +83,12 @@
 import Control.Concurrent (ThreadId)
 import Control.Concurrent.Lifted (fork, yield)
 import Control.Concurrent.MVar.Lifted
-import Data.Functor.Identity
 import Data.Monoid
 import qualified Data.Traversable as T
 import Data.IORef.Lifted
 
+-- * The definition and construction of asynchronous workers
+
 -- Represents the state of the requests to the worker. The worker is either
 -- 'Idle', or has 'Pending' triggers to process. After the corresponding
 -- action is run, all the 'MVar's in the list are notified with the result.
@@ -134,6 +146,20 @@
                => m a -> m (AsyncWorker () a)
 mkAsyncWorker_ = mkAsyncWorker . const
 
+-- * Triggering workers and obtaining their results
+
+-- | An asynchronous result that will eventually yield a value.
+newtype Async a = Async { asyncResult :: MVar a }
+
+-- | Waits for an asynchronous result to finish and yield a value.
+wait :: (MonadBase IO m) => Async a -> m a
+wait = readMVar . asyncResult
+
+-- | Waits for all asynchronous results in a collection to finish and yield a
+-- value.
+waitMany :: (MonadBase IO m, T.Traversable t) => t (Async a) -> m (t a)
+waitMany = T.mapM wait
+
 -- An internal function for triggering a worker, optionally registering
 -- a callback 'MVar'
 triggerInternal :: (MonadBase IO m, Monoid i)
@@ -153,6 +179,40 @@
 trigger_ :: (MonadBase IO m) => AsyncWorker () a -> m ()
 trigger_ = trigger ()
 
+-- | Trigger a worker. The returned `Async` value can be used to wait
+-- for the result of the action following this trigger.
+triggerWithResult :: (MonadBase IO m, Monoid i)
+                  => i -> AsyncWorker i a -> m (Async a)
+triggerWithResult i worker = do
+    result <- newEmptyMVar
+    triggerInternal i (Just result) worker
+    return $ Async result
+
+-- | Trigger a worker with no input; the returned `Async` value can be
+-- used to wait for the result.
+--
+-- See 'triggerWithResult'.
+triggerWithResult_ :: (MonadBase IO m) => AsyncWorker () a -> m (Async a)
+triggerWithResult_ = triggerWithResult ()
+
+-- | Trigger a list of workers. The returned collection of `Async` values
+-- can be used to wait for the results of the actions following these
+-- triggers.
+triggerWithResultMany :: (T.Traversable t, MonadBase IO m, Monoid i)
+                      => i -> t (AsyncWorker i a) -> m (t (Async a))
+triggerWithResultMany i = T.mapM (triggerWithResult i)
+
+-- | Trigger a list of workers with no inputs; the returned `Async` values
+-- can be used to wait for the results.
+--
+-- See 'triggerWithResultMany'.
+triggerWithResultMany_ :: (T.Traversable t, MonadBase IO m)
+                       => t (AsyncWorker () a) -> m (t (Async a))
+triggerWithResultMany_ = triggerWithResultMany ()
+
+-- * Helper functions for waiting for results just after triggering workers
+
 -- | Trigger a list of workers and wait until all the actions following these
 -- triggers finish. Returns the results of the actions.
 --
@@ -161,16 +221,8 @@
 -- sequentially, while the former runs them in parallel.
 triggerAndWaitMany :: (T.Traversable t, MonadBase IO m, Monoid i)
                    => i -> t (AsyncWorker i a) -> m (t a)
-triggerAndWaitMany i workers =
-    let trig w = do
-                  result <- newEmptyMVar
-                  triggerInternal i (Just result) w
-                  return result
-    in T.mapM trig workers >>= T.mapM takeMVar
+triggerAndWaitMany i = waitMany <=< triggerWithResultMany i
 
--- | Trigger a list of workers with no input and wait until all the actions
--- following these triggers finish. Returns the results of the actions.
---
 -- See 'triggerAndWaitMany'.
 triggerAndWaitMany_ :: (T.Traversable t, MonadBase IO m)
                     => t (AsyncWorker () a) -> m (t a)
@@ -179,7 +231,7 @@
 -- | Trigger a worker and wait until the action following this trigger
 -- finishes. Return the result of the action.
 triggerAndWait :: (MonadBase IO m, Monoid i) => i -> AsyncWorker i a -> m a
-triggerAndWait i = liftM runIdentity . triggerAndWaitMany i . Identity
+triggerAndWait i = wait <=< triggerWithResult i
 
 -- | Trigger a worker with no input and wait until the action following this
 -- trigger finishes. Return the result of the action.
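A minimal usage sketch of the new non-blocking API (the worker 'w' of type 'AsyncWorker () Int' is assumed to exist already):

    example :: AsyncWorker () Int -> IO Int
    example w = do
      a <- triggerWithResult_ w   -- returns immediately with an 'Async'
      -- ... do other work while the worker runs its action ...
      wait a                      -- block only once the result is needed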
diff --git a/src/Ganeti/WConfd/ConfigModifications.hs b/src/Ganeti/WConfd/ConfigModifications.hs
new file mode 100644
index 0000000..aa11b2a
--- /dev/null
+++ b/src/Ganeti/WConfd/ConfigModifications.hs
@@ -0,0 +1,83 @@
+{-# LANGUAGE TemplateHaskell #-}
+
+{-|  The WConfd functions for direct configuration manipulation
+
+This module contains the client functions exported by WConfD for
+specific configuration manipulation.
+
+-}
+
+{-
+
+Copyright (C) 2014 Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-}
+
+module Ganeti.WConfd.ConfigModifications where
+
+import Control.Lens.Setter ((.~))
+import Control.Lens.Traversal (mapMOf)
+import Data.Maybe (isJust)
+import Language.Haskell.TH (Name)
+
+import Ganeti.JSON (alterContainerL)
+import Ganeti.Locking.Locks (ClientId, ciIdentifier)
+import Ganeti.Logging.Lifted (logDebug)
+import Ganeti.Objects
+import Ganeti.Objects.Lens
+import Ganeti.WConfd.ConfigState (csConfigDataL)
+import Ganeti.WConfd.Monad (WConfdMonad, modifyConfigWithLock)
+import qualified Ganeti.WConfd.TempRes as T
+
+
+-- | Add a new instance to the configuration, release DRBD minors,
+-- and commit temporary IPs, all while temporarily holding the config
+-- lock. Return True upon success and False if the config lock was not
+-- available and the client should retry.
+--
+-- TODO: add verifications to this call; the client should have a lock
+-- on the name of the instance, and no instance with the given uuid should
+-- exist.
+addInstance :: Instance -> ClientId -> WConfdMonad Bool
+addInstance inst cid = do
+  logDebug $ "AddInstance: client " ++ show (ciIdentifier cid)
+             ++ " adding instance " ++ uuidOf inst
+             ++ " with name " ++ show (instName inst)
+  let addInst = csConfigDataL . configInstancesL . alterContainerL (uuidOf inst)
+                  .~ Just inst
+      commitRes tr = mapMOf csConfigDataL $ T.commitReservedIps cid tr
+  r <- modifyConfigWithLock
+         (\tr cs -> commitRes tr $ addInst  cs)
+         . T.releaseDRBDMinors $ uuidOf inst
+  logDebug $ "AddInstance: result of config modification is " ++ show r
+  return $ isJust r
+
+-- * The list of functions exported to RPC.
+
+exportedFunctions :: [Name]
+exportedFunctions = [ 'addInstance
+                    ]
diff --git a/src/Ganeti/WConfd/ConfigVerify.hs b/src/Ganeti/WConfd/ConfigVerify.hs
index ba42310..8b85027 100644
--- a/src/Ganeti/WConfd/ConfigVerify.hs
+++ b/src/Ganeti/WConfd/ConfigVerify.hs
@@ -121,7 +121,7 @@
     checkUUIDKeys "disk" disks
     -- UUID references
     checkUUIDRefs "node" "nodegroup" (return . nodeGroup) nodes nodegroups
-    checkUUIDRefs "instance" "primary node" (return . instPrimaryNode)
+    checkUUIDRefs "instance" "primary node" (maybe [] return . instPrimaryNode)
                                             instances nodes
     checkUUIDRefs "instance" "disks" instDisks instances disks
 
diff --git a/src/Ganeti/WConfd/Core.hs b/src/Ganeti/WConfd/Core.hs
index c538d94..8c0306c 100644
--- a/src/Ganeti/WConfd/Core.hs
+++ b/src/Ganeti/WConfd/Core.hs
@@ -2,15 +2,16 @@
 
 {-| The Ganeti WConfd core functions.
 
-As TemplateHaskell require that splices be defined in a separate
-module, we combine all the TemplateHaskell functionality that HTools
-needs in this module (except the one for unittests).
+This module defines all the functions that WConfD exports for
+RPC calls. They are in a separate module so that in a later
+stage, TemplateHaskell can generate, e.g., the python interface
+for those.
 
 -}
 
 {-
 
-Copyright (C) 2013 Google Inc.
+Copyright (C) 2013, 2014 Google Inc.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -68,6 +69,7 @@
 import Ganeti.WConfd.Language
 import Ganeti.WConfd.Monad
 import qualified Ganeti.WConfd.TempRes as T
+import qualified Ganeti.WConfd.ConfigModifications as CM
 import qualified Ganeti.WConfd.ConfigWriter as CW
 
 -- * Functions available to the RPC module
@@ -137,6 +139,22 @@
   :: ClientId -> WConfdMonad ()
 unlockConfig cid = freeLocksLevel cid LevelConfig
 
+-- | Write the configuration, if the config lock is held exclusively,
+-- and release the config lock. If the caller does not have the config
+-- lock, return False.
+writeConfigAndUnlock :: ClientId -> ConfigData -> WConfdMonad Bool
+writeConfigAndUnlock cid cdata = do
+  la <- readLockAllocation
+  if L.holdsLock cid ConfigLock L.OwnExclusive la
+    then do
+      CW.writeConfig cdata
+      unlockConfig cid
+      return True
+    else do
+      logWarning $ show cid ++ " tried writeConfigAndUnlock without owning"
+                   ++ " the config lock"
+      return False
+
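A hedged sketch of the intended use: the caller already holds ConfigLock
exclusively (e.g. obtained earlier via the lockConfig RPC), modifies its
copy of the configuration, and hands it back; the helper name below is
made up for illustration::

  import Control.Monad (unless)

  -- Push a modified configuration and drop the config lock in one step;
  -- warn if the lock was lost in the meantime and nothing was written.
  pushModifiedConfig :: ClientId -> ConfigData -> WConfdMonad ()
  pushModifiedConfig cid cdata = do
    ok <- writeConfigAndUnlock cid cdata
    unless ok . logWarning $
      "config lock not held by " ++ show cid ++ ", nothing written"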
 -- | Force the distribution of configuration without actually modifying it.
 -- It is not necessary to hold a lock for this operation.
 flushConfig :: WConfdMonad ()
@@ -156,22 +174,22 @@
 -- Allocate a drbd minor.
 --
 -- The free minor will be automatically computed from the existing devices.
--- A node can be given multiple times in order to allocate multiple minors.
+-- A node cannot be given multiple times.
 -- The result is the list of minors, in the same order as the passed nodes.
 allocateDRBDMinor
-  :: T.InstanceUUID -> [T.NodeUUID] -> WConfdMonad [T.DRBDMinor]
-allocateDRBDMinor inst nodes =
-  modifyTempResStateErr (\cfg -> T.allocateDRBDMinor cfg inst nodes)
+  :: T.DiskUUID -> [T.NodeUUID] -> WConfdMonad [T.DRBDMinor]
+allocateDRBDMinor disk nodes =
+  modifyTempResStateErr (\cfg -> T.allocateDRBDMinor cfg disk nodes)
 
--- Release temporary drbd minors allocated for a given instance using
+-- Release temporary drbd minors allocated for a given disk using
 -- 'allocateDRBDMinor'.
 --
 -- This should be called on the error paths, on the success paths
 -- it's automatically called by the ConfigWriter add and update
 -- functions.
 releaseDRBDMinors
-  :: T.InstanceUUID -> WConfdMonad ()
-releaseDRBDMinors inst = modifyTempResState (const $ T.releaseDRBDMinors inst)
+  :: T.DiskUUID -> WConfdMonad ()
+releaseDRBDMinors disk = modifyTempResState (const $ T.releaseDRBDMinors disk)
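With this change the reservation is keyed by disk rather than by
instance, and each node may appear only once per request. A hedged
sketch, reusing UUIDs from the test configuration added further below::

  -- Reserve one minor per node for a DRBD disk; the result keeps the
  -- order of the passed nodes.  Error paths must release the minors.
  exampleReserve :: WConfdMonad [T.DRBDMinor]
  exampleReserve = do
    let diskUuid  = "77ced3a5-6756-49ae-8d1f-274e27664c05"
        nodeUuids = [ "9a12d554-75c0-4cb1-8064-103365145db0"
                    , "41f9c238-173c-4120-9e41-04ad379b647a"
                    ]
    minors <- allocateDRBDMinor diskUuid nodeUuids
    -- ... if anything afterwards fails, undo the reservation:
    releaseDRBDMinors diskUuid
    return minors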
 
 -- *** MACs
 
@@ -370,6 +388,7 @@
                     , 'verifyConfig
                     , 'lockConfig
                     , 'unlockConfig
+                    , 'writeConfigAndUnlock
                     , 'flushConfig
                     -- temporary reservations (common)
                     , 'dropAllReservations
@@ -406,3 +425,4 @@
                     , 'guardedOpportunisticLockUnion
                     , 'hasPendingRequest
                     ]
+                    ++ CM.exportedFunctions
diff --git a/src/Ganeti/WConfd/Monad.hs b/src/Ganeti/WConfd/Monad.hs
index 59ec4ce..85f29ba 100644
--- a/src/Ganeti/WConfd/Monad.hs
+++ b/src/Ganeti/WConfd/Monad.hs
@@ -53,9 +53,11 @@
   , WConfdMonad
   , daemonHandle
   , modifyConfigState
+  , modifyConfigStateWithImmediate
   , forceConfigStateDistribution
   , readConfigState
   , modifyConfigDataErr_
+  , modifyConfigWithLock
   , modifyLockWaiting
   , modifyLockWaiting_
   , readLockWaiting
@@ -65,16 +67,10 @@
   , readTempResState
   ) where
 
--- The following macro is just a temporary solution for 2.12 and 2.13.
--- Since 2.14 cabal creates proper macros for all dependencies.
-#define MIN_VERSION_monad_control(maj,min,rev) \
-  (((maj)<MONAD_CONTROL_MAJOR)|| \
-   (((maj)==MONAD_CONTROL_MAJOR)&&((min)<=MONAD_CONTROL_MINOR))|| \
-   (((maj)==MONAD_CONTROL_MAJOR)&&((min)==MONAD_CONTROL_MINOR)&& \
-    ((rev)<=MONAD_CONTROL_REV)))
-
 import Control.Applicative
 import Control.Arrow ((&&&), second)
+import Control.Concurrent (forkIO, myThreadId)
+import Control.Exception.Lifted (bracket)
 import Control.Monad
 import Control.Monad.Base
 import Control.Monad.Error
@@ -86,16 +82,17 @@
 import Data.Monoid (Any(..))
 import qualified Data.Set as S
 import Data.Tuple (swap)
-import System.Time (getClockTime)
+import System.Posix.Process (getProcessID)
+import System.Time (getClockTime, ClockTime)
 import qualified Text.JSON as J
 
 import Ganeti.BasicTypes
 import Ganeti.Errors
 import Ganeti.JQueue (notifyJob)
 import Ganeti.Lens
-import Ganeti.Locking.Allocation (LockAllocation)
+import qualified Ganeti.Locking.Allocation as LA
 import Ganeti.Locking.Locks
-import Ganeti.Locking.Waiting (getAllocation)
+import qualified Ganeti.Locking.Waiting as LW
 import Ganeti.Logging
 import Ganeti.Logging.WriterLog
 import Ganeti.Objects (ConfigData)
@@ -153,8 +150,7 @@
                -> ResultG DaemonHandle
 mkDaemonHandle cpath cstat lstat trstat
                saveWorkerFn distMCsWorkerFn distSSConfWorkerFn
-               saveLockWorkerFn saveTempResWorkerFn
-               livelock = do
+               saveLockWorkerFn saveTempResWorkerFn livelock = do
   ds <- newIORef $ DaemonState cstat lstat trstat
   let readConfigIO = dsConfigState `liftM` readIORef ds :: IO ConfigState
 
@@ -218,44 +214,64 @@
 readConfigState = liftM dsConfigState . readIORef . dhDaemonState
                   =<< daemonHandle
 
+-- | From a result of a configuration change, determine if the
+-- configuration was changed and if full distribution is needed.
+-- If so, also bump the serial number.
+unpackConfigResult :: ClockTime -> ConfigState
+                      -> (a, ConfigState) -> ((a, Bool, Bool), ConfigState)
+unpackConfigResult now cs (r, cs')
+                     | cs /= cs' = ( (r, True, needsFullDist cs cs')
+                                   , over csConfigDataL (bumpSerial now) cs'
+                                   )
+                     | otherwise = ((r, False, False), cs')
+
 -- | Atomically modifies the configuration state in the WConfdMonad
--- with a computation that can possibly fail.
-modifyConfigStateErr
+-- with a computation that can possibly fail; immediately afterwards,
+-- while the config write is still going on, run the follow-up action.
+-- Return only after replication is finished.
+modifyConfigStateErrWithImmediate
   :: (TempResState -> ConfigState -> AtomicModifyMonad (a, ConfigState))
+  -> WConfdMonad ()
   -> WConfdMonad a
-modifyConfigStateErr f = do
+modifyConfigStateErrWithImmediate f immediateFollowup = do
   dh <- daemonHandle
   now <- liftIO getClockTime
 
-  -- If the configuration is modified, we also bump its serial number.
-  -- In order to determine if we need to save, we report if it's modified
-  -- as well as if it needs to be distributed synchronously.
-  let unpackResult cs (r, cs')
-                    | cs /= cs' = ( (r, True, needsFullDist cs cs')
-                                  , over csConfigDataL (bumpSerial now) cs' )
-                    | otherwise = ((r, False, False), cs')
   let modCS ds@(DaemonState { dsTempRes = tr }) =
-        mapMOf2 dsConfigStateL (\cs -> liftM (unpackResult cs) (f tr cs)) ds
+        mapMOf2
+          dsConfigStateL (\cs -> liftM (unpackConfigResult now cs) (f tr cs)) ds
   (r, modified, distSync) <- atomicModifyIORefErrLog (dhDaemonState dh)
                                                      (liftM swap . modCS)
-  when modified $ do
-    if distSync
+  if modified
+    then if distSync
       then do
         logDebug $ "Triggering config write" ++
                    " together with full synchronous distribution"
-        liftBase . triggerAndWait (Any True) . dhSaveConfigWorker $ dh
+        res <- liftBase . triggerWithResult (Any True) $ dhSaveConfigWorker dh
+        immediateFollowup
+        wait res
         logDebug "Config write and distribution finished"
       else do
         -- trigger the config. saving worker and wait for it
         logDebug $ "Triggering config write" ++
                    " and asynchronous distribution"
-        liftBase . triggerAndWait (Any False) . dhSaveConfigWorker $ dh
+        res <- liftBase . triggerWithResult (Any False) $ dhSaveConfigWorker dh
+        immediateFollowup
+        wait res
         logDebug "Config writer finished with local task"
-    return ()
+    else
+      immediateFollowup
   return r
 
 -- | Atomically modifies the configuration state in the WConfdMonad
 -- with a computation that can possibly fail.
+modifyConfigStateErr
+  :: (TempResState -> ConfigState -> AtomicModifyMonad (a, ConfigState))
+  -> WConfdMonad a
+modifyConfigStateErr = flip modifyConfigStateErrWithImmediate (return ())
+
+-- | Atomically modifies the configuration state in the WConfdMonad
+-- with a computation that can possibly fail.
 modifyConfigStateErr_
   :: (TempResState -> ConfigState -> AtomicModifyMonad ConfigState)
   -> WConfdMonad ()
@@ -265,6 +281,15 @@
 modifyConfigState :: (ConfigState -> (a, ConfigState)) -> WConfdMonad a
 modifyConfigState f = modifyConfigStateErr ((return .) . const f)
 
+-- | Atomically modifies the configuration state in WConfdMonad; immediately
+-- afterwards (while the config write-out is not necessarily finished) do
+-- another action.
+modifyConfigStateWithImmediate :: (ConfigState -> (a, ConfigState))
+                                  -> WConfdMonad ()
+                                  -> WConfdMonad a
+modifyConfigStateWithImmediate f =
+  modifyConfigStateErrWithImmediate ((return .) . const f)
+
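A hedged example of the "immediate" hook: the follow-up runs once the
in-memory change is committed and the write-out has been triggered, but
before the call waits for replication. The function and log text below
are illustrative only::

  -- Apply a pure config modification and log on behalf of a client
  -- while the configuration is still being written out.
  exampleWithImmediate :: ClientId -> (ConfigState -> ((), ConfigState))
                       -> WConfdMonad ()
  exampleWithImmediate cid f =
    modifyConfigStateWithImmediate f
      (logDebug $ "running follow-up for " ++ show cid
                  ++ " during config write-out")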
 -- | Force the distribution of configuration without actually modifying it.
 --
 -- We need a separate call for this operation, because 'modifyConfigState' only
@@ -348,5 +373,56 @@
 
 
 -- | Read the underlying lock allocation.
-readLockAllocation :: WConfdMonad (LockAllocation GanetiLocks ClientId)
-readLockAllocation = liftM getAllocation readLockWaiting
+readLockAllocation :: WConfdMonad (LA.LockAllocation GanetiLocks ClientId)
+readLockAllocation = liftM LW.getAllocation readLockWaiting
+
+-- | Modify the configuration while temporarily acquiring
+-- the configuration lock. If the configuration lock is held by
+-- someone else, nothing is changed and Nothing is returned.
+modifyConfigWithLock
+  :: (TempResState -> ConfigState -> AtomicModifyMonad ConfigState)
+     -> State TempResState ()
+     -> WConfdMonad (Maybe ())
+modifyConfigWithLock f tempres = do
+  now <- liftIO getClockTime
+  dh <- lift . WConfdMonadInt $ ask
+  pid <- liftIO getProcessID
+  tid <- liftIO myThreadId
+  let cid = ClientId { ciIdentifier = ClientOther $ "wconfd-" ++ show tid
+                     , ciLockFile = dhLivelock dh
+                     , ciPid = pid
+                     }
+  let modCS ds@(DaemonState { dsTempRes = tr }) =
+        mapMOf2
+          dsConfigStateL
+          (\cs -> liftM (unpackConfigResult now cs . (,) ())  (f tr cs))
+          ds
+  maybeDist <- bracket
+    (atomicModifyWithLens (dhDaemonState dh) dsLockWaitingL
+      $ swap . LW.updateLocks cid [LA.requestExclusive ConfigLock])
+    (\(res, _) -> case res of
+        Ok s | S.null s -> do
+          (_, nfy) <- atomicModifyWithLens (dhDaemonState dh) dsLockWaitingL
+                      $ swap . LW.updateLocks cid [LA.requestRelease ConfigLock]
+          unless (S.null nfy) . liftIO . void . forkIO $ do
+            logDebug . (++) "Locks became available for " . show $ S.toList nfy
+            mapM_ (notifyJob . ciPid) $ S.toList nfy
+            logDebug "Finished notifying processes"
+        _ -> return ())
+    (\(res, _) -> case res of
+        Ok s | S.null s -> do
+          ((), modif, dist) <- atomicModifyIORefErrLog (dhDaemonState dh)
+                                 (liftM swap . modCS)
+          atomicModifyWithLens (dhDaemonState dh) dsTempResL $ runState tempres
+          return $ Just (modif, dist)
+        _ -> return Nothing)
+  flip (maybe $ return Nothing) maybeDist $ \(modified, dist) -> do
+    when modified $ do
+      logDebug . (++) "Triggering config write; distribution "
+        $ if dist then "synchronously" else "asynchronously"
+      liftBase . triggerAndWait (Any dist) $ dhSaveConfigWorker dh
+      logDebug "Config write finished"
+    logDebug "Triggering temporary reservations write"
+    liftBase . triggerAndWait_ . dhSaveTempResWorker $ dh
+    logDebug "Temporary reservations write finished"
+    return $ Just ()
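For callers like 'addInstance' above, the Maybe result distinguishes
"the modification was applied" from "ConfigLock is currently held by
someone else, try again later". A minimal hedged sketch (a do-nothing
modification, purely for illustration)::

  import Data.Maybe (isJust)

  -- Attempt a no-op modification under the briefly-taken config lock
  -- and report whether the lock could be acquired at all.
  exampleModify :: WConfdMonad Bool
  exampleModify = do
    r <- modifyConfigWithLock
           (\_tempres cs -> return cs)  -- leave the config unchanged
           (return ())                  -- no temporary-reservation update
    return $ isJust r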
diff --git a/src/Ganeti/WConfd/Server.hs b/src/Ganeti/WConfd/Server.hs
index eb31270..b226d09 100644
--- a/src/Ganeti/WConfd/Server.hs
+++ b/src/Ganeti/WConfd/Server.hs
@@ -95,7 +95,7 @@
     verifyConfigErr cdata
     lock <- readPersistent persistentLocks
     tempres <- readPersistent persistentTempRes
-    (_, livelock) <- mkLivelockFile C.wconfdLivelockPrefix
+    (_, livelock) <- mkLivelockFile C.wconfLivelockPrefix
     mkDaemonHandle conf_file
                    (mkConfigState cdata)
                    lock
diff --git a/src/Ganeti/WConfd/Ssconf.hs b/src/Ganeti/WConfd/Ssconf.hs
index a831ef4..81db4b3 100644
--- a/src/Ganeti/WConfd/Ssconf.hs
+++ b/src/Ganeti/WConfd/Ssconf.hs
@@ -45,6 +45,7 @@
 import Control.Arrow ((&&&), first, second)
 import Data.Foldable (Foldable(..), toList)
 import Data.List (partition)
+import Data.Maybe (mapMaybe)
 import qualified Data.Map as M
 import qualified Text.JSON as J
 
@@ -111,7 +112,7 @@
     , (SSOnlineNodes, mapLines nodeName online )
     , (SSPrimaryIpFamily, return . show . ipFamilyToRaw
                           . clusterPrimaryIpFamily $ cluster)
-    , (SSInstanceList, niceSort . map instName
+    , (SSInstanceList, niceSort . mapMaybe instName
                        . toList . configInstances $ cdata)
     , (SSReleaseVersion, return releaseVersion)
     , (SSHypervisorList, mapLines hypervisorToRaw
diff --git a/src/Ganeti/WConfd/TempRes.hs b/src/Ganeti/WConfd/TempRes.hs
index 020aee8..ef152ea 100644
--- a/src/Ganeti/WConfd/TempRes.hs
+++ b/src/Ganeti/WConfd/TempRes.hs
@@ -44,6 +44,7 @@
   , emptyTempResState
   , NodeUUID
   , InstanceUUID
+  , DiskUUID
   , NetworkUUID
   , DRBDMinor
   , DRBDMap
@@ -111,15 +112,17 @@
 
 type InstanceUUID = String
 
+type DiskUUID = String
+
 type NetworkUUID = String
 
 type DRBDMinor = Int
 
 -- | A map of the usage of DRBD minors
-type DRBDMap = Map NodeUUID (Map DRBDMinor InstanceUUID)
+type DRBDMap = Map NodeUUID (Map DRBDMinor DiskUUID)
 
 -- | A map of the usage of DRBD minors with possible duplicates
-type DRBDMap' = Map NodeUUID (Map DRBDMinor [InstanceUUID])
+type DRBDMap' = Map NodeUUID (Map DRBDMinor [DiskUUID])
 
 -- * The state data structure
 
@@ -218,16 +221,15 @@
                 => ConfigData -> TempResState -> m DRBDMap'
 computeDRBDMap' cfg trs =
     flip execStateT (fmap (fmap (: [])) (trsDRBD trs))
-    $ F.forM_ (configInstances cfg) addDisks
+    $ F.forM_ (configDisks cfg) addMinors
   where
     -- | Creates a lens for modifying the list of instances
-    nodeMinor :: NodeUUID -> DRBDMinor -> Lens' DRBDMap' [InstanceUUID]
+    nodeMinor :: NodeUUID -> DRBDMinor -> Lens' DRBDMap' [DiskUUID]
     nodeMinor node minor = maybeLens (at node) . maybeLens (at minor)
-    -- | Adds disks of an instance within the state monad
-    addDisks inst = do
-                      disks <- toError $ getDrbdMinorsForInstance cfg inst
-                      forM_ disks $ \(minor, node) -> nodeMinor node minor
-                                                          %= (uuidOf inst :)
+    -- | Adds minors of a disk within the state monad
+    addMinors disk = do
+      let minors = getDrbdMinorsForDisk disk
+      forM_ minors $ \(minor, node) -> nodeMinor node minor %= (uuidOf disk :)
 
 -- | Compute the map of used DRBD minor/nodes.
 -- Report any duplicate entries as an error.
@@ -246,25 +248,27 @@
 -- Allocate a drbd minor.
 --
 -- The free minor will be automatically computed from the existing devices.
--- A node can be given multiple times in order to allocate multiple minors.
+-- A node cannot be given multiple times.
 -- The result is the list of minors, in the same order as the passed nodes.
 allocateDRBDMinor :: (MonadError GanetiException m, MonadState TempResState m)
-                  => ConfigData -> InstanceUUID -> [NodeUUID]
+                  => ConfigData -> DiskUUID -> [NodeUUID]
                   -> m [DRBDMinor]
-allocateDRBDMinor cfg inst nodes = do
+allocateDRBDMinor cfg disk nodes = do
+  unless (nodes == ordNub nodes) . resError
+    $ "Duplicate nodes detected in list '" ++ show nodes ++ "'"
   dMap <- computeDRBDMap' cfg =<< get
   let usedMap = fmap M.keysSet dMap
-  let alloc :: S.Set DRBDMinor -> Map DRBDMinor InstanceUUID
-            -> (DRBDMinor, Map DRBDMinor InstanceUUID)
+  let alloc :: S.Set DRBDMinor -> Map DRBDMinor DiskUUID
+            -> (DRBDMinor, Map DRBDMinor DiskUUID)
       alloc used m = let k = findFirst 0 (M.keysSet m `S.union` used)
-                      in (k, M.insert k inst m)
+                     in (k, M.insert k disk m)
   forM nodes $ \node -> trsDRBDL . maybeLens (at node)
                         %%= alloc (M.findWithDefault mempty node usedMap)
 
--- Release temporary drbd minors allocated for a given instance using
+-- Release temporary drbd minors allocated for a given disk using
 -- 'allocateDRBDMinor'.
-releaseDRBDMinors :: (MonadState TempResState m) => InstanceUUID -> m ()
-releaseDRBDMinors inst = trsDRBDL %= filterNested (/= inst)
+releaseDRBDMinors :: (MonadState TempResState m) => DiskUUID -> m ()
+releaseDRBDMinors disk = trsDRBDL %= filterNested (/= disk)
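The new guard rejects a request that names the same node twice, instead
of silently handing out two minors on that node. A hedged sketch of the
behaviour (the disk and node names are invented)::

  -- In any suitable error/state monad: distinct nodes each get a fresh
  -- minor, while a duplicated node fails before any state is changed.
  sketch :: (MonadError GanetiException m, MonadState TempResState m)
         => ConfigData -> m [DRBDMinor]
  sketch cfg = do
    good <- allocateDRBDMinor cfg "disk-A" ["node-1", "node-2"]
    -- the next call fails with "Duplicate nodes detected in list ...":
    _    <- allocateDRBDMinor cfg "disk-B" ["node-1", "node-1"]
    return good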
 
 -- * Other temporary resources
 
diff --git a/test/data/cluster_config_2.13.json b/test/data/cluster_config_2.13.json
new file mode 100644
index 0000000..700c5e1
--- /dev/null
+++ b/test/data/cluster_config_2.13.json
@@ -0,0 +1,573 @@
+{
+  "cluster": {
+    "beparams": {
+      "default": {
+        "always_failover": false,
+        "auto_balance": true,
+        "maxmem": 128,
+        "minmem": 128,
+        "spindle_use": 1,
+        "vcpus": 1
+      }
+    },
+    "blacklisted_os": [],
+    "candidate_certs": {},
+    "candidate_pool_size": 10,
+    "cluster_name": "cluster.name.example.com",
+    "compression_tools": ["gzip", "gzip-fast", "gzip-slow"],
+    "ctime": 1343869045.604884,
+    "data_collectors": {"cpu-avg-load": {"active": true,
+                                         "interval": 5000000.0},
+                        "diskstats": {"active": true,
+                                      "interval": 5000000.0},
+                        "drbd": {"active": true,
+                                 "interval": 5000000.0},
+                        "inst-status-xen": {"active": true,
+                                            "interval": 5000000.0},
+                        "lv": {"active": true,
+                               "interval": 5000000.0}},
+    "default_iallocator": "hail",
+    "default_iallocator_params": {},
+    "disk_state_static": {},
+    "diskparams": {
+      "blockdev": {},
+      "diskless": {},
+      "drbd": {
+        "c-delay-target": 1,
+        "c-fill-target": 200,
+        "c-max-rate": 2048,
+        "c-min-rate": 1024,
+        "c-plan-ahead": 1,
+        "data-stripes": 2,
+        "disk-barriers": "bf",
+        "disk-custom": "",
+        "dynamic-resync": false,
+        "meta-barriers": true,
+        "meta-stripes": 2,
+        "metavg": "xenvg",
+        "net-custom": "",
+        "resync-rate": 1024
+      },
+      "ext": {},
+      "file": {},
+      "plain": {
+        "stripes": 2
+      },
+      "rbd": {
+        "pool": "rbd"
+      },
+      "sharedfile": {}
+    },
+    "drbd_usermode_helper": "/bin/true",
+    "enabled_disk_templates": [
+      "drbd",
+      "plain",
+      "file",
+      "sharedfile"
+    ],
+    "enabled_hypervisors": [
+      "xen-pvm"
+    ],
+    "enabled_user_shutdown": false,
+    "file_storage_dir": "",
+    "hidden_os": [],
+    "highest_used_port": 32105,
+    "hv_state_static": {
+      "xen-pvm": {
+        "cpu_node": 1,
+        "cpu_total": 1,
+        "mem_hv": 0,
+        "mem_node": 0,
+        "mem_total": 0
+      }
+    },
+    "hvparams": {
+      "chroot": {
+        "init_script": "/ganeti-chroot"
+      },
+      "fake": {},
+      "kvm": {
+        "acpi": true,
+        "boot_order": "disk",
+        "cdrom2_image_path": "",
+        "cdrom_disk_type": "",
+        "cdrom_image_path": "",
+        "cpu_cores": 0,
+        "cpu_mask": "all",
+        "cpu_sockets": 0,
+        "cpu_threads": 0,
+        "cpu_type": "",
+        "disk_cache": "default",
+        "disk_type": "paravirtual",
+        "floppy_image_path": "",
+        "initrd_path": "",
+        "kernel_args": "ro",
+        "kernel_path": "/boot/vmlinuz-kvmU",
+        "keymap": "",
+        "kvm_extra": "",
+        "kvm_flag": "",
+        "kvm_path": "/usr/bin/kvm",
+        "machine_version": "",
+        "mem_path": "",
+        "migration_bandwidth": 4,
+        "migration_downtime": 30,
+        "migration_mode": "live",
+        "migration_port": 4041,
+        "nic_type": "paravirtual",
+        "reboot_behavior": "reboot",
+        "root_path": "/dev/vda1",
+        "security_domain": "",
+        "security_model": "none",
+        "serial_console": true,
+        "serial_speed": 38400,
+        "soundhw": "",
+        "spice_bind": "",
+        "spice_image_compression": "",
+        "spice_ip_version": 0,
+        "spice_jpeg_wan_compression": "",
+        "spice_password_file": "",
+        "spice_playback_compression": true,
+        "spice_streaming_video": "",
+        "spice_tls_ciphers": "HIGH:-DES:-3DES:-EXPORT:-ADH",
+        "spice_use_tls": false,
+        "spice_use_vdagent": true,
+        "spice_zlib_glz_wan_compression": "",
+        "usb_devices": "",
+        "usb_mouse": "",
+        "use_chroot": false,
+        "use_localtime": false,
+        "vga": "",
+        "vhost_net": false,
+        "vnc_bind_address": "",
+        "vnc_password_file": "",
+        "vnc_tls": false,
+        "vnc_x509_path": "",
+        "vnc_x509_verify": false
+      },
+      "lxc": {
+        "cpu_mask": "",
+        "lxc_cgroup_use": "",
+        "lxc_devices": "c 1:3 rw,c 1:5 rw,c 1:7 rw,c 1:8 rw,c 1:9 rw,c 1:10 rw,c 5:0 rw,c 5:1 rw,c 5:2 rw,c 136:* rw",
+        "lxc_drop_capabilities": "mac_override,sys_boot,sys_module,sys_time",
+        "lxc_extra_config": "",
+        "lxc_tty": 6,
+        "lxc_startup_wait": 30
+      },
+      "xen-hvm": {
+        "acpi": true,
+        "blockdev_prefix": "hd",
+        "boot_order": "cd",
+        "cdrom_image_path": "",
+        "cpu_cap": 0,
+        "cpu_mask": "all",
+        "cpu_weight": 256,
+        "device_model": "/usr/lib/xen/bin/qemu-dm",
+        "disk_type": "paravirtual",
+        "kernel_path": "/usr/lib/xen/boot/hvmloader",
+        "migration_mode": "non-live",
+        "migration_port": 8082,
+        "nic_type": "rtl8139",
+        "pae": true,
+        "pci_pass": "",
+        "reboot_behavior": "reboot",
+        "use_localtime": false,
+        "vif_script": "",
+        "vnc_bind_address": "0.0.0.0",
+        "vnc_password_file": "/your/vnc-cluster-password",
+        "xen_cmd": "xm"
+      },
+      "xen-pvm": {
+        "blockdev_prefix": "sd",
+        "bootloader_args": "",
+        "bootloader_path": "",
+        "cpu_cap": 0,
+        "cpu_mask": "all",
+        "cpu_weight": 256,
+        "initrd_path": "",
+        "kernel_args": "ro",
+        "kernel_path": "/boot/vmlinuz-xenU",
+        "migration_mode": "live",
+        "migration_port": 8082,
+        "reboot_behavior": "reboot",
+        "root_path": "/dev/xvda1",
+        "use_bootloader": false,
+        "vif_script": "",
+        "xen_cmd": "xm"
+      }
+    },
+    "install_image": "",
+    "instance_communication_network": "",
+    "ipolicy": {
+      "disk-templates": [
+        "sharedfile",
+        "diskless",
+        "plain",
+        "blockdev",
+        "drbd",
+        "file",
+        "rbd"
+      ],
+      "minmax": [
+        {
+          "max": {
+            "cpu-count": 8,
+            "disk-count": 16,
+            "disk-size": 1048576,
+            "memory-size": 32768,
+            "nic-count": 8,
+            "spindle-use": 12
+          },
+          "min": {
+            "cpu-count": 1,
+            "disk-count": 1,
+            "disk-size": 1024,
+            "memory-size": 128,
+            "nic-count": 1,
+            "spindle-use": 1
+          }
+        }
+      ],
+      "spindle-ratio": 32.0,
+      "std": {
+        "cpu-count": 1,
+        "disk-count": 1,
+        "disk-size": 1024,
+        "memory-size": 128,
+        "nic-count": 1,
+        "spindle-use": 1
+      },
+      "vcpu-ratio": 1.0
+    },
+    "mac_prefix": "aa:bb:cc",
+    "maintain_node_health": false,
+    "master_ip": "192.0.2.87",
+    "master_netdev": "eth0",
+    "master_netmask": 32,
+    "master_node": "9a12d554-75c0-4cb1-8064-103365145db0",
+    "modify_etc_hosts": true,
+    "modify_ssh_setup": true,
+    "mtime": 1361964122.79471,
+    "ndparams": {
+      "exclusive_storage": false,
+      "oob_program": "",
+      "spindle_count": 1
+    },
+    "nicparams": {
+      "default": {
+        "link": "br974",
+        "mode": "bridged"
+      }
+    },
+    "os_hvp": {
+      "TEMP-Ganeti-QA-OS": {
+        "xen-hvm": {
+          "acpi": false,
+          "pae": true
+        },
+        "xen-pvm": {
+          "root_path": "/dev/sda5"
+        }
+      }
+    },
+    "osparams": {},
+    "prealloc_wipe_disks": false,
+    "primary_ip_family": 2,
+    "reserved_lvs": [],
+    "rsahostkeypub": "YOURKEY",
+    "serial_no": 3189,
+    "shared_file_storage_dir": "/srv/ganeti/shared-file-storage",
+    "tags": [
+      "mytag"
+    ],
+    "tcpudp_port_pool": [
+      32101,
+      32102,
+      32103,
+      32104,
+      32105
+    ],
+    "uid_pool": [],
+    "use_external_mip_script": false,
+    "uuid": "dddf8c12-f2d8-4718-a35b-7804daf12a3f",
+    "volume_group_name": "xenvg",
+    "zeroing_image": ""
+  },
+  "ctime": 1343869045.605523,
+  "disks": {
+    "150bd154-8e23-44d1-b762-5065ae5a507b": {
+      "ctime": 1354038435.343601,
+      "dev_type": "plain",
+      "iv_name": "disk/0",
+      "logical_id": [
+        "xenvg",
+        "b27a576a-13f7-4f07-885c-63fcad4fdfcc.disk0"
+      ],
+      "mode": "rw",
+      "mtime": 1354038435.343601,
+      "params": {},
+      "serial_no": 1,
+      "size": 1280,
+      "uuid": "150bd154-8e23-44d1-b762-5065ae5a507b"
+    },
+    "77ced3a5-6756-49ae-8d1f-274e27664c05": {
+      "children": [
+        {
+          "dev_type": "plain",
+          "logical_id": [
+            "xenvg",
+            "5c390722-6a7a-4bb4-9cef-98d896a8e6b1.disk0_data"
+          ],
+          "params": {},
+          "size": 1024
+        },
+        {
+          "dev_type": "plain",
+          "logical_id": [
+            "xenvg",
+            "5c390722-6a7a-4bb4-9cef-98d896a8e6b1.disk0_meta"
+          ],
+          "params": {},
+          "size": 128
+        }
+      ],
+      "ctime": 1363620258.608976,
+      "dev_type": "drbd",
+      "iv_name": "disk/0",
+      "logical_id": [
+        "9a12d554-75c0-4cb1-8064-103365145db0",
+        "41f9c238-173c-4120-9e41-04ad379b647a",
+        32100,
+        0,
+        0,
+        "d3c3fd475fcbaf5fd177fb245ac43b71247ada38"
+      ],
+      "mode": "rw",
+      "mtime": 1363620258.608976,
+      "params": {},
+      "serial_no": 1,
+      "size": 1024,
+      "uuid": "77ced3a5-6756-49ae-8d1f-274e27664c05"
+    },
+    "79acf611-be58-4334-9fe4-4f2b73ae8abb": {
+      "ctime": 1355186880.451181,
+      "dev_type": "plain",
+      "iv_name": "disk/0",
+      "logical_id": [
+        "xenvg",
+        "3e559cd7-1024-4294-a923-a9fd13182b2f.disk0"
+      ],
+      "mode": "rw",
+      "mtime": 1355186880.451181,
+      "params": {},
+      "serial_no": 1,
+      "size": 102400,
+      "uuid": "79acf611-be58-4334-9fe4-4f2b73ae8abb"
+    }
+  },
+  "instances": {
+    "4e091bdc-e205-4ed7-8a47-0c9130a6619f": {
+      "admin_state": "up",
+      "admin_state_source": "admin",
+      "beparams": {},
+      "ctime": 1354038435.343601,
+      "disk_template": "plain",
+      "disks": [
+        "150bd154-8e23-44d1-b762-5065ae5a507b"
+      ],
+      "hvparams": {},
+      "hypervisor": "xen-pvm",
+      "mtime": 1354224585.700732,
+      "name": "instance3.example.com",
+      "nics": [
+        {
+          "mac": "aa:bb:cc:5e:5c:75",
+          "nicparams": {},
+          "uuid": "1ab090c1-e017-406c-afb4-fc285cb43e31"
+        }
+      ],
+      "os": "debian-image",
+      "osparams": {},
+      "primary_node": "2ae3d962-2dad-44f2-bdb1-85f77107f907",
+      "serial_no": 4,
+      "tags": [],
+      "uuid": "4e091bdc-e205-4ed7-8a47-0c9130a6619f"
+    },
+    "6c078d22-3eb6-4780-857d-81772e09eef1": {
+      "admin_state": "up",
+      "admin_state_source": "admin",
+      "beparams": {},
+      "ctime": 1363620258.608976,
+      "disk_template": "drbd",
+      "disks": [
+        "77ced3a5-6756-49ae-8d1f-274e27664c05"
+      ],
+      "hvparams": {},
+      "hypervisor": "xen-pvm",
+      "mtime": 1363620320.874901,
+      "name": "instance1.example.com",
+      "nics": [
+        {
+          "mac": "aa:bb:cc:b2:6e:0b",
+          "nicparams": {},
+          "uuid": "2c953d72-fac4-4aa9-a225-4131bb271791"
+        }
+      ],
+      "os": "busybox",
+      "osparams": {},
+      "primary_node": "9a12d554-75c0-4cb1-8064-103365145db0",
+      "serial_no": 2,
+      "uuid": "6c078d22-3eb6-4780-857d-81772e09eef1"
+    },
+    "8fde9f6d-e1f1-4850-9e9c-154966f622f5": {
+      "admin_state": "up",
+      "admin_state_source": "admin",
+      "beparams": {},
+      "ctime": 1355186880.451181,
+      "disk_template": "plain",
+      "disks": [
+        "79acf611-be58-4334-9fe4-4f2b73ae8abb"
+      ],
+      "hvparams": {},
+      "hypervisor": "xen-pvm",
+      "mtime": 1355186898.307642,
+      "name": "instance2.example.com",
+      "nics": [
+        {
+          "mac": "aa:bb:cc:56:83:fb",
+          "nicparams": {},
+          "uuid": "1cf95562-e676-4fd0-8214-e8b84a2f7bd1"
+        }
+      ],
+      "os": "debian-image",
+      "osparams": {},
+      "primary_node": "41f9c238-173c-4120-9e41-04ad379b647a",
+      "serial_no": 2,
+      "tags": [],
+      "uuid": "8fde9f6d-e1f1-4850-9e9c-154966f622f5"
+    }
+  },
+  "mtime": 1367352404.758083,
+  "networks": {
+    "99f0128a-1c84-44da-90b9-9581ea00c075": {
+      "ext_reservations": "1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001",
+      "name": "a network",
+      "network": "203.0.113.0/24",
+      "reservations": "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+      "serial_no": 1,
+      "uuid": "99f0128a-1c84-44da-90b9-9581ea00c075"
+    }
+  },
+  "nodegroups": {
+    "5244a46d-7506-4e14-922d-02b58153dde1": {
+      "alloc_policy": "preferred",
+      "diskparams": {},
+      "ipolicy": {},
+      "mtime": 1361963775.575009,
+      "name": "default",
+      "ndparams": {},
+      "networks": {},
+      "serial_no": 125,
+      "tags": [],
+      "uuid": "5244a46d-7506-4e14-922d-02b58153dde1"
+    },
+    "6c0a8916-b719-45ad-95dd-82192b1e473f": {
+      "alloc_policy": "preferred",
+      "diskparams": {},
+      "ipolicy": {
+        "disk-templates": [
+          "plain"
+        ],
+        "minmax": [
+          {
+            "max": {
+              "cpu-count": 8,
+              "disk-count": 16,
+              "disk-size": 1048576,
+              "memory-size": 32768,
+              "nic-count": 18,
+              "spindle-use": 14
+            },
+            "min": {
+              "cpu-count": 2,
+              "disk-count": 2,
+              "disk-size": 1024,
+              "memory-size": 128,
+              "nic-count": 1,
+              "spindle-use": 1
+            }
+          }
+        ],
+        "spindle-ratio": 5.2,
+        "vcpu-ratio": 3.14
+      },
+      "mtime": 1361963775.575009,
+      "name": "another",
+      "ndparams": {
+        "exclusive_storage": true
+      },
+      "networks": {},
+      "serial_no": 125,
+      "tags": [],
+      "uuid": "6c0a8916-b719-45ad-95dd-82192b1e473f"
+    }
+  },
+  "nodes": {
+    "2ae3d962-2dad-44f2-bdb1-85f77107f907": {
+      "ctime": 1343869045.604884,
+      "drained": false,
+      "group": "5244a46d-7506-4e14-922d-02b58153dde1",
+      "master_candidate": true,
+      "master_capable": true,
+      "mtime": 1358348755.779906,
+      "name": "node2.example.com",
+      "ndparams": {},
+      "offline": false,
+      "powered": true,
+      "primary_ip": "192.0.2.83",
+      "secondary_ip": "198.51.100.83",
+      "serial_no": 6,
+      "tags": [],
+      "uuid": "2ae3d962-2dad-44f2-bdb1-85f77107f907",
+      "vm_capable": true
+    },
+    "41f9c238-173c-4120-9e41-04ad379b647a": {
+      "ctime": 1343869205.934807,
+      "drained": false,
+      "group": "5244a46d-7506-4e14-922d-02b58153dde1",
+      "master_candidate": true,
+      "master_capable": true,
+      "mtime": 1353019704.885368,
+      "name": "node3.example.com",
+      "ndparams": {},
+      "offline": false,
+      "powered": true,
+      "primary_ip": "192.0.2.84",
+      "secondary_ip": "198.51.100.84",
+      "serial_no": 2,
+      "tags": [],
+      "uuid": "41f9c238-173c-4120-9e41-04ad379b647a",
+      "vm_capable": true
+    },
+    "9a12d554-75c0-4cb1-8064-103365145db0": {
+      "ctime": 1349722460.022264,
+      "drained": false,
+      "group": "5244a46d-7506-4e14-922d-02b58153dde1",
+      "master_candidate": true,
+      "master_capable": true,
+      "mtime": 1359986533.353329,
+      "name": "node1.example.com",
+      "ndparams": {},
+      "offline": false,
+      "powered": true,
+      "primary_ip": "192.0.2.82",
+      "secondary_ip": "198.51.100.82",
+      "serial_no": 197,
+      "tags": [],
+      "uuid": "9a12d554-75c0-4cb1-8064-103365145db0",
+      "vm_capable": true
+    }
+  },
+  "filters": {},
+  "serial_no": 7625,
+  "version": 2130000
+}
diff --git a/test/data/htools/hail-alloc-nlocation.json b/test/data/htools/hail-alloc-nlocation.json
new file mode 100644
index 0000000..ff566c1
--- /dev/null
+++ b/test/data/htools/hail-alloc-nlocation.json
@@ -0,0 +1,159 @@
+{
+  "cluster_tags": [
+    "htools:nlocation:power"
+  ],
+  "nodegroups": {
+    "uuid-group-1": {
+      "ipolicy": {
+        "std": {
+          "nic-count": 1,
+          "disk-size": 1024,
+          "disk-count": 1,
+          "memory-size": 128,
+          "cpu-count": 1,
+          "spindle-use": 1
+        },
+        "minmax": [
+          {
+            "min": {
+              "nic-count": 1,
+              "disk-size": 128,
+              "disk-count": 1,
+              "memory-size": 128,
+              "cpu-count": 1,
+              "spindle-use": 1
+            },
+            "max": {
+              "nic-count": 8,
+              "disk-size": 1048576,
+              "disk-count": 16,
+              "memory-size": 32768,
+              "cpu-count": 8,
+              "spindle-use": 8
+            }
+          }
+        ],
+        "vcpu-ratio": 4.0,
+        "disk-templates": [
+          "sharedfile",
+          "diskless",
+          "plain",
+          "blockdev",
+          "drbd",
+          "file",
+          "rbd"
+        ],
+        "spindle-ratio": 32.0
+      },
+      "networks": [],
+      "alloc_policy": "preferred",
+      "tags": [],
+      "name": "default"
+    }
+  },
+  "cluster_name": "cluster",
+  "instances": {},
+  "nodes": {
+    "node1": {
+      "total_disk": 307200,
+      "total_cpus": 4,
+      "group": "uuid-group-1",
+      "i_pri_up_memory": 0,
+      "tags": [
+        "power:a"
+      ],
+      "master_candidate": true,
+      "free_memory": 4096,
+      "ndparams": {
+        "spindle_count": 1,
+        "oob_program": null,
+        "exclusive_storage": false
+      },
+      "reserved_cpus": 1,
+      "master_capable": true,
+      "free_disk": 307200,
+      "drained": false,
+      "total_memory": 4096,
+      "i_pri_memory": 0,
+      "reserved_memory": 0,
+      "free_spindles": 12,
+      "total_spindles": 12,
+      "vm_capable": true,
+      "offline": false
+    },
+    "node2": {
+      "total_disk": 307200,
+      "total_cpus": 4,
+      "group": "uuid-group-1",
+      "i_pri_up_memory": 0,
+      "tags": [
+        "power:a"
+      ],
+      "master_candidate": true,
+      "free_memory": 4096,
+      "ndparams": {
+        "spindle_count": 1,
+        "oob_program": null,
+        "exclusive_storage": false
+      },
+      "reserved_cpus": 1,
+      "master_capable": true,
+      "free_disk": 307200,
+      "drained": false,
+      "total_memory": 4096,
+      "i_pri_memory": 0,
+      "reserved_memory": 0,
+      "free_spindles": 12,
+      "total_spindles": 12,
+      "vm_capable": true,
+      "offline": false
+    },
+    "node3": {
+      "total_disk": 107200,
+      "total_cpus": 4,
+      "group": "uuid-group-1",
+      "i_pri_up_memory": 0,
+      "tags": [
+        "power:b"
+      ],
+      "master_candidate": true,
+      "free_memory": 1024,
+      "ndparams": {
+        "spindle_count": 1,
+        "oob_program": null,
+        "exclusive_storage": false
+      },
+      "reserved_cpus": 1,
+      "master_capable": true,
+      "free_disk": 107200,
+      "drained": false,
+      "total_memory": 1024,
+      "i_pri_memory": 0,
+      "reserved_memory": 0,
+      "free_spindles": 12,
+      "total_spindles": 12,
+      "vm_capable": true,
+      "offline": false
+    }
+  },
+  "request": {
+    "disk_space_total": 0,
+    "disk_template": "drbd",
+    "disks": [
+      {
+        "size": 1024
+      }
+    ],
+    "hypervisor": "xen-pvm",
+    "memory": 256,
+    "name": "instance-new",
+    "nics": [],
+    "os": "instance-debootstrap",
+    "required_nodes": 2,
+    "spindle_use": 1,
+    "tags": [
+    ],
+    "type": "allocate",
+    "vcpus": 1
+  }
+}
diff --git a/test/data/htools/hail-reloc-drbd-crowded.json b/test/data/htools/hail-reloc-drbd-crowded.json
index c570290..dfab68c 100644
--- a/test/data/htools/hail-reloc-drbd-crowded.json
+++ b/test/data/htools/hail-reloc-drbd-crowded.json
@@ -438,12 +438,12 @@
         "oob_program": null,
         "exclusive_storage": false
       },
-      "reserved_memory": 1017,
+      "reserved_memory": 0,
       "reserved_cpus": 1,
       "master_capable": true,
       "free_disk": 1377280,
       "drained": false,
-      "total_memory": 1463,
+      "total_memory": 89,
       "primary_ip": "192.168.1.1",
       "i_pri_memory": 0,
       "free_spindles": 12,
@@ -465,12 +465,12 @@
         "oob_program": null,
         "exclusive_storage": false
       },
-      "reserved_memory": 1017,
+      "reserved_memory": 0,
       "reserved_cpus": 1,
       "master_capable": true,
       "free_disk": 1376640,
       "drained": false,
-      "total_memory": 1063,
+      "total_memory": 46,
       "primary_ip": "192.168.1.2",
       "i_pri_memory": 0,
       "free_spindles": 11,
@@ -497,7 +497,7 @@
       "master_capable": true,
       "free_disk": 1373336,
       "drained": false,
-      "total_memory": 32763,
+      "total_memory": 31618,
       "primary_ip": "192.168.1.3",
       "i_pri_memory": 2432,
       "free_spindles": 6,
@@ -524,7 +524,7 @@
       "master_capable": true,
       "free_disk": 1371520,
       "drained": false,
-      "total_memory": 32763,
+      "total_memory": 31234,
       "primary_ip": "192.168.1.4",
       "i_pri_memory": 23552,
       "free_spindles": 0,
diff --git a/test/data/htools/hbal-forth.data b/test/data/htools/hbal-forth.data
new file mode 100644
index 0000000..b3cf3fe
--- /dev/null
+++ b/test/data/htools/hbal-forth.data
@@ -0,0 +1,13 @@
+default|fake-uuid|preferred||
+
+node1|65523|1023|65523|3405248|3405248|24|M|fake-uuid|1||N|0|1|1.0
+node2|65523|1023|65523|3405248|3405248|24|N|fake-uuid|1||N|0|1|1.0
+node3|65523|1023|65523|3405248|3405248|24|N|fake-uuid|1||N|0|1|1.0
+
+forthcoming-inst1|128|2176|1|ADMIN_down|Y|node1|node2|drbd||1|-|Y
+forthcoming-inst2|128|2176|1|ADMIN_down|Y|node1|node2|drbd||1|-|Y
+forthcoming-inst3|128|2176|1|ADMIN_down|Y|node1|node2|drbd||1|-|Y
+
+
+|128,1,1024,1,1,1|128,1,512,1,1,1;1024,8,1048576,16,8,12|drbd,plain,diskless|4.0|32.0
+default|128,1,1024,1,1,1|128,1,512,1,1,1;1024,8,1048576,16,8,12|drbd,plain,diskless|4.0|32.0
diff --git a/test/data/htools/hbal-location-1.data b/test/data/htools/hbal-location-1.data
new file mode 100644
index 0000000..81d5e70
--- /dev/null
+++ b/test/data/htools/hbal-location-1.data
@@ -0,0 +1,13 @@
+group-01|fake-uuid-01|preferred||
+
+node-01|16384|0|15360|409600|306600|16|N|fake-uuid-01|1|power:a
+node-02|16384|0|15360|409600|306600|16|N|fake-uuid-01|1|power:a
+node-11|16384|0|15360|409600|306600|16|N|fake-uuid-01|1|power:b
+node-12|16384|0|15360|409600|306600|16|N|fake-uuid-01|1|power:b
+
+inst01|1024|51200|1|running|Y|node-01|node-02|drbd||1
+inst02|1024|51200|1|running|Y|node-02|node-01|drbd||1
+inst11|1024|51200|1|running|Y|node-11|node-12|drbd||1
+inst12|1024|51200|1|running|Y|node-12|node-11|drbd||1
+
+htools:nlocation:power
diff --git a/test/data/htools/hbal-location-2.data b/test/data/htools/hbal-location-2.data
new file mode 100644
index 0000000..2ac05f6
--- /dev/null
+++ b/test/data/htools/hbal-location-2.data
@@ -0,0 +1,10 @@
+group-01|fake-uuid-01|preferred||
+
+node-01|16384|0|15360|409600|306600|16|N|fake-uuid-01|1|power:a,power:c
+node-02|2048|0|1024|109600|6600|16|N|fake-uuid-01|1|power:b
+node-03|2048|0|2048|409600|409600|16|N|fake-uuid-01|1|power:a,power:c
+
+inst1|1024|51200|1|running|Y|node-01|node-02|drbd||1
+inst2|1024|51200|1|running|Y|node-02|node-01|drbd||1
+
+htools:nlocation:power
diff --git a/test/data/instance-disks.txt b/test/data/instance-disks.txt
index 55bd0f4..cf34d94 100644
--- a/test/data/instance-disks.txt
+++ b/test/data/instance-disks.txt
@@ -2,7 +2,6 @@
    "admin_state_source": "user",
    "beparams": {},
    "ctime": 1372838883.9710441,
-   "disk_template": "drbd",
    "disks": ["5d61e205-bf89-4ba8-a319-589b7bb7419e"],
    "disks_active": true,
    "hvparams": {},
diff --git a/test/hs/Test/Ganeti/HTools/Backend/Text.hs b/test/hs/Test/Ganeti/HTools/Backend/Text.hs
index e0f20fd..bf3809d 100644
--- a/test/hs/Test/Ganeti/HTools/Backend/Text.hs
+++ b/test/hs/Test/Ganeti/HTools/Backend/Text.hs
@@ -49,9 +49,11 @@
 import Test.Ganeti.TestHTools
 import Test.Ganeti.HTools.Instance (genInstanceSmallerThanNode,
                                     genInstanceOnNodeList)
-import Test.Ganeti.HTools.Node (genNode, genOnlineNode, genUniqueNodeList)
+import Test.Ganeti.HTools.Node (genNode, genOnlineNode, genEmptyOnlineNode
+                               , genUniqueNodeList)
 
 import Ganeti.BasicTypes
+import Ganeti.Types (InstanceStatus(..))
 import qualified Ganeti.HTools.AlgorithmParams as Alg
 import qualified Ganeti.HTools.Backend.Text as Text
 import qualified Ganeti.HTools.Cluster as Cluster
@@ -108,7 +110,7 @@
 
 prop_Load_InstanceFail :: [(String, Int)] -> [String] -> Property
 prop_Load_InstanceFail ktn fields =
-  length fields < 10 || length fields > 12 ==>
+  length fields < 10 || length fields > 13 ==>
     case Text.loadInst nl fields of
       Ok _ -> failTest "Managed to load instance from invalid data"
       Bad msg -> counterexample ("Unrecognised error message: " ++ msg) $
@@ -179,21 +181,21 @@
   case Text.loadISpec "dummy" . Utils.sepSplit ',' .
        Text.serializeISpec $ ispec of
     Bad msg -> failTest $ "Failed to load ispec: " ++ msg
-    Ok ispec' -> ispec ==? ispec'
+    Ok ispec' -> ispec' ==? ispec
 
 prop_MultipleMinMaxISpecsIdempotent :: [Types.MinMaxISpecs] -> Property
 prop_MultipleMinMaxISpecsIdempotent minmaxes =
   case Text.loadMultipleMinMaxISpecs "dummy" . Utils.sepSplit ';' .
        Text.serializeMultipleMinMaxISpecs $ minmaxes of
     Bad msg -> failTest $ "Failed to load min/max ispecs: " ++ msg
-    Ok minmaxes' -> minmaxes ==? minmaxes'
+    Ok minmaxes' -> minmaxes' ==? minmaxes
 
 prop_IPolicyIdempotent :: Types.IPolicy -> Property
 prop_IPolicyIdempotent ipol =
   case Text.loadIPolicy . Utils.sepSplit '|' $
        Text.serializeIPolicy owner ipol of
     Bad msg -> failTest $ "Failed to load ispec: " ++ msg
-    Ok res -> (owner, ipol) ==? res
+    Ok res -> res ==? (owner, ipol)
   where owner = "dummy"
 
 -- | This property, while being in the text tests, does more than just
@@ -208,8 +210,17 @@
   forAll genTags $ \ctags ->
   forAll (choose (1, 20)) $ \maxiter ->
   forAll (choose (2, 10)) $ \count ->
-  forAll genOnlineNode $ \node ->
-  forAll (genInstanceSmallerThanNode node) $ \inst ->
+  forAll genEmptyOnlineNode $ \node ->
+  forAll (genInstanceSmallerThanNode node `suchThat`
+            -- We want to test with a working node, so don't generate a
+            -- status that indicates a problem with the node.
+            (\i -> Instance.runSt i `elem` [ StatusDown
+                                           , StatusOffline
+                                           , ErrorDown
+                                           , ErrorUp
+                                           , Running
+                                           , UserDown
+                                           ])) $ \inst ->
   let nl = makeSmallCluster node count
       reqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
       opts = Alg.defaultOptions
@@ -220,20 +231,21 @@
        Bad msg -> failTest $ "Failed to allocate: " ++ msg
        Ok (_, _, _, [], _) -> counterexample
                               "Failed to allocate: no allocations" False
-       Ok (_, nl', il', _, _) ->
-         let cdata = Loader.ClusterData defGroupList nl' il' ctags
+       Ok (_, nl', il, _, _) ->
+         let cdata = Loader.ClusterData defGroupList nl' il ctags
                      Types.defIPolicy
              saved = Text.serializeCluster cdata
          in case Text.parseData saved >>= Loader.mergeData [] [] [] [] (TOD 0 0)
             of
               Bad msg -> failTest $ "Failed to load/merge: " ++ msg
               Ok (Loader.ClusterData gl2 nl2 il2 ctags2 cpol2) ->
-                conjoin [ ctags ==? ctags2
-                        , Types.defIPolicy ==? cpol2
-                        , il' ==? il2
-                        , defGroupList ==? gl2
-                        , nl' ==? nl2
-                        ]
+                let (_, nl3) = Loader.checkData nl2 il2
+                in conjoin [ ctags2 ==? ctags
+                           , cpol2 ==? Types.defIPolicy
+                           , il2 ==? il
+                           , gl2 ==? defGroupList
+                           , nl3 ==? nl'
+                           ]
 
 testSuite "HTools/Backend/Text"
             [ 'prop_Load_Instance
diff --git a/test/hs/Test/Ganeti/HTools/ExtLoader.hs b/test/hs/Test/Ganeti/HTools/ExtLoader.hs
index 40f92d0..31a6ccf 100644
--- a/test/hs/Test/Ganeti/HTools/ExtLoader.hs
+++ b/test/hs/Test/Ganeti/HTools/ExtLoader.hs
@@ -1,3 +1,5 @@
+{-# LANGUAGE TemplateHaskell #-}
+
 {-| Unittests for the MonD data parse function -}
 
 {-
@@ -30,9 +32,9 @@
 
 -}
 
-module Test.Ganeti.HTools.ExtLoader where
-
-import Data.Ratio
+module Test.Ganeti.HTools.ExtLoader
+  ( testHTools_ExtLoader
+  ) where
 
 import qualified Test.HUnit as HUnit
 import qualified Text.JSON as J
@@ -45,6 +47,7 @@
 import Ganeti.HTools.ExtLoader
 import Ganeti.JSON
 import Test.Ganeti.TestCommon
+import Test.Ganeti.TestHelper
 
 {-# ANN module "HLint: ignore Use camelCase" #-}
 
@@ -119,3 +122,7 @@
      && relError (cavCpuTotal a) (cavCpuTotal b)
      && length (cavCpus a) == length (cavCpus b)
      && and (zipWith relError (cavCpus a) (cavCpus b))
+
+testSuite "HTools/ExtLoader"
+          [ 'case_parseMonDData
+          ]
diff --git a/test/hs/Test/Ganeti/HTools/Instance.hs b/test/hs/Test/Ganeti/HTools/Instance.hs
index db75429..190bfc2 100644
--- a/test/hs/Test/Ganeti/HTools/Instance.hs
+++ b/test/hs/Test/Ganeti/HTools/Instance.hs
@@ -80,9 +80,10 @@
   spindles <- case max_spin of
     Nothing -> genMaybe $ choose (min_spin, maxSpindles)
     Just ls -> liftM Just $ choose (min_spin, ls)
+  forthcoming <- arbitrary
   let disk = Instance.Disk dsk spindles
   return $ Instance.create
-    name mem dsk [disk] vcpus run_st [] True pn sn dt 1 []
+    name mem dsk [disk] vcpus run_st [] True pn sn dt 1 [] forthcoming
 
 -- | Generate an instance with maximum disk/mem/cpu values.
 genInstanceSmallerThan :: Int -> Int -> Int -> Maybe Int
@@ -96,7 +97,7 @@
                          (Node.availDisk node `div` 2)
                          (Node.availCpu node `div` 2)
                          (if Node.exclStorage node
-                          then Just $ Node.fSpindles node `div` 2
+                          then Just $ Node.fSpindlesForth node `div` 2
                           else Nothing)
 
 -- | Generates an instance possibly bigger than a node.
diff --git a/test/hs/Test/Ganeti/HTools/Node.hs b/test/hs/Test/Ganeti/HTools/Node.hs
index d782fe2..e7f46e2 100644
--- a/test/hs/Test/Ganeti/HTools/Node.hs
+++ b/test/hs/Test/Ganeti/HTools/Node.hs
@@ -41,6 +41,7 @@
   , setInstanceSmallerThanNode
   , genNode
   , genOnlineNode
+  , genEmptyOnlineNode
   , genNodeList
   , genUniqueNodeList
   ) where
@@ -118,15 +119,41 @@
                               Node.availCpu n > 2 &&
                               Node.tSpindles n > 2)
 
+-- | Helper function to generate a sane empty node with consistent
+-- internal data.
+genEmptyOnlineNode :: Gen Node.Node
+genEmptyOnlineNode =
+  (do node <- arbitrary
+      let fmem = truncate (Node.tMem node) - Node.nMem node
+      let node' = node { Node.offline = False
+                       , Node.fMem = fmem
+                       , Node.fMemForth = fmem
+                       , Node.pMem = fromIntegral fmem / Node.tMem node
+                       , Node.pMemForth = fromIntegral fmem / Node.tMem node
+                       , Node.rMem = 0
+                       , Node.rMemForth = 0
+                       , Node.pRem = 0
+                       , Node.pRemForth = 0
+                       }
+      return node') `suchThat` (\ n -> not (Node.failN1 n) &&
+                                       Node.availDisk n > 0 &&
+                                       Node.availMem n > 0 &&
+                                       Node.availCpu n > 0 &&
+                                       Node.tSpindles n > 0)
+
 -- | Generate a node with exclusive storage enabled.
 genExclStorNode :: Gen Node.Node
 genExclStorNode = do
   n <- genOnlineNode
   fs <- choose (Types.unitSpindle, Node.tSpindles n)
+  fsForth <- choose (Types.unitSpindle, fs)
   let pd = fromIntegral fs / fromIntegral (Node.tSpindles n)::Double
+  let pdForth = fromIntegral fsForth / fromIntegral (Node.tSpindles n)::Double
   return n { Node.exclStorage = True
            , Node.fSpindles = fs
+           , Node.fSpindlesForth = fsForth
            , Node.pDsk = pd
+           , Node.pDskForth = pdForth
            }
 
 -- | Generate a node with exclusive storage possibly enabled.
@@ -187,17 +214,6 @@
   Types.iPolicyVcpuRatio (Node.iPolicy newnode) ==? mc
     where newnode = Node.setMcpu node mc
 
-prop_setFmemGreater :: Node.Node -> Int -> Property
-prop_setFmemGreater node new_mem =
-  not (Node.failN1 node) && (Node.rMem node >= 0) &&
-  (new_mem > Node.rMem node) ==>
-  not (Node.failN1 (Node.setFmem node new_mem))
-
-prop_setFmemExact :: Node.Node -> Property
-prop_setFmemExact node =
-  not (Node.failN1 node) && (Node.rMem node >= 0) ==>
-  not (Node.failN1 (Node.setFmem node (Node.rMem node)))
-
 -- Check if adding an instance that consumes exactly all reserved
 -- memory does not raise an N+1 error
 prop_addPri_NoN1Fail :: Property
@@ -212,15 +228,16 @@
 prop_addPriFM :: Node.Node -> Instance.Instance -> Property
 prop_addPriFM node inst =
   Instance.mem inst >= Node.fMem node && not (Node.failN1 node) &&
-  not (Instance.isOffline inst) ==>
+  Instance.usesMemory inst ==>
   (Node.addPri node inst'' ==? Bad Types.FailMem)
   where inst' = setInstanceSmallerThanNode node inst
         inst'' = inst' { Instance.mem = Instance.mem inst }
 
 -- | Check that adding a primary instance with too much disk fails
 -- with type FailDisk.
-prop_addPriFD :: Node.Node -> Instance.Instance -> Property
-prop_addPriFD node inst =
+prop_addPriFD :: Instance.Instance -> Property
+prop_addPriFD inst =
+  forAll (genNode (Just 1) Nothing) $ \node ->
   forAll (elements Instance.localStorageTemplates) $ \dt ->
   Instance.dsk inst >= Node.fDsk node && not (Node.failN1 node) ==>
   let inst' = setInstanceSmallerThanNode node inst
@@ -254,7 +271,8 @@
 prop_addPriFC =
   forAll (choose (1, maxCpu)) $ \extra ->
   forAll genMaybeExclStorNode $ \node ->
-  forAll (arbitrary `suchThat` Instance.notOffline) $ \inst ->
+  forAll (arbitrary `suchThat` Instance.notOffline
+                    `suchThat` (not . Instance.forthcoming)) $ \inst ->
   let inst' = setInstanceSmallerThanNode node inst
       inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }
   in case Node.addPri node inst'' of
@@ -282,7 +300,7 @@
   let inst' = inst { Instance.runSt = Types.StatusOffline
                    , Instance.mem = Node.availMem node + extra_mem
                    , Instance.vcpus = Node.availCpu node + extra_cpu }
-  in case Node.addPri node inst' of
+  in case Node.addPriEx True node inst' of
        Ok _ -> passTest
        v -> failTest $ "Expected OpGood, but got: " ++ show v
 
@@ -304,7 +322,8 @@
 -- | Checks for memory reservation changes.
 prop_rMem :: Instance.Instance -> Property
 prop_rMem inst =
-  not (Instance.isOffline inst) ==>
+  not (Instance.isOffline inst) && not (Instance.forthcoming inst) ==>
+  -- TODO Should we also require ((> Types.unitMem) . Node.fMemForth) ?
   forAll (genMaybeExclStorNode `suchThat` ((> Types.unitMem) . Node.fMem)) $
     \node ->
   -- ab = auto_balance, nb = non-auto_balance
@@ -450,8 +469,6 @@
             [ 'prop_setAlias
             , 'prop_setOffline
             , 'prop_setMcpu
-            , 'prop_setFmemGreater
-            , 'prop_setFmemExact
             , 'prop_setXmem
             , 'prop_addPriFM
             , 'prop_addPriFD
diff --git a/test/hs/Test/Ganeti/JQScheduler.hs b/test/hs/Test/Ganeti/JQScheduler.hs
index a05a879..a0aa650 100644
--- a/test/hs/Test/Ganeti/JQScheduler.hs
+++ b/test/hs/Test/Ganeti/JQScheduler.hs
@@ -287,11 +287,12 @@
                           , opPriority = OpPrioHigh
                           , opDepends = Just []
                           , opComment = Nothing
-                          , opReason = [("source1", "reason1", 1234)]}
-                          , metaOpCode = OpClusterRename
-                              { opName = clusterName
-                              }
-                        }
+                          , opReason = [("source1", "reason1", 1234)]
+                          }
+                      , metaOpCode = OpClusterRename
+                          { opName = clusterName
+                          }
+                      }
                   , qoStatus = OP_STATUS_QUEUED
                   , qoResult = JSNull
                   , qoLog = []
diff --git a/test/hs/Test/Ganeti/Locking/Locks.hs b/test/hs/Test/Ganeti/Locking/Locks.hs
index eedaed0..732779f 100644
--- a/test/hs/Test/Ganeti/Locking/Locks.hs
+++ b/test/hs/Test/Ganeti/Locking/Locks.hs
@@ -58,8 +58,6 @@
                     , Instance <$> genFQDN
                     , return NodeGroupLockSet
                     , NodeGroup <$> genUUID
-                    , return NAL
-                    , return NodeAllocLockSet
                     , return NodeResLockSet
                     , NodeRes <$> genUUID
                     , return NodeLockSet
diff --git a/test/hs/Test/Ganeti/Objects.hs b/test/hs/Test/Ganeti/Objects.hs
index 06f15c4..fab48bb 100644
--- a/test/hs/Test/Ganeti/Objects.hs
+++ b/test/hs/Test/Ganeti/Objects.hs
@@ -121,10 +121,20 @@
 -- properties, we only generate disks with no children (FIXME), as
 -- generating recursive datastructures is a bit more work.
 instance Arbitrary Disk where
-  arbitrary = Disk <$> arbitrary <*> pure [] <*> arbitrary
+  arbitrary =
+   frequency [ (2, liftM RealDisk $ RealDiskData <$> arbitrary
+                   <*> pure [] <*> arbitrary
                    <*> arbitrary <*> arbitrary <*> arbitrary
                    <*> arbitrary <*> arbitrary <*> arbitrary
                    <*> arbitrary <*> arbitrary <*> arbitrary
+                   <*> arbitrary)
+             , (1, liftM ForthcomingDisk $ ForthcomingDiskData <$> arbitrary
+                   <*> pure [] <*> arbitrary
+                   <*> arbitrary <*> arbitrary <*> arbitrary
+                   <*> arbitrary <*> arbitrary <*> arbitrary
+                   <*> arbitrary <*> arbitrary <*> arbitrary
+                   <*> arbitrary)
+             ]
 
 -- FIXME: we should generate proper values, >=0, etc., but this is
 -- hard for partial ones, where all must be wrapped in a 'Maybe'
@@ -138,9 +148,50 @@
 
 $(genArbitrary ''PartialNic)
 
-instance Arbitrary Instance where
+instance Arbitrary ForthcomingInstanceData where
   arbitrary =
-    Instance
+    ForthcomingInstanceData
+      -- name
+      <$> genMaybe genFQDN
+      -- primary node
+      <*> genMaybe genFQDN
+      -- OS
+      <*> genMaybe genFQDN
+      -- hypervisor
+      <*> arbitrary
+      -- hvparams
+      -- FIXME: add non-empty hvparams when they're a proper type
+      <*> pure (GenericContainer Map.empty)
+      -- beparams
+      <*> arbitrary
+      -- osparams
+      <*> pure (GenericContainer Map.empty)
+      -- osparams_private
+      <*> pure (GenericContainer Map.empty)
+      -- admin_state
+      <*> genMaybe arbitrary
+      -- admin_state_source
+      <*> genMaybe arbitrary
+      -- nics
+      <*> arbitrary
+      -- disks
+      <*> vectorOf 5 arbitrary
+      -- disks active
+      <*> genMaybe arbitrary
+      -- network port
+      <*> arbitrary
+      -- ts
+      <*> arbitrary <*> arbitrary
+      -- uuid
+      <*> arbitrary
+      -- serial
+      <*> arbitrary
+      -- tags
+      <*> (Set.fromList <$> genTags)
+
+instance Arbitrary RealInstanceData where
+  arbitrary =
+    RealInstanceData
       -- name
       <$> genFQDN
       -- primary node
@@ -166,8 +217,6 @@
       <*> arbitrary
       -- disks
       <*> vectorOf 5 arbitrary
-      -- disk template
-      <*> arbitrary
       -- disks active
       <*> arbitrary
       -- network port
@@ -181,11 +230,16 @@
       -- tags
       <*> (Set.fromList <$> genTags)
 
+instance Arbitrary Instance where
+  arbitrary = frequency [ (1, ForthcomingInstance <$> arbitrary)
+                        , (3, RealInstance <$> arbitrary)
+                        ]
+
 -- | Generates an instance that is connected to the given networks
 -- and possibly some other networks
 genInstWithNets :: [String] -> Gen Instance
 genInstWithNets nets = do
-  plain_inst <- arbitrary
+  plain_inst <- RealInstance <$> arbitrary
   enhanceInstWithNets plain_inst nets
 
 -- | Generates an instance that is connected to some networks
@@ -207,13 +261,17 @@
   let genNic net = PartialNic mac ip nicparams net name uuid
       partial_nics = map (genNic . Just)
                          (List.nub (nets ++ more_nets))
-      new_inst = inst { instNics = partial_nics }
+      new_inst = case inst of
+                   RealInstance rinst ->
+                     RealInstance rinst { realInstNics = partial_nics }
+                   ForthcomingInstance _ -> inst
   return new_inst
 
 genDiskWithChildren :: Int -> Gen Disk
 genDiskWithChildren num_children = do
   logicalid <- arbitrary
   children <- vectorOf num_children (genDiskWithChildren 0)
+  nodes <- arbitrary
   ivname <- genName
   size <- arbitrary
   mode <- arbitrary
@@ -223,8 +281,8 @@
   uuid <- genName
   serial <- arbitrary
   time <- arbitrary
-  return $
-    Disk logicalid children ivname size mode name
+  return . RealDisk $
+    RealDiskData logicalid children nodes ivname size mode name
       spindles params uuid serial time time
 
 genDisk :: Gen Disk
@@ -667,8 +725,9 @@
       lv_name = "1234sdf-qwef-2134-asff-asd2-23145d.data" :: String
       lv = LogicalVolume vg_name lv_name
       time = TOD 0 0
-      d =
-        Disk (LIDPlain lv) [] "diskname" 1000 DiskRdWr
+      d = RealDisk $
+        RealDiskData (LIDPlain lv) [] ["node1.example.com"] "diskname"
+          1000 DiskRdWr
           Nothing Nothing Nothing "asdfgr-1234-5123-daf3-sdfw-134f43"
           0 time time
   in
@@ -681,15 +740,20 @@
   let vg_name = "xenvg" :: String
       lv_name = "1234sdf-qwef-2134-asff-asd2-23145d.data" :: String
       time = TOD 0 0
-      d =
-        Disk
+      d = RealDisk $
+        RealDiskData
           (LIDDrbd8 "node1.example.com" "node2.example.com" 2000 1 5
            (Private "secret"))
-          [ Disk (mkLIDPlain "onevg" "onelv") [] "disk1" 1000 DiskRdWr Nothing
-              Nothing Nothing "145145-asdf-sdf2-2134-asfd-534g2x" 0 time time
-          , Disk (mkLIDPlain vg_name lv_name) [] "disk2" 1000 DiskRdWr Nothing
-              Nothing Nothing "6gd3sd-423f-ag2j-563b-dg34-gj3fse" 0 time time
-          ] "diskname" 1000 DiskRdWr Nothing Nothing Nothing
+          [ RealDisk $ RealDiskData (mkLIDPlain "onevg" "onelv") []
+              ["node1.example.com", "node2.example.com"] "disk1" 1000 DiskRdWr
+              Nothing Nothing Nothing "145145-asdf-sdf2-2134-asfd-534g2x"
+              0 time time
+          , RealDisk $ RealDiskData (mkLIDPlain vg_name lv_name) []
+              ["node1.example.com", "node2.example.com"] "disk2" 1000 DiskRdWr
+              Nothing Nothing Nothing "6gd3sd-423f-ag2j-563b-dg34-gj3fse"
+              0 time time
+          ] ["node1.example.com", "node2.example.com"] "diskname" 1000 DiskRdWr
+          Nothing Nothing Nothing
           "asdfgr-1234-5123-daf3-sdfw-134f43" 0 time time
   in
     HUnit.assertBool "Unable to detect that plain Disk includes logical ID" $
@@ -701,9 +765,10 @@
   let vg_name = "xenvg" :: String
       lv_name = "1234sdf-qwef-2134-asff-asd2-23145d.data" :: String
       time = TOD 0 0
-      d =
-        Disk (mkLIDPlain "othervg" "otherlv") [] "diskname" 1000 DiskRdWr
-          Nothing Nothing Nothing "asdfgr-1234-5123-daf3-sdfw-134f43"
+      d = RealDisk $
+        RealDiskData (mkLIDPlain "othervg" "otherlv") [] ["node1.example.com"]
+          "diskname" 1000 DiskRdWr Nothing Nothing Nothing
+          "asdfgr-1234-5123-daf3-sdfw-134f43"
           0 time time
   in
     HUnit.assertBool "Unable to detect that plain Disk includes logical ID" $
diff --git a/test/hs/Test/Ganeti/OpCodes.hs b/test/hs/Test/Ganeti/OpCodes.hs
index ba9e4bc..694c9fe 100644
--- a/test/hs/Test/Ganeti/OpCodes.hs
+++ b/test/hs/Test/Ganeti/OpCodes.hs
@@ -297,7 +297,7 @@
       "OP_NODE_EVACUATE" ->
         OpCodes.OpNodeEvacuate <$> arbitrary <*> genNodeNameNE <*>
           return Nothing <*> genMaybe genNodeNameNE <*> return Nothing <*>
-          genMaybe genNameNE <*> arbitrary
+          genMaybe genNameNE <*> arbitrary <*> arbitrary
       "OP_INSTANCE_CREATE" ->
         OpCodes.OpInstanceCreate
           <$> genFQDN                         -- instance_name
@@ -338,6 +338,8 @@
           <*> genMaybe genNameNE              -- src_path
           <*> genPrintableAsciiString         -- compress
           <*> arbitrary                       -- start
+          <*> arbitrary                       -- forthcoming
+          <*> arbitrary                       -- commit
           <*> (genTags >>= mapM mkNonEmpty)   -- tags
           <*> arbitrary                       -- instance_communication
           <*> arbitrary                       -- helper_startup_timeout
@@ -452,12 +454,23 @@
       "OP_BACKUP_PREPARE" ->
         OpCodes.OpBackupPrepare <$> genFQDN <*> return Nothing <*> arbitrary
       "OP_BACKUP_EXPORT" ->
-        OpCodes.OpBackupExport <$> genFQDN <*> return Nothing <*>
-          genPrintableAsciiString <*>
-          arbitrary <*> arbitrary <*> return Nothing <*>
-          arbitrary <*> arbitrary <*> arbitrary <*> arbitrary <*>
-          genMaybe (pure []) <*> genMaybe genNameNE <*> arbitrary <*>
-          arbitrary <*> arbitrary
+        OpCodes.OpBackupExport
+          <$> genFQDN                  -- instance_name
+          <*> return Nothing           -- instance_uuid
+          <*> genPrintableAsciiString  -- compress
+          <*> arbitrary                -- shutdown_timeout
+          <*> arbitrary                -- target_node
+          <*> return Nothing           -- target_node_uuid
+          <*> arbitrary                -- shutdown
+          <*> arbitrary                -- remove_instance
+          <*> arbitrary                -- ignore_remove_failures
+          <*> arbitrary                -- mode
+          <*> genMaybe (pure [])       -- x509_key_name
+          <*> genMaybe genNameNE       -- destination_x509_ca
+          <*> arbitrary                -- zero_free_space
+          <*> arbitrary                -- zeroing_timeout_fixed
+          <*> arbitrary                -- zeroing_timeout_per_mib
+          <*> arbitrary                -- long_sleep
       "OP_BACKUP_REMOVE" ->
         OpCodes.OpBackupRemove <$> genFQDN <*> return Nothing
       "OP_TEST_ALLOCATOR" ->
diff --git a/test/hs/Test/Ganeti/PartialParams.hs b/test/hs/Test/Ganeti/PartialParams.hs
new file mode 100644
index 0000000..d999c67
--- /dev/null
+++ b/test/hs/Test/Ganeti/PartialParams.hs
@@ -0,0 +1,73 @@
+{-| Common tests for PartialParams instances
+
+-}
+
+{-
+
+Copyright (C) 2012 Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-}
+
+module Test.Ganeti.PartialParams
+  ( testFillParamsLaw1
+  , testToParamsLaw2
+  , testToFilledLaw3
+  , testToFilledMonoidLaw1
+  , testToFilledMonoidLaw2
+  ) where
+
+import Data.Monoid
+
+import Test.QuickCheck
+
+import Ganeti.PartialParams
+
+import Test.Ganeti.TestCommon
+
+-- | Checks that filling with the same partial value twice is idempotent.
+testFillParamsLaw1 :: (PartialParams f p, Show f, Eq f)
+                   => f -> p -> Property
+testFillParamsLaw1 f p = fillParams (fillParams f p) p ==? fillParams f p
+
+-- | Tests that filling any value with @toPartial f@ yields @f@ back.
+testToParamsLaw2 :: (PartialParams f p, Show f, Eq f) => f -> f -> Property
+testToParamsLaw2 x f = fillParams x (toPartial f) ==? f
+
+-- | Tests that @toFilled . toPartial@ reconstructs the original value.
+testToFilledLaw3 :: (PartialParams f p, Show f, Eq f) => f -> Property
+testToFilledLaw3 f = toFilled (toPartial f) ==? Just f
+
+-- | Tests that filling with 'mempty' leaves the value unchanged.
+testToFilledMonoidLaw1 :: (PartialParams f p, Show f, Eq f, Monoid p)
+                       => f -> Property
+testToFilledMonoidLaw1 f = fillParams f mempty ==? f
+
+-- | Tests that filling with a combined partial value equals filling
+-- with each part in turn.
+testToFilledMonoidLaw2 :: (PartialParams f p, Show f, Eq f, Monoid p)
+                       => f -> p -> p -> Property
+testToFilledMonoidLaw2 f p1 p2 =
+  fillParams f (p1 <> p2) ==? fillParams (fillParams f p1) p2
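For orientation, a minimal hand-written instance that these law helpers are
meant to constrain could look as follows. This is only a sketch, assuming the
class methods fillParams :: f -> p -> f, toPartial :: f -> p and
toFilled :: p -> Maybe f used above; the types and field names are invented
for illustration:

  import Data.Maybe (fromMaybe)

  data FilledT  = FilledT  { fA :: Int, fB :: String } deriving (Show, Eq)
  data PartialT = PartialT { pA :: Maybe Int, pB :: Maybe String }

  instance PartialParams FilledT PartialT where
    -- fields present in the partial value override the filled ones
    fillParams f p = FilledT (fromMaybe (fA f) (pA p)) (fromMaybe (fB f) (pB p))
    -- a filled value converts to a partial value with every field set
    toPartial f = PartialT (Just (fA f)) (Just (fB f))
    -- conversion back succeeds only when every field is present
    toFilled p = FilledT <$> pA p <*> pB p

With such an instance, fillParams (FilledT 2 "x") (PartialT (Just 4) Nothing)
is FilledT 4 "x", and applying the same partial value again changes nothing,
which is exactly what testFillParamsLaw1 asserts.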
diff --git a/test/hs/Test/Ganeti/Query/Instance.hs b/test/hs/Test/Ganeti/Query/Instance.hs
index cdb3521..404843c 100644
--- a/test/hs/Test/Ganeti/Query/Instance.hs
+++ b/test/hs/Test/Ganeti/Query/Instance.hs
@@ -58,11 +58,11 @@
 -- placeholders.
 createInstance :: String -> String -> AdminState -> AdminStateSource -> Instance
 createInstance name pnodeUuid adminState adminStateSource =
-  Instance name pnodeUuid "" Kvm
+  RealInstance $ RealInstanceData name pnodeUuid "" Kvm
     (GenericContainer Map.empty)
     (PartialBeParams Nothing Nothing Nothing Nothing Nothing Nothing)
     (GenericContainer Map.empty) (GenericContainer Map.empty)
-    adminState adminStateSource [] [] DTDrbd8 False Nothing epochTime epochTime
+    adminState adminStateSource [] [] False Nothing epochTime epochTime
     "" 0 Set.empty
   where epochTime = TOD 0 0
 
diff --git a/test/hs/Test/Ganeti/THH.hs b/test/hs/Test/Ganeti/THH.hs
index 16fdc0b..89e1c39 100644
--- a/test/hs/Test/Ganeti/THH.hs
+++ b/test/hs/Test/Ganeti/THH.hs
@@ -1,4 +1,5 @@
-{-# LANGUAGE TemplateHaskell #-}
+{-# LANGUAGE TemplateHaskell, FunctionalDependencies #-}
+{-# OPTIONS -fno-warn-unused-binds #-}
 
 {-| Unittests for our template-haskell generated code.
 
@@ -43,7 +44,9 @@
 import Text.JSON
 
 import Ganeti.THH
+import Ganeti.PartialParams
 
+import Test.Ganeti.PartialParams
 import Test.Ganeti.TestHelper
 import Test.Ganeti.TestCommon
 
@@ -102,10 +105,62 @@
   forAll ((arbitrary :: Gen [(String, Int)]) `suchThat` (not . null))
   $ testDeserialisationFail UnitObj . encJSDict
 
+$(buildParam "Test" "tparam"
+  [ simpleField "c" [t| Int |]
+  , simpleField "d" [t| String  |]
+  ])
+
+$(genArbitrary ''FilledTestParams)
+$(genArbitrary ''PartialTestParams)
+
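+-- Illustrative sketch, not part of this change: buildParam above generates
+-- a pair of records, a filled one and a partial one whose fields are all
+-- Maybe-wrapped, roughly of this shape (field names are illustrative only):
+--
+--   data FilledTestParams  = FilledTestParams  { tparamC :: Int
+--                                              , tparamD :: String }
+--   data PartialTestParams = PartialTestParams { tparamPC :: Maybe Int
+--                                              , tparamPD :: Maybe String }
+--
+-- together with a PartialParams instance relating them, which is what the
+-- properties below exercise.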
+-- | Tests that filling partial parameters works as expected.
+prop_fillWithPartialParams :: Property
+prop_fillWithPartialParams =
+  let partial = PartialTestParams (Just 4) Nothing
+      filled = FilledTestParams 2 "42"
+      expected = FilledTestParams 4 "42"
+  in fillParams filled partial ==? expected
+
+-- | Tests law 1: filling twice with the same partial value is idempotent.
+prop_fillPartialLaw1 :: FilledTestParams -> PartialTestParams -> Property
+prop_fillPartialLaw1 = testFillParamsLaw1
+
+-- | Tests that conversion to partial parameters works as expected.
+prop_toParams :: Property
+prop_toParams =
+  toPartial (FilledTestParams 2 "42") ==?
+  PartialTestParams (Just 2) (Just "42")
+
+-- | Tests law 2: filling with a value's own partial form recovers it.
+prop_fillPartialLaw2 :: FilledTestParams -> FilledTestParams -> Property
+prop_fillPartialLaw2 = testToParamsLaw2
+
+-- | Tests law 3: partial-to-filled conversion restores the original value.
+prop_fillPartialLaw3 :: FilledTestParams -> Property
+prop_fillPartialLaw3 = testToFilledLaw3
+
+-- | Tests that the monoid action laws are satisfied.
+prop_fillPartialMonoidLaw1 :: FilledTestParams -> Property
+prop_fillPartialMonoidLaw1 = testToFilledMonoidLaw1
+
+-- | Tests that the monoid action laws are satisfied.
+prop_fillPartialMonoidLaw2
+  :: FilledTestParams -> PartialTestParams -> PartialTestParams -> Property
+prop_fillPartialMonoidLaw2 = testToFilledMonoidLaw2
+
 testSuite "THH"
             [ 'prop_OptFields
             , 'prop_TestObj_serialization
             , 'prop_TestObj_deserialisationFail
             , 'prop_UnitObj_serialization
             , 'prop_UnitObj_deserialisationFail
+            , 'prop_fillWithPartialParams
+            , 'prop_fillPartialLaw1
+            , 'prop_toParams
+            , 'prop_fillPartialLaw2
+            , 'prop_fillPartialLaw3
+            , 'prop_fillPartialMonoidLaw1
+            , 'prop_fillPartialMonoidLaw2
             ]
diff --git a/test/hs/Test/Ganeti/TestCommon.hs b/test/hs/Test/Ganeti/TestCommon.hs
index 3991e5e..bcd8421 100644
--- a/test/hs/Test/Ganeti/TestCommon.hs
+++ b/test/hs/Test/Ganeti/TestCommon.hs
@@ -92,14 +92,6 @@
   , counterexample
   ) where
 
--- The following macro is just a temporary solution for 2.12 and 2.13.
--- Since 2.14 cabal creates proper macros for all dependencies.
-#define MIN_VERSION_QuickCheck(maj,min,rev) \
-  (((maj)<QUICKCHECK_MAJOR)|| \
-   (((maj)==QUICKCHECK_MAJOR)&&((min)<=QUICKCHECK_MINOR))|| \
-   (((maj)==QUICKCHECK_MAJOR)&&((min)==QUICKCHECK_MINOR)&& \
-    ((rev)<=QUICKCHECK_REV)))
-
 import Control.Applicative
 import Control.Exception (catchJust)
 import Control.Monad
diff --git a/test/hs/Test/Ganeti/TestHTools.hs b/test/hs/Test/Ganeti/TestHTools.hs
index 2817f2e..e2ec6a5 100644
--- a/test/hs/Test/Ganeti/TestHTools.hs
+++ b/test/hs/Test/Ganeti/TestHTools.hs
@@ -114,7 +114,7 @@
 createInstance :: Int -> Int -> Int -> Instance.Instance
 createInstance mem dsk vcpus =
   Instance.create "inst-unnamed" mem dsk [Instance.Disk dsk Nothing] vcpus
-    Types.Running [] True (-1) (-1) Types.DTDrbd8 1 []
+    Types.Running [] True (-1) (-1) Types.DTDrbd8 1 [] False
 
 -- | Create a small cluster by repeating a node spec.
 makeSmallCluster :: Node.Node -> Int -> Node.List
@@ -139,6 +139,6 @@
           , Instance.vcpus = Node.availCpu node `div` 2
           , Instance.disks = [Instance.Disk new_dsk
                               (if Node.exclStorage node
-                               then Just $ Node.fSpindles node `div` 2
+                               then Just $ Node.fSpindlesForth node `div` 2
                                else Nothing)]
           }
diff --git a/test/hs/Test/Ganeti/Utils.hs b/test/hs/Test/Ganeti/Utils.hs
index 390040d..afcf4a9 100644
--- a/test/hs/Test/Ganeti/Utils.hs
+++ b/test/hs/Test/Ganeti/Utils.hs
@@ -47,7 +47,7 @@
 import qualified Data.Set as S
 import System.Time
 import qualified Text.JSON as J
-#ifndef NO_REGEX_PCRE
+#ifdef VERSION_regex_pcre
 import Text.Regex.PCRE
 #endif
 
@@ -269,7 +269,7 @@
   uuid <- newUUID
   assertBool "newUUID" $ isUUID uuid
 
-#ifndef NO_REGEX_PCRE
+#ifdef VERSION_regex_pcre
 {-# ANN case_new_uuid_regex "HLint: ignore Use camelCase" #-}
 
 -- | Tests that the newUUID function produces valid UUIDs.
@@ -390,7 +390,7 @@
             , 'prop_rStripSpace
             , 'prop_trim
             , 'case_new_uuid
-#ifndef NO_REGEX_PCRE
+#ifdef VERSION_regex_pcre
             , 'case_new_uuid_regex
 #endif
             , 'prop_clockTimeToString
diff --git a/test/hs/htest.hs b/test/hs/htest.hs
index 3852e2d..b244e0f 100644
--- a/test/hs/htest.hs
+++ b/test/hs/htest.hs
@@ -54,6 +54,7 @@
 import Test.Ganeti.HTools.CLI
 import Test.Ganeti.HTools.Cluster
 import Test.Ganeti.HTools.Container
+import Test.Ganeti.HTools.ExtLoader
 import Test.Ganeti.HTools.Graph
 import Test.Ganeti.HTools.Instance
 import Test.Ganeti.HTools.Loader
@@ -128,6 +129,7 @@
   , testHTools_CLI
   , testHTools_Cluster
   , testHTools_Container
+  , testHTools_ExtLoader
   , testHTools_Graph
   , testHTools_Instance
   , testHTools_Loader
diff --git a/test/hs/shelltests/htools-balancing.test b/test/hs/shelltests/htools-balancing.test
index dbd3e7c..2772f0d 100644
--- a/test/hs/shelltests/htools-balancing.test
+++ b/test/hs/shelltests/htools-balancing.test
@@ -157,3 +157,8 @@
 ./test/hs/hbal -t$TESTDATA_DIR/hbal-soft-errors.data --ignore-soft-errors
 >>>/Solution length=[1-9]/
 >>>=0
+
+# forthcoming instances can be balanced as well
+./test/hs/hbal -t$TESTDATA_DIR/hbal-forth.data
+>>>/Solution length=[1-9]/
+>>>=0
diff --git a/test/hs/shelltests/htools-hail.test b/test/hs/shelltests/htools-hail.test
index c901430..1a9c175 100644
--- a/test/hs/shelltests/htools-hail.test
+++ b/test/hs/shelltests/htools-hail.test
@@ -176,3 +176,8 @@
 ./test/hs/hail --ignore-soft-errors $TESTDATA_DIR/hail-alloc-plain-tags.json
 >>> /"success":true/
 >>>= 0
+
+# Check that hail accounts for location tags
+./test/hs/hail $TESTDATA_DIR/hail-alloc-nlocation.json
+>>> /"success":true,.*,"result":\["node3","node2"\]/
+>>>= 0
diff --git a/test/hs/shelltests/htools-hbal.test b/test/hs/shelltests/htools-hbal.test
index c31f569..dbbb0d4 100644
--- a/test/hs/shelltests/htools-hbal.test
+++ b/test/hs/shelltests/htools-hbal.test
@@ -25,4 +25,14 @@
  node-04    0/
 >>>= 0
 
+./test/hs/hbal -t $TESTDATA_DIR/hbal-location-1.data
+>>>/Solution length=[1-9]/
+>>>= 0
 
+./test/hs/hbal --print-nodes=name,pcnt -t $TESTDATA_DIR/hbal-location-2.data
+>>>2/Final cluster status:
+ Name    pcnt
+ node-01    1
+ node-02    0
+ node-03    1/
+>>>= 0
diff --git a/test/py/cfgupgrade_unittest.py b/test/py/cfgupgrade_unittest.py
index 52a2093..f629a65 100755
--- a/test/py/cfgupgrade_unittest.py
+++ b/test/py/cfgupgrade_unittest.py
@@ -38,14 +38,17 @@
 import operator
 import json
 
+from ganeti import cli
 from ganeti import constants
 from ganeti import utils
 from ganeti import serializer
 from ganeti import netutils
 
 from ganeti.utils import version
+from ganeti.tools.cfgupgrade import CfgUpgrade, ParseOptions, Error
 
 import testutils
+import mock
 
 
 def GetMinimalConfig():
@@ -86,23 +89,28 @@
 
 
 def _RunUpgrade(path, dry_run, no_verify, ignore_hostname=True,
-                downgrade=False):
-  cmd = [sys.executable, "%s/tools/cfgupgrade" % testutils.GetSourceDir(),
-         "--debug", "--force", "--path=%s" % path, "--confdir=%s" % path]
+                downgrade=False, fail=False):
+  args = ["--debug", "--force", "--path=%s" % path, "--confdir=%s" % path]
 
   if ignore_hostname:
-    cmd.append("--ignore-hostname")
+    args.append("--ignore-hostname")
   if dry_run:
-    cmd.append("--dry-run")
+    args.append("--dry-run")
   if no_verify:
-    cmd.append("--no-verify")
+    args.append("--no-verify")
   if downgrade:
-    cmd.append("--downgrade")
+    args.append("--downgrade")
 
-  result = utils.RunCmd(cmd, cwd=os.getcwd())
-  if result.failed:
-    raise Exception("cfgupgrade failed: %s, output %r" %
-                    (result.fail_reason, result.output))
+  opts, args = ParseOptions(args=args)
+  upgrade = CfgUpgrade(opts, args)
+
+  with mock.patch('sys.exit'):
+    with mock.patch.object(upgrade, 'SetupLogging'):
+      with mock.patch.object(cli, 'ToStderr'):
+        upgrade.Run()
+    if sys.exit.called:
+      raise Error("upgrade failed")
+
 
 
 class TestCfgupgrade(unittest.TestCase):
@@ -411,6 +419,9 @@
   def testUpgradeFullConfigFrom_2_12(self):
     self._TestUpgradeFromFile("cluster_config_2.12.json", False)
 
+  def testUpgradeFullConfigFrom_2_13(self):
+    self._TestUpgradeFromFile("cluster_config_2.13.json", False)
+
   def testUpgradeCurrent(self):
     self._TestSimpleUpgrade(constants.CONFIG_VERSION, False)
 
@@ -428,11 +439,12 @@
   def testDowngradeFullConfig(self):
     """Test for upgrade + downgrade combination."""
     # This test can work only with the previous version of a configuration!
-    oldconfname = "cluster_config_2.12.json"
+    oldconfname = "cluster_config_2.13.json"
     self._TestUpgradeFromFile(oldconfname, False)
     _RunUpgrade(self.tmpdir, False, True, downgrade=True)
     oldconf = self._LoadTestDataConfig(oldconfname)
     newconf = self._LoadConfig()
+    self.maxDiff = None
     self.assertEqual(oldconf, newconf)
 
   def testDowngradeFullConfigBackwardFrom_2_7(self):
@@ -487,5 +499,6 @@
     newconf = self._LoadConfig()
     self.assertEqual(oldconf["version"], newconf["version"])
 
+
 if __name__ == "__main__":
   testutils.GanetiTestProgram()
diff --git a/test/py/cmdlib/backup_unittest.py b/test/py/cmdlib/backup_unittest.py
index b33fabe..c91ee05 100644
--- a/test/py/cmdlib/backup_unittest.py
+++ b/test/py/cmdlib/backup_unittest.py
@@ -63,6 +63,39 @@
     self.ExecOpCode(op)
 
 
+def InstanceRemoved(remove_instance):
+  """Checks whether the instance was removed during a test of opcode execution.
+
+  """
+  def WrappingFunction(fn):
+    def CheckingFunction(self, *args, **kwargs):
+      fn(self, *args, **kwargs)
+      instance_removed = (self.rpc.call_blockdev_remove.called -
+                          self.rpc.call_blockdev_snapshot.called) > 0
+      if remove_instance and not instance_removed:
+        raise self.fail(msg="Instance not removed when it should have been")
+      if not remove_instance and instance_removed:
+        raise self.fail(msg="Instance removed when it should not have been")
+    return CheckingFunction
+  return WrappingFunction
+
+
+def TrySnapshots(try_snapshot):
+  """Checks whether an attempt to snapshot disks should have been attempted.
+
+  """
+  def WrappingFunction(fn):
+    def CheckingFunction(self, *args, **kwargs):
+      fn(self, *args, **kwargs)
+      snapshots_tried = self.rpc.call_blockdev_snapshot.called > 0
+      if try_snapshot and not snapshots_tried:
+        raise self.fail(msg="Disks should have been snapshotted but weren't")
+      if not try_snapshot and snapshots_tried:
+        raise self.fail(msg="Disks snapshotted without a need to do so")
+    return CheckingFunction
+  return WrappingFunction
+
+
 class TestLUBackupExportBase(CmdlibTestCase):
   def setUp(self):
     super(TestLUBackupExportBase, self).setUp()
@@ -119,50 +152,115 @@
     self.ExecOpCodeExpectOpPrereqError(
       op, "Can not remove instance without shutting it down before")
 
-  def testUnsupportedDiskTemplate(self):
-    inst = self.cfg.AddNewInstance(disk_template=constants.DT_FILE)
-    op = opcodes.OpBackupExport(instance_name=inst.name,
-                                target_node=self.master.name)
-    self.ExecOpCodeExpectOpPrereqError(
-      op, "Export not supported for instances with file-based disks")
-
 
 class TestLUBackupExportLocalExport(TestLUBackupExportBase):
   def setUp(self):
+    # The initial instance prep
     super(TestLUBackupExportLocalExport, self).setUp()
 
-    self.inst = self.cfg.AddNewInstance()
     self.target_node = self.cfg.AddNewNode()
     self.op = opcodes.OpBackupExport(mode=constants.EXPORT_MODE_LOCAL,
-                                     instance_name=self.inst.name,
                                      target_node=self.target_node.name)
+    self._PrepareInstance()
 
     self.rpc.call_import_start.return_value = \
       self.RpcResultsBuilder() \
         .CreateSuccessfulNodeResult(self.target_node, "import_daemon")
 
-  def testExportWithShutdown(self):
-    inst = self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP)
-    op = self.CopyOpCode(self.op, instance_name=inst.name, shutdown=True)
-    self.ExecOpCode(op)
+  def _PrepareInstance(self, online=False, snapshottable=True):
+    """Produces an instance for export tests, and updates the opcode.
 
-  def testExportDeactivatedDisks(self):
+    """
+    if online:
+      admin_state = constants.ADMINST_UP
+    else:
+      admin_state = constants.ADMINST_DOWN
+
+    if snapshottable:
+      disk_template = constants.DT_PLAIN
+    else:
+      disk_template = constants.DT_FILE
+
+    inst = self.cfg.AddNewInstance(admin_state=admin_state,
+                                   disk_template=disk_template)
+    self.op = self.CopyOpCode(self.op, instance_name=inst.name)
+
+  @TrySnapshots(True)
+  @InstanceRemoved(False)
+  def testPlainExportWithShutdown(self):
+    self._PrepareInstance(online=True)
     self.ExecOpCode(self.op)
 
-  def testExportRemoveInstance(self):
+  @TrySnapshots(False)
+  @InstanceRemoved(False)
+  def testFileExportWithShutdown(self):
+    self._PrepareInstance(online=True, snapshottable=False)
+    self.ExecOpCodeExpectOpExecError(self.op, ".*--long-sleep option.*")
+
+  @TrySnapshots(False)
+  @InstanceRemoved(False)
+  def testFileLongSleepExport(self):
+    self._PrepareInstance(online=True, snapshottable=False)
+    op = self.CopyOpCode(self.op, long_sleep=True)
+    self.ExecOpCode(op)
+
+  @TrySnapshots(True)
+  @InstanceRemoved(False)
+  def testPlainLiveExport(self):
+    self._PrepareInstance(online=True)
+    op = self.CopyOpCode(self.op, shutdown=False)
+    self.ExecOpCode(op)
+
+  @TrySnapshots(False)
+  @InstanceRemoved(False)
+  def testFileLiveExport(self):
+    self._PrepareInstance(online=True, snapshottable=False)
+    op = self.CopyOpCode(self.op, shutdown=False)
+    self.ExecOpCodeExpectOpExecError(op, ".*live export.*")
+
+  @TrySnapshots(False)
+  @InstanceRemoved(False)
+  def testPlainOfflineExport(self):
+    self._PrepareInstance(online=False)
+    self.ExecOpCode(self.op)
+
+  @TrySnapshots(False)
+  @InstanceRemoved(False)
+  def testFileOfflineExport(self):
+    self._PrepareInstance(online=False, snapshottable=False)
+    self.ExecOpCode(self.op)
+
+  @TrySnapshots(False)
+  @InstanceRemoved(True)
+  def testExportRemoveOfflineInstance(self):
+    self._PrepareInstance(online=False)
     op = self.CopyOpCode(self.op, remove_instance=True)
     self.ExecOpCode(op)
 
+  @TrySnapshots(False)
+  @InstanceRemoved(True)
+  def testExportRemoveOnlineInstance(self):
+    self._PrepareInstance(online=True)
+    op = self.CopyOpCode(self.op, remove_instance=True)
+    self.ExecOpCode(op)
+
+  @TrySnapshots(False)
+  @InstanceRemoved(False)
   def testValidCompressionTool(self):
     op = self.CopyOpCode(self.op, compress="lzop")
     self.cfg.SetCompressionTools(["gzip", "lzop"])
     self.ExecOpCode(op)
 
+  @InstanceRemoved(False)
   def testInvalidCompressionTool(self):
     op = self.CopyOpCode(self.op, compress="invalid")
     self.cfg.SetCompressionTools(["gzip", "lzop"])
     self.ExecOpCodeExpectOpPrereqError(op, "Compression tool not allowed")
 
+  def testLiveLongSleep(self):
+    op = self.CopyOpCode(self.op, shutdown=False, long_sleep=True)
+    self.ExecOpCodeExpectOpPrereqError(op, ".*long sleep.*")
+
 
 class TestLUBackupExportRemoteExport(TestLUBackupExportBase):
   def setUp(self):
@@ -175,11 +273,13 @@
                                      x509_key_name=["mock_key_name"],
                                      destination_x509_ca="mock_dest_ca")
 
+  @InstanceRemoved(False)
   def testRemoteExportWithoutX509KeyName(self):
     op = self.CopyOpCode(self.op, x509_key_name=self.REMOVE)
     self.ExecOpCodeExpectOpPrereqError(op,
                                        "Missing X509 key name for encryption")
 
+  @InstanceRemoved(False)
   def testRemoteExportWithoutX509DestCa(self):
     op = self.CopyOpCode(self.op, destination_x509_ca=self.REMOVE)
     self.ExecOpCodeExpectOpPrereqError(op,
diff --git a/test/py/cmdlib/cluster_unittest.py b/test/py/cmdlib/cluster_unittest.py
index 2d3ee5e..90099a0 100644
--- a/test/py/cmdlib/cluster_unittest.py
+++ b/test/py/cmdlib/cluster_unittest.py
@@ -42,6 +42,7 @@
 import os
 
 from ganeti.cmdlib import cluster
+from ganeti.cmdlib.cluster import verify
 from ganeti import constants
 from ganeti import errors
 from ganeti import netutils
@@ -59,7 +60,7 @@
 
 class TestClusterVerifySsh(unittest.TestCase):
   def testMultipleGroups(self):
-    fn = cluster.LUClusterVerifyGroup._SelectSshCheckNodes
+    fn = verify.LUClusterVerifyGroup._SelectSshCheckNodes
     mygroupnodes = [
       objects.Node(name="node20", group="my", offline=False,
                    master_candidate=True),
@@ -114,7 +115,7 @@
       })
 
   def testSingleGroup(self):
-    fn = cluster.LUClusterVerifyGroup._SelectSshCheckNodes
+    fn = verify.LUClusterVerifyGroup._SelectSshCheckNodes
     nodes = [
       objects.Node(name="node1", group="default", offline=True,
                    master_candidate=True),
@@ -889,7 +890,7 @@
     op = opcodes.OpClusterSetParams(
            enabled_disk_templates=new_disk_templates,
            ipolicy={constants.IPOLICY_DTS: new_disk_templates})
-    self.ExecOpCodeExpectOpPrereqError(op, "least one instance using it")
+    self.ExecOpCodeExpectOpPrereqError(op, "least one disk using it")
 
   def testEnabledDiskTemplatesWithoutVgName(self):
     enabled_disk_templates = [constants.DT_PLAIN]
@@ -907,6 +908,14 @@
            ipolicy={constants.IPOLICY_DTS: enabled_disk_templates})
     self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable disk template")
 
+  def testDisableDiskTemplateWithExistingInstanceDiskless(self):
+    self.cfg.AddNewInstance(disks=[])
+    enabled_disk_templates = [constants.DT_PLAIN]
+    op = opcodes.OpClusterSetParams(
+           enabled_disk_templates=enabled_disk_templates,
+           ipolicy={constants.IPOLICY_DTS: enabled_disk_templates})
+    self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable disk template")
+
   def testVgNameNoLvmDiskTemplateEnabled(self):
     vg_name = "test_vg"
     self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
@@ -1123,6 +1132,12 @@
       "following instances have a non-existing primary-node")
     self.assertFalse(result)
 
+  def testDanglingDisk(self):
+    self.cfg.AddOrphanDisk()
+    op = opcodes.OpClusterVerifyConfig()
+    result = self.ExecOpCode(op)
+    self.assertTrue(result)
+
 
 class TestLUClusterVerifyGroup(CmdlibTestCase):
   def testEmptyNodeGroup(self):
@@ -1174,6 +1189,19 @@
 
     self.ExecOpCode(op)
 
+  def testVerifyNodeDrbdSuccess(self):
+    ninfo = self.cfg.AddNewNode()
+    disk = self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
+                                primary_node=self.master,
+                                secondary_node=ninfo)
+    instance = self.cfg.AddNewInstance(disks=[disk])
+    instanceinfo = self.cfg.GetAllInstancesInfo()
+    disks_info = self.cfg.GetAllDisksInfo()
+    drbd_map = {ninfo.uuid: {0: disk.uuid}}
+    minors = verify.LUClusterVerifyGroup._ComputeDrbdMinors(
+      ninfo, instanceinfo, disks_info, drbd_map, lambda *args: None)
+    self.assertEquals(minors, {0: (disk.uuid, instance.uuid, False)})
+
 
 class TestLUClusterVerifyClientCerts(CmdlibTestCase):
 
@@ -1321,7 +1349,7 @@
     lu._exclusive_storage = False
     lu.master_node = self.master_uuid
     lu.group_info = self.group
-    cluster.LUClusterVerifyGroup.all_node_info = \
+    verify.LUClusterVerifyGroup.all_node_info = \
       property(fget=lambda _: self.cfg.GetAllNodesInfo())
 
 
@@ -1440,7 +1468,7 @@
   def testValidNodeResultExclusiveStorage(self, lu):
     lu._exclusive_storage = True
     lu._UpdateVerifyNodeLVM(self.master, self.VALID_NRESULT, "mock_vg",
-                            cluster.LUClusterVerifyGroup.NodeImage())
+                            verify.LUClusterVerifyGroup.NodeImage())
     self.mcpu.assertLogIsEmpty()
 
 
@@ -1491,7 +1519,7 @@
   @withLockedLU
   def testNoPvInfo(self, lu):
     lu._exclusive_storage = True
-    nimg = cluster.LUClusterVerifyGroup.NodeImage()
+    nimg = verify.LUClusterVerifyGroup.NodeImage()
     lu._VerifyGroupLVM({self.master.uuid: nimg}, "mock_vg")
     self.mcpu.assertLogIsEmpty()
 
@@ -1499,10 +1527,10 @@
   def testValidPvInfos(self, lu):
     lu._exclusive_storage = True
     node2 = self.cfg.AddNewNode()
-    nimg1 = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master.uuid)
+    nimg1 = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master.uuid)
     nimg1.pv_min = 10000
     nimg1.pv_max = 10010
-    nimg2 = cluster.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)
+    nimg2 = verify.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)
     nimg2.pv_min = 9998
     nimg2.pv_max = 10005
     lu._VerifyGroupLVM({self.master.uuid: nimg1, node2.uuid: nimg2}, "mock_vg")
@@ -1618,7 +1646,7 @@
     self.diskless_inst = self.cfg.AddNewInstance(disks=[])
 
     self.master_img = \
-      cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
+      verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
     self.master_img.volumes = ["/".join(disk.logical_id)
                                for inst in [self.running_inst,
                                             self.diskless_inst]
@@ -1629,7 +1657,7 @@
       ["/".join(disk.logical_id) for disk in drbd_inst_disks[0].children])
     self.master_img.instances = [self.running_inst.uuid]
     self.node1_img = \
-      cluster.LUClusterVerifyGroup.NodeImage(uuid=self.node1.uuid)
+      verify.LUClusterVerifyGroup.NodeImage(uuid=self.node1.uuid)
     self.node1_img.volumes = \
       ["/".join(disk.logical_id) for disk in drbd_inst_disks[0].children]
     self.node_imgs = {
@@ -1704,7 +1732,7 @@
     self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
     lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
     self.mcpu.assertLogContainsRegex(
-      "instance has template drbd, which is not supported")
+        "disk types? drbd, which are not supported")
 
   @withLockedLU
   def testExclusiveStorageWithValidInstance(self, lu):
@@ -1735,7 +1763,7 @@
         TestLUClusterVerifyGroupMethods):
   @withLockedLU
   def testOrphanedVolume(self, lu):
-    master_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
+    master_img = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
     master_img.volumes = [
       "mock_vg/disk_0",  # Required, present, no error
       "mock_vg/disk_1",  # Unknown, present, orphan
@@ -1773,14 +1801,14 @@
     inst2 = self.cfg.AddNewInstance()
     inst3 = self.cfg.AddNewInstance()
 
-    node1_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node1.uuid)
+    node1_img = verify.LUClusterVerifyGroup.NodeImage(uuid=node1.uuid)
     node1_img.sbp = {
       self.master_uuid: [inst1.uuid, inst2.uuid, inst3.uuid]
     }
 
-    node2_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)
+    node2_img = verify.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)
 
-    node3_img = cluster.LUClusterVerifyGroup.NodeImage(uuid=node3.uuid)
+    node3_img = verify.LUClusterVerifyGroup.NodeImage(uuid=node3.uuid)
     node3_img.offline = True
 
     node_imgs = {
@@ -1903,7 +1931,7 @@
     for ndata in [{}, {constants.NV_OSLIST: ""}, {constants.NV_OSLIST: [""]},
                   {constants.NV_OSLIST: [["1", "2"]]}]:
       self.mcpu.ClearLogMessages()
-      nimage = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
+      nimage = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
       lu._UpdateNodeOS(self.master, ndata, nimage)
       self.mcpu.assertLogContainsRegex("node hasn't returned valid OS data")
 
@@ -1918,15 +1946,15 @@
          True]
       ]
     }
-    nimage = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
+    nimage = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
     lu._UpdateNodeOS(self.master, ndata, nimage)
     self.mcpu.assertLogIsEmpty()
 
   @withLockedLU
   def testVerifyNodeOs(self, lu):
     node = self.cfg.AddNewNode()
-    nimg_root = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
-    nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=node.uuid)
+    nimg_root = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
+    nimg = verify.LUClusterVerifyGroup.NodeImage(uuid=node.uuid)
 
     nimg_root.os_fail = False
     nimg_root.oslist = {
@@ -2054,7 +2082,7 @@
   TestLUClusterVerifyGroupMethods):
   def setUp(self):
     super(TestLUClusterVerifyGroupUpdateNodeVolumes, self).setUp()
-    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
+    self.nimg = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
 
   @withLockedLU
   def testNoVgName(self, lu):
@@ -2088,7 +2116,7 @@
   TestLUClusterVerifyGroupMethods):
   def setUp(self):
     super(TestLUClusterVerifyGroupUpdateNodeInstances, self).setUp()
-    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
+    self.nimg = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
 
   @withLockedLU
   def testInvalidNodeResult(self, lu):
@@ -2107,7 +2135,7 @@
 class TestLUClusterVerifyGroupUpdateNodeInfo(TestLUClusterVerifyGroupMethods):
   def setUp(self):
     super(TestLUClusterVerifyGroupUpdateNodeInfo, self).setUp()
-    self.nimg = cluster.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
+    self.nimg = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
     self.valid_hvresult = {constants.NV_HVINFO: {"memory_free": 1024}}
 
   @withLockedLU
@@ -2177,15 +2205,15 @@
                               secondary_node=self.node2,
                               disk_template=constants.DT_DRBD8)
 
-    self.node1_img = cluster.LUClusterVerifyGroup.NodeImage(
+    self.node1_img = verify.LUClusterVerifyGroup.NodeImage(
                        uuid=self.node1.uuid)
     self.node1_img.pinst = [self.diskless_inst.uuid]
     self.node1_img.sinst = []
-    self.node2_img = cluster.LUClusterVerifyGroup.NodeImage(
+    self.node2_img = verify.LUClusterVerifyGroup.NodeImage(
                        uuid=self.node2.uuid)
     self.node2_img.pinst = [self.plain_inst.uuid]
     self.node2_img.sinst = [self.drbd_inst.uuid]
-    self.node3_img = cluster.LUClusterVerifyGroup.NodeImage(
+    self.node3_img = verify.LUClusterVerifyGroup.NodeImage(
                        uuid=self.node3.uuid)
     self.node3_img.pinst = [self.drbd_inst.uuid]
     self.node3_img.sinst = []
diff --git a/test/py/cmdlib/cmdlib_unittest.py b/test/py/cmdlib/cmdlib_unittest.py
index 510b694..9a1893a 100755
--- a/test/py/cmdlib/cmdlib_unittest.py
+++ b/test/py/cmdlib/cmdlib_unittest.py
@@ -31,6 +31,7 @@
 """Script for unittesting the cmdlib module"""
 
 
+import mock
 import unittest
 import itertools
 import copy
@@ -39,6 +40,7 @@
 from ganeti import mcpu
 from ganeti import cmdlib
 from ganeti.cmdlib import cluster
+from ganeti.cmdlib.cluster import verify
 from ganeti.cmdlib import instance_storage
 from ganeti.cmdlib import instance_utils
 from ganeti.cmdlib import common
@@ -443,42 +445,42 @@
   def test(self):
     compute_fn = _ValidateComputeMinMaxSpec
     ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
-                                             [1024], 1, constants.DT_PLAIN,
+                                             [1024], 1, [constants.DT_PLAIN],
                                              _compute_fn=compute_fn)
     self.assertEqual(ret, [])
 
   def testDiskFull(self):
     compute_fn = _NoDiskComputeMinMaxSpec
     ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
-                                             [1024], 1, constants.DT_PLAIN,
+                                             [1024], 1, [constants.DT_PLAIN],
                                              _compute_fn=compute_fn)
     self.assertEqual(ret, [constants.ISPEC_DISK_COUNT])
 
   def testDiskLess(self):
     compute_fn = _NoDiskComputeMinMaxSpec
-    ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
-                                             [1024], 1, constants.DT_DISKLESS,
+    ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 0, 1,
+                                             [], 1, [],
                                              _compute_fn=compute_fn)
     self.assertEqual(ret, [])
 
   def testWrongTemplates(self):
     compute_fn = _ValidateComputeMinMaxSpec
     ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
-                                             [1024], 1, constants.DT_DRBD8,
+                                             [1024], 1, [constants.DT_DRBD8],
                                              _compute_fn=compute_fn)
     self.assertEqual(len(ret), 1)
     self.assertTrue("Disk template" in ret[0])
 
   def testInvalidArguments(self):
     self.assertRaises(AssertionError, common.ComputeIPolicySpecViolation,
-                      self._MICRO_IPOL, 1024, 1, 1, 1, [], 1,
+                      self._MICRO_IPOL, 1024, 1, 1, 1, constants.DT_DISKLESS, 1,
                       constants.DT_PLAIN,)
 
   def testInvalidSpec(self):
     spec = _SpecWrapper([None, False, "foo", None, "bar", None])
     compute_fn = spec.ComputeMinMaxSpec
     ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
-                                             [1024], 1, constants.DT_PLAIN,
+                                             [1024], 1, [constants.DT_PLAIN],
                                              _compute_fn=compute_fn)
     self.assertEqual(ret, ["foo", "bar"])
     self.assertFalse(spec.spec)
@@ -537,7 +539,7 @@
       ret = common.ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count,
                                                disk_count, nic_count,
                                                disk_sizes, spindle_use,
-                                               disk_template)
+                                               [disk_template]*disk_count)
       self.assertEqual(len(ret), violations)
 
     AssertComputeViolation(ipolicy1, 0)
@@ -561,42 +563,57 @@
   # Minimal policy accepted by _ComputeIPolicyDiskSizesViolation()
   _MICRO_IPOL = {
     constants.IPOLICY_DTS: [constants.DT_PLAIN, constants.DT_DISKLESS],
-    constants.ISPECS_MINMAX: [NotImplemented],
+    constants.ISPECS_MINMAX: [None],
     }
 
+  def MakeDisks(self, *dev_types):
+    return [mock.Mock(dev_type=d) for d in dev_types]
+
   def test(self):
     compute_fn = _ValidateComputeMinMaxSpec
-    ret = common.ComputeIPolicyDiskSizesViolation(self._MICRO_IPOL, [1024],
-                                                  constants.DT_PLAIN,
-                                                  _compute_fn=compute_fn)
+    ret = common.ComputeIPolicyDiskSizesViolation(
+      self._MICRO_IPOL, [1024], self.MakeDisks(constants.DT_PLAIN),
+      _compute_fn=compute_fn)
     self.assertEqual(ret, [])
 
   def testDiskFull(self):
     compute_fn = _NoDiskComputeMinMaxSpec
-    ret = common.ComputeIPolicyDiskSizesViolation(self._MICRO_IPOL, [1024],
-                                                  constants.DT_PLAIN,
-                                                  _compute_fn=compute_fn)
+    ret = common.ComputeIPolicyDiskSizesViolation(
+      self._MICRO_IPOL, [1024], self.MakeDisks(constants.DT_PLAIN),
+      _compute_fn=compute_fn)
     self.assertEqual(ret, [constants.ISPEC_DISK_COUNT])
 
+  def testDisksMixed(self):
+    compute_fn = _ValidateComputeMinMaxSpec
+    ipol = copy.deepcopy(self._MICRO_IPOL)
+    ipol[constants.IPOLICY_DTS].append(constants.DT_DRBD8)
+    ret = common.ComputeIPolicyDiskSizesViolation(
+      ipol, [1024, 1024],
+      self.MakeDisks(constants.DT_DRBD8, constants.DT_PLAIN),
+      _compute_fn=compute_fn)
+    self.assertEqual(ret, [])
+
   def testDiskLess(self):
     compute_fn = _NoDiskComputeMinMaxSpec
     ret = common.ComputeIPolicyDiskSizesViolation(self._MICRO_IPOL, [],
-                                                  constants.DT_DISKLESS,
+                                                  [],
                                                   _compute_fn=compute_fn)
     self.assertEqual(ret, [])
 
   def testWrongTemplates(self):
     compute_fn = _ValidateComputeMinMaxSpec
-    ret = common.ComputeIPolicyDiskSizesViolation(self._MICRO_IPOL, [1024],
-                                                  constants.DT_DRBD8,
-                                                  _compute_fn=compute_fn)
+
+    ret = common.ComputeIPolicyDiskSizesViolation(
+      self._MICRO_IPOL, [1024], self.MakeDisks(constants.DT_DRBD8),
+      _compute_fn=compute_fn)
     self.assertEqual(len(ret), 1)
     self.assertTrue("Disk template" in ret[0])
 
-  def _AssertComputeViolation(self, ipolicy, disk_sizes, disk_template,
+  def _AssertComputeViolation(self, ipolicy, disk_sizes, dev_types,
                               violations):
-    ret = common.ComputeIPolicyDiskSizesViolation(ipolicy, disk_sizes,
-                                                  disk_template)
+    ret = common.ComputeIPolicyDiskSizesViolation(
+      ipolicy, disk_sizes, self.MakeDisks(*dev_types))
     self.assertEqual(len(ret), violations)
 
   def testWithIPolicy(self):
@@ -624,11 +641,13 @@
       constants.IPOLICY_DTS: [disk_template],
       }
 
-    self._AssertComputeViolation(ipolicy, [512], disk_template, 0)
-    self._AssertComputeViolation(ipolicy, [], disk_template, 1)
-    self._AssertComputeViolation(ipolicy, [512, 512], disk_template, 1)
-    self._AssertComputeViolation(ipolicy, [511], disk_template, 1)
-    self._AssertComputeViolation(ipolicy, [513], disk_template, 1)
+    self._AssertComputeViolation(ipolicy, [512], [disk_template], 0)
+    self._AssertComputeViolation(ipolicy, [], [disk_template], 1)
+    self._AssertComputeViolation(ipolicy, [], [], 1)
+    self._AssertComputeViolation(ipolicy, [512, 512],
+                                 [disk_template, disk_template], 1)
+    self._AssertComputeViolation(ipolicy, [511], [disk_template], 1)
+    self._AssertComputeViolation(ipolicy, [513], [disk_template], 1)
 
 
 class _StubComputeIPolicySpecViolation:
@@ -675,40 +694,63 @@
     return ("pnode_uuid", )
 
   def GetInstanceDisks(self, _):
-    return [objects.Disk(size=512, spindles=13, uuid="disk_uuid")]
+    return [objects.Disk(size=512, spindles=13, uuid="disk_uuid",
+                         dev_type=constants.DT_PLAIN)]
 
 
 class TestComputeIPolicyInstanceViolation(unittest.TestCase):
-  def test(self):
-    beparams = {
+  def setUp(self):
+    self.beparams = {
       constants.BE_MAXMEM: 2048,
       constants.BE_VCPUS: 2,
       constants.BE_SPINDLE_USE: 4,
       }
-    cfg = _FakeConfigForComputeIPolicyInstanceViolation(beparams, False)
-    instance = objects.Instance(beparams=beparams, disks=["disk_uuid"],
+    self.cfg = _FakeConfigForComputeIPolicyInstanceViolation(
+        self.beparams, False)
+    self.cfg_exclusive = _FakeConfigForComputeIPolicyInstanceViolation(
+        self.beparams, True)
+    self.stub = mock.MagicMock()
+    self.stub.return_value = []
+
+  def testPlain(self):
+    instance = objects.Instance(beparams=self.beparams, disks=["disk_uuid"],
                                 nics=[], primary_node="pnode_uuid",
                                 disk_template=constants.DT_PLAIN)
-    stub = _StubComputeIPolicySpecViolation(2048, 2, 1, 0, [512], 4,
-                                            constants.DT_PLAIN)
-    ret = common.ComputeIPolicyInstanceViolation(NotImplemented, instance,
-                                                 cfg, _compute_fn=stub)
+    ret = common.ComputeIPolicyInstanceViolation(
+        NotImplemented, instance, self.cfg, _compute_fn=self.stub)
     self.assertEqual(ret, [])
-    instance2 = objects.Instance(beparams={}, disks=["disk_uuid"],
+    self.stub.assert_called_with(NotImplemented, 2048, 2, 1, 0, [512], 4,
+                                 [constants.DT_PLAIN])
+
+  def testNoBeparams(self):
+    instance = objects.Instance(beparams={}, disks=["disk_uuid"],
                                  nics=[], primary_node="pnode_uuid",
                                  disk_template=constants.DT_PLAIN)
-    ret = common.ComputeIPolicyInstanceViolation(NotImplemented, instance2,
-                                                 cfg, _compute_fn=stub)
+    ret = common.ComputeIPolicyInstanceViolation(
+        NotImplemented, instance, self.cfg, _compute_fn=self.stub)
     self.assertEqual(ret, [])
-    cfg_es = _FakeConfigForComputeIPolicyInstanceViolation(beparams, True)
-    stub_es = _StubComputeIPolicySpecViolation(2048, 2, 1, 0, [512], 13,
-                                               constants.DT_PLAIN)
-    ret = common.ComputeIPolicyInstanceViolation(NotImplemented, instance,
-                                                 cfg_es, _compute_fn=stub_es)
+    self.stub.assert_called_with(NotImplemented, 2048, 2, 1, 0, [512], 4,
+                                 [constants.DT_PLAIN])
+
+  def testExclusiveStorage(self):
+    instance = objects.Instance(beparams=self.beparams, disks=["disk_uuid"],
+                                nics=[], primary_node="pnode_uuid",
+                                disk_template=constants.DT_PLAIN)
+    ret = common.ComputeIPolicyInstanceViolation(
+        NotImplemented, instance, self.cfg_exclusive, _compute_fn=self.stub)
     self.assertEqual(ret, [])
-    ret = common.ComputeIPolicyInstanceViolation(NotImplemented, instance2,
-                                                 cfg_es, _compute_fn=stub_es)
+    self.stub.assert_called_with(NotImplemented, 2048, 2, 1, 0, [512], 13,
+                                 [constants.DT_PLAIN])
+
+  def testExclusiveStorageNoBeparams(self):
+    instance = objects.Instance(beparams={}, disks=["disk_uuid"],
+                                 nics=[], primary_node="pnode_uuid",
+                                 disk_template=constants.DT_PLAIN)
+    ret = common.ComputeIPolicyInstanceViolation(
+        NotImplemented, instance, self.cfg_exclusive, _compute_fn=self.stub)
     self.assertEqual(ret, [])
+    self.stub.assert_called_with(NotImplemented, 2048, 2, 1, 0, [512], 13,
+                                 [constants.DT_PLAIN])
 
 
 class _CallRecorder:
@@ -787,9 +829,9 @@
     ]
 
 
-class _LuTestVerifyErrors(cluster._VerifyErrors):
+class _LuTestVerifyErrors(verify._VerifyErrors):
   def __init__(self, **kwargs):
-    cluster._VerifyErrors.__init__(self)
+    super(_LuTestVerifyErrors, self).__init__()
     self.op = _OpTestVerifyErrors(**kwargs)
     self.op.Validate(True)
     self.msglist = []
diff --git a/test/py/cmdlib/instance_migration_unittest.py b/test/py/cmdlib/instance_migration_unittest.py
index 091f760..d05ec59 100644
--- a/test/py/cmdlib/instance_migration_unittest.py
+++ b/test/py/cmdlib/instance_migration_unittest.py
@@ -99,7 +99,7 @@
     op = self.CopyOpCode(self.op,
                          target_node=node.name)
     self.ExecOpCodeExpectOpPrereqError(
-      op, "Instances with disk template drbd cannot be migrated to"
+      op, "Instances with disk types drbd cannot be migrated to"
           " arbitrary nodes")
 
   def testMigration(self):
@@ -160,7 +160,7 @@
     op = self.CopyOpCode(self.op,
                          target_node=node.name)
     self.ExecOpCodeExpectOpPrereqError(
-      op, "Instances with disk template drbd cannot be failed over to"
+      op, "Instances with disk types drbd cannot be failed over to"
           " arbitrary nodes")
 
   def testMigration(self):
diff --git a/test/py/cmdlib/instance_storage_unittest.py b/test/py/cmdlib/instance_storage_unittest.py
index c29f808..8fbc2fa 100755
--- a/test/py/cmdlib/instance_storage_unittest.py
+++ b/test/py/cmdlib/instance_storage_unittest.py
@@ -37,9 +37,13 @@
 from ganeti.cmdlib import instance_storage
 from ganeti import errors
 from ganeti import objects
+from ganeti import opcodes
 
 import testutils
 import mock
+import time
+
+from testsupport import CmdlibTestCase
 
 
 class TestCheckNodesFreeDiskOnVG(unittest.TestCase):
@@ -163,7 +167,8 @@
       self.assertEqual(disk.get("provider"), self.ext_params.get("provider"))
 
   def testComputeDisksInfoPlainToDrbd(self):
-    disks = [{constants.IDISK_SIZE: d.size,
+    disks = [{constants.IDISK_TYPE: constants.DT_DRBD8,
+              constants.IDISK_SIZE: d.size,
               constants.IDISK_MODE: d.mode,
               constants.IDISK_VG: d.logical_id[0],
               constants.IDISK_NAME: d.name}
@@ -189,5 +194,104 @@
       self.disks, constants.DT_EXT, self.default_vg, self.ext_params)
 
 
+class TestLUInstanceReplaceDisks(CmdlibTestCase):
+  """Tests for LUInstanceReplaceDisks."""
+
+  def setUp(self):
+    super(TestLUInstanceReplaceDisks, self).setUp()
+
+    self.MockOut(time, 'sleep')
+
+    self.node1 = self.cfg.AddNewNode()
+    self.node2 = self.cfg.AddNewNode()
+
+  def MakeOpCode(self, disks, early_release=False, ignore_ipolicy=False,
+                 remote_node=False, mode='replace_auto', iallocator=None):
+    return opcodes.OpInstanceReplaceDisks(
+        instance_name=self.instance.name,
+        instance_uuid=self.instance.uuid,
+        early_release=early_release,
+        ignore_ipolicy=ignore_ipolicy,
+        mode=mode,
+        disks=disks,
+        remote_node=self.node2.name if remote_node else None,
+        remote_node_uuid=self.node2.uuid if remote_node else None,
+        iallocator=iallocator)
+
+  def testInvalidTemplate(self):
+    self.instance = self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP,
+                                            disk_template='diskless',
+                                            primary_node=self.node1)
+
+    opcode = self.MakeOpCode([])
+    self.ExecOpCodeExpectOpPrereqError(
+        opcode, 'strange layout')
+
+  def SimulateDiskFailure(self, node, disk):
+    def Faulty(node_uuid):
+      disks = self.cfg.GetInstanceDisks(node_uuid)
+      return [i for i,d in enumerate(disks)
+              if i == disk and node.uuid == node_uuid]
+    self.MockOut(instance_storage.TLReplaceDisks, '_FindFaultyDisks',
+                 side_effect=Faulty)
+    self.MockOut(instance_storage.TLReplaceDisks, '_CheckDevices')
+    self.MockOut(instance_storage.TLReplaceDisks, '_CheckVolumeGroup')
+    self.MockOut(instance_storage.TLReplaceDisks, '_CheckDisksExistence')
+    self.MockOut(instance_storage.TLReplaceDisks, '_CheckDisksConsistency')
+    self.MockOut(instance_storage.LUInstanceReplaceDisks, 'AssertReleasedLocks')
+    self.MockOut(instance_storage, 'WaitForSync')
+    self.rpc.call_blockdev_addchildren().fail_msg = None
+
+  def testReplacePrimary(self):
+    self.instance = self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP,
+                                            disk_template='drbd',
+                                            primary_node=self.node1,
+                                            secondary_node=self.node2)
+
+    self.SimulateDiskFailure(self.node1, 0)
+
+    opcode = self.MakeOpCode([0], mode='replace_on_primary')
+    self.ExecOpCode(opcode)
+    self.rpc.call_blockdev_rename.assert_any_call(self.node1.uuid, [])
+
+  def testReplaceSecondary(self):
+    self.instance = self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP,
+                                            disk_template='drbd',
+                                            primary_node=self.node1,
+                                            secondary_node=self.node2)
+
+    self.SimulateDiskFailure(self.node2, 0)
+
+    opcode = self.MakeOpCode([0], mode='replace_on_secondary')
+    self.ExecOpCode(opcode)
+    self.rpc.call_blockdev_rename.assert_any_call(self.node2.uuid, [])
+
+  def testReplaceSecondaryNew(self):
+    disk = self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
+                               primary_node=self.node1,
+                               secondary_node=self.node2)
+    self.instance = self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP,
+                                            disk_template='drbd',
+                                            disks=[disk],
+                                            primary_node=self.node1,
+                                            secondary_node=self.node2)
+
+    self.SimulateDiskFailure(self.node2, 0)
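+    # The replacement secondary is chosen by the mocked allocator run below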
+    node3 = self.cfg.AddNewNode()
+    self.MockOut(instance_storage.TLReplaceDisks, '_RunAllocator',
+                 return_value=node3.uuid)
+    self.rpc.call_drbd_disconnect_net().__getitem__().fail_msg = None
+    self.rpc.call_blockdev_shutdown().fail_msg = None
+    self.rpc.call_drbd_attach_net().fail_msg = None
+
+    opcode = self.MakeOpCode([], mode='replace_new_secondary',
+                             iallocator='hail')
+    self.ExecOpCode(opcode)
+    self.rpc.call_blockdev_shutdown.assert_any_call(
+        self.node2.uuid, (disk, self.instance))
+    self.rpc.call_drbd_attach_net.assert_any_call(
+        [self.node1.uuid, node3.uuid], ([disk], self.instance),
+        False)
+
 if __name__ == "__main__":
   testutils.GanetiTestProgram()
diff --git a/test/py/cmdlib/instance_unittest.py b/test/py/cmdlib/instance_unittest.py
index 5dca43e..1a4a45b 100644
--- a/test/py/cmdlib/instance_unittest.py
+++ b/test/py/cmdlib/instance_unittest.py
@@ -38,6 +38,7 @@
 import unittest
 import mock
 import operator
+import os
 
 from ganeti import backend
 from ganeti import compat
@@ -50,9 +51,12 @@
 from ganeti.rpc import node as rpc
 from ganeti import utils
 from ganeti.cmdlib import instance
+from ganeti.cmdlib import instance_storage
+from ganeti.cmdlib import instance_create
+from ganeti.cmdlib import instance_set_params
 from ganeti.cmdlib import instance_utils
 
-from cmdlib.cmdlib_unittest import _StubComputeIPolicySpecViolation, _FakeLU
+from cmdlib.cmdlib_unittest import _FakeLU
 
 from testsupport import *
 
@@ -60,8 +64,8 @@
 
 
 class TestComputeIPolicyInstanceSpecViolation(unittest.TestCase):
-  def test(self):
-    ispec = {
+  def setUp(self):
+    self.ispec = {
       constants.ISPEC_MEM_SIZE: 2048,
       constants.ISPEC_CPU_COUNT: 2,
       constants.ISPEC_DISK_COUNT: 1,
@@ -69,12 +73,15 @@
       constants.ISPEC_NIC_COUNT: 0,
       constants.ISPEC_SPINDLE_USE: 1,
       }
-    stub = _StubComputeIPolicySpecViolation(2048, 2, 1, 0, [512], 1,
-                                            constants.DT_PLAIN)
-    ret = instance._ComputeIPolicyInstanceSpecViolation(NotImplemented, ispec,
-                                                        constants.DT_PLAIN,
-                                                        _compute_fn=stub)
+    self.stub = mock.MagicMock()
+    self.stub.return_value = []
+
+  def testPassThrough(self):
+    ret = instance_utils.ComputeIPolicyInstanceSpecViolation(
+        NotImplemented, self.ispec, [constants.DT_PLAIN], _compute_fn=self.stub)
     self.assertEqual(ret, [])
+    self.stub.assert_called_with(NotImplemented, 2048, 2, 1, 0, [512],
+                                 1, [constants.DT_PLAIN])
 
 
 class TestLUInstanceCreate(CmdlibTestCase):
@@ -96,6 +103,10 @@
 
   def setUp(self):
     super(TestLUInstanceCreate, self).setUp()
+    self.ResetMocks()
+
+    self.MockOut(instance_create, 'netutils', self.netutils_mod)
+    self.MockOut(instance_utils, 'netutils', self.netutils_mod)
 
     self.net = self.cfg.AddNewNetwork()
     self.cfg.ConnectNetworkToGroup(self.net, self.group)
@@ -125,7 +136,7 @@
     self.iallocator_cls.return_value.result = [self.node1.name, self.node2.name]
 
     self.diskless_op = opcodes.OpInstanceCreate(
-      instance_name="diskless.test.com",
+      instance_name="diskless.example.com",
       pnode=self.master.name,
       disk_template=constants.DT_DISKLESS,
       mode=constants.INSTANCE_CREATE,
@@ -134,7 +145,7 @@
       os_type=self.os_name_variant)
 
     self.plain_op = opcodes.OpInstanceCreate(
-      instance_name="plain.test.com",
+      instance_name="plain.example.com",
       pnode=self.master.name,
       disk_template=constants.DT_PLAIN,
       mode=constants.INSTANCE_CREATE,
@@ -145,7 +156,7 @@
       os_type=self.os_name_variant)
 
     self.block_op = opcodes.OpInstanceCreate(
-      instance_name="block.test.com",
+      instance_name="block.example.com",
       pnode=self.master.name,
       disk_template=constants.DT_BLOCK,
       mode=constants.INSTANCE_CREATE,
@@ -157,7 +168,7 @@
       os_type=self.os_name_variant)
 
     self.drbd_op = opcodes.OpInstanceCreate(
-      instance_name="drbd.test.com",
+      instance_name="drbd.example.com",
       pnode=self.node1.name,
       snode=self.node2.name,
       disk_template=constants.DT_DRBD8,
@@ -169,7 +180,7 @@
       os_type=self.os_name_variant)
 
     self.file_op = opcodes.OpInstanceCreate(
-      instance_name="file.test.com",
+      instance_name="file.example.com",
       pnode=self.node1.name,
       disk_template=constants.DT_FILE,
       mode=constants.INSTANCE_CREATE,
@@ -179,6 +190,39 @@
       }],
       os_type=self.os_name_variant)
 
+    self.shared_file_op = opcodes.OpInstanceCreate(
+      instance_name="shared-file.example.com",
+      pnode=self.node1.name,
+      disk_template=constants.DT_SHARED_FILE,
+      mode=constants.INSTANCE_CREATE,
+      nics=[{}],
+      disks=[{
+        constants.IDISK_SIZE: 1024
+      }],
+      os_type=self.os_name_variant)
+
+    self.gluster_op = opcodes.OpInstanceCreate(
+      instance_name="gluster.example.com",
+      pnode=self.node1.name,
+      disk_template=constants.DT_GLUSTER,
+      mode=constants.INSTANCE_CREATE,
+      nics=[{}],
+      disks=[{
+        constants.IDISK_SIZE: 1024
+      }],
+      os_type=self.os_name_variant)
+
+    self.rbd_op = opcodes.OpInstanceCreate(
+      instance_name="gluster.example.com",
+      pnode=self.node1.name,
+      disk_template=constants.DT_RBD,
+      mode=constants.INSTANCE_CREATE,
+      nics=[{}],
+      disks=[{
+        constants.IDISK_SIZE: 1024
+      }],
+      os_type=self.os_name_variant)
+
   def testSimpleCreate(self):
     op = self.CopyOpCode(self.diskless_op)
     self.ExecOpCode(op)
@@ -544,7 +588,7 @@
     self.netutils_mod.TcpPing.return_value = True
     op = self.CopyOpCode(self.diskless_op)
     self.ExecOpCodeExpectOpPrereqError(
-      op, "IP .* of instance diskless.test.com already in use")
+      op, "IP .* of instance diskless.example.com already in use")
 
   def testPrimaryIsSecondaryNode(self):
     op = self.CopyOpCode(self.drbd_op,
@@ -768,6 +812,87 @@
     self.ExecOpCode(op)
 
 
+class TestDiskTemplateDiskTypeBijection(TestLUInstanceCreate):
+  """Tests that one disk template corresponds to exactly one disk type."""
+
+  def GetSingleInstance(self):
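+    """Return the single instance present in the test configuration."""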
+    instances = self.cfg.GetInstancesInfoByFilter(lambda _: True)
+    self.assertEqual(len(instances), 1,
+      "Expected 1 instance, got\n%s" % instances)
+    return instances.values()[0]
+
+  def testDiskTemplateLogicalIdBijectionDiskless(self):
+    op = self.CopyOpCode(self.diskless_op)
+    self.ExecOpCode(op)
+    instance = self.GetSingleInstance()
+    self.assertEqual(instance.disk_template, constants.DT_DISKLESS)
+    self.assertEqual(instance.disks, [])
+
+  def testDiskTemplateLogicalIdBijectionPlain(self):
+    op = self.CopyOpCode(self.plain_op)
+    self.ExecOpCode(op)
+    instance = self.GetSingleInstance()
+    self.assertEqual(instance.disk_template, constants.DT_PLAIN)
+    disks = self.cfg.GetInstanceDisks(instance.uuid)
+    self.assertEqual(disks[0].dev_type, constants.DT_PLAIN)
+
+  def testDiskTemplateLogicalIdBijectionBlock(self):
+    self.rpc.call_bdev_sizes.return_value = \
+      self.RpcResultsBuilder() \
+        .AddSuccessfulNode(self.master, {
+          "/dev/disk/block0": 10000
+        }) \
+        .Build()
+    op = self.CopyOpCode(self.block_op)
+    self.ExecOpCode(op)
+    instance = self.GetSingleInstance()
+    self.assertEqual(instance.disk_template, constants.DT_BLOCK)
+    disks = self.cfg.GetInstanceDisks(instance.uuid)
+    self.assertEqual(disks[0].dev_type, constants.DT_BLOCK)
+
+  def testDiskTemplateLogicalIdBijectionDrbd(self):
+    op = self.CopyOpCode(self.drbd_op)
+    self.ExecOpCode(op)
+    instance = self.GetSingleInstance()
+    self.assertEqual(instance.disk_template, constants.DT_DRBD8)
+    disks = self.cfg.GetInstanceDisks(instance.uuid)
+    self.assertEqual(disks[0].dev_type, constants.DT_DRBD8)
+
+  def testDiskTemplateLogicalIdBijectionFile(self):
+    op = self.CopyOpCode(self.file_op)
+    self.ExecOpCode(op)
+    instance = self.GetSingleInstance()
+    self.assertEqual(instance.disk_template, constants.DT_FILE)
+    disks = self.cfg.GetInstanceDisks(instance.uuid)
+    self.assertEqual(disks[0].dev_type, constants.DT_FILE)
+
+  def testDiskTemplateLogicalIdBijectionSharedFile(self):
+    self.cluster.shared_file_storage_dir = '/tmp'
+    op = self.CopyOpCode(self.shared_file_op)
+    self.ExecOpCode(op)
+    instance = self.GetSingleInstance()
+    self.assertEqual(instance.disk_template, constants.DT_SHARED_FILE)
+    disks = self.cfg.GetInstanceDisks(instance.uuid)
+    self.assertEqual(disks[0].dev_type, constants.DT_SHARED_FILE)
+
+  def testDiskTemplateLogicalIdBijectionGluster(self):
+    self.cluster.gluster_storage_dir = '/tmp'
+    op = self.CopyOpCode(self.gluster_op)
+    self.ExecOpCode(op)
+    instance = self.GetSingleInstance()
+    self.assertEqual(instance.disk_template, constants.DT_GLUSTER)
+    disks = self.cfg.GetInstanceDisks(instance.uuid)
+    self.assertEqual(disks[0].dev_type, constants.DT_GLUSTER)
+
+  def testDiskTemplateLogicalIdBijectionRbd(self):
+    op = self.CopyOpCode(self.rbd_op)
+    self.ExecOpCode(op)
+    instance = self.GetSingleInstance()
+    self.assertEqual(instance.disk_template, constants.DT_RBD)
+    disks = self.cfg.GetInstanceDisks(instance.uuid)
+    self.assertEqual(disks[0].dev_type, constants.DT_RBD)
+
+
 class TestCheckOSVariant(CmdlibTestCase):
   def testNoVariantsSupported(self):
     os = self.cfg.CreateOs(supported_variants=[])
@@ -834,105 +959,215 @@
     self.mcpu.assertLogContainsRegex(msg)
 
 
+class TestIndexOperations(unittest.TestCase):
+
+  """Test if index operations on containers work as expected."""
+
+  def testGetIndexFromIdentifierTail(self):
+    """Check if -1 is translated to tail index."""
+    container = ['item1134']
+
+    idx = instance_utils.GetIndexFromIdentifier("-1", "test", container)
+    self.assertEqual(1, idx)
+
+  def testGetIndexFromIdentifierEmpty(self):
+    """Check if empty containers return 0 as index."""
+    container = []
+
+    idx = instance_utils.GetIndexFromIdentifier("0", "test", container)
+    self.assertEqual(0, idx)
+    idx = instance_utils.GetIndexFromIdentifier("-1", "test", container)
+    self.assertEqual(0, idx)
+
+  def testGetIndexFromIdentifierError(self):
+    """Check if wrong input raises an exception."""
+    container = []
+
+    self.assertRaises(errors.OpPrereqError,
+                      instance_utils.GetIndexFromIdentifier,
+                      "lala", "test", container)
+
+  def testGetIndexFromIdentifierOffByOne(self):
+    """Check for off-by-one errors."""
+    container = []
+
+    self.assertRaises(IndexError, instance_utils.GetIndexFromIdentifier,
+                      "1", "test", container)
+
+  def testGetIndexFromIdentifierOutOfRange(self):
+    """Check for identifiers out of the container range."""
+    container = []
+
+    self.assertRaises(IndexError, instance_utils.GetIndexFromIdentifier,
+                      "-1134", "test", container)
+    self.assertRaises(IndexError, instance_utils.GetIndexFromIdentifier,
+                      "1134", "test", container)
+
+  def testInsertItemtoIndex(self):
+    """Test if we can insert an item to a container at a specified index."""
+    container = []
+
+    instance_utils.InsertItemToIndex(0, 2, container)
+    self.assertEqual([2], container)
+
+    instance_utils.InsertItemToIndex(0, 1, container)
+    self.assertEqual([1, 2], container)
+
+    instance_utils.InsertItemToIndex(-1, 3, container)
+    self.assertEqual([1, 2, 3], container)
+
+    self.assertRaises(AssertionError, instance_utils.InsertItemToIndex, -2,
+                      1134, container)
+
+    self.assertRaises(AssertionError, instance_utils.InsertItemToIndex, 4, 1134,
+                      container)
+
+
 class TestApplyContainerMods(unittest.TestCase):
+
+  def applyAndAssert(self, container, inp, expected_container,
+                     expected_chgdesc=[]):
+    """Apply a list of changes to a container and check the container state
+
+    Parameters:
+    @type container: List
+    @param container: The container on which we will apply the changes
+    @type inp: List<(action, index, object)>
+    @param inp: The list of changes, a tupple with three elements:
+                i. action, e.g. constants.DDM_ADD
+                ii. index, e.g. -1, 0, 10
+                iii. object (any type)
+    @type expected: List
+    @param expected: The expected state of the container
+    @type chgdesc: List
+    @param chgdesc: List of applied changes
+
+
+    """
+    chgdesc = []
+    mods = instance_utils.PrepareContainerMods(inp, None)
+    instance_utils.ApplyContainerMods("test", container, chgdesc, mods,
+                                      None, None, None, None, None)
+    self.assertEqual(container, expected_container)
+    self.assertEqual(chgdesc, expected_chgdesc)
+
+  def _insertContainerSuccessFn(self, op):
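+    """Apply valid insertions with the given op and verify the item order."""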
+    container = []
+    inp = [(op, -1, "Hello"),
+           (op, -1, "World"),
+           (op, 0, "Start"),
+           (op, -1, "End"),
+           ]
+    expected = ["Start", "Hello", "World", "End"]
+    self.applyAndAssert(container, inp, expected)
+
+    inp = [(op, 0, "zero"),
+           (op, 3, "Added"),
+           (op, 5, "four"),
+           (op, 7, "xyz"),
+           ]
+    expected = ["zero", "Start", "Hello", "Added", "World", "four", "End",
+                "xyz"]
+    self.applyAndAssert(container, inp, expected)
+
+  def _insertContainerErrorFn(self, op):
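+    """Check that out-of-range indices raise IndexError for the given op."""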
+    container = []
+    expected = None
+
+    inp = [(op, 1, "error"), ]
+    self.assertRaises(IndexError, self.applyAndAssert, container, inp,
+                      expected)
+
+    inp = [(op, -2, "error"), ]
+    self.assertRaises(IndexError, self.applyAndAssert, container, inp,
+                      expected)
+
+  def _extractContainerSuccessFn(self, op):
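+    """Apply valid extractions with the given op and verify the changes."""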
+    container = ["item1", "item2", "item3", "item4", "item5"]
+    inp = [(op, -1, None),
+           (op, -0, None),
+           (op, 1, None),
+           ]
+    expected = ["item2", "item4"]
+    chgdesc = [('test/4', op),
+               ('test/0', op),
+               ('test/1', op)
+               ]
+    self.applyAndAssert(container, inp, expected, chgdesc)
+
+  def _extractContainerErrorFn(self, op):
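+    """Check that invalid extractions with the given op raise errors."""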
+    container = []
+    expected = None
+
+    inp = [(op, 0, None), ]
+    self.assertRaises(IndexError, self.applyAndAssert, container, inp,
+                      expected)
+
+    inp = [(op, -1, None), ]
+    self.assertRaises(IndexError, self.applyAndAssert, container, inp,
+                      expected)
+
+    inp = [(op, 2, None), ]
+    self.assertRaises(IndexError, self.applyAndAssert, container, inp,
+                      expected)
+    container = [""]
+    inp = [(op, 0, None), ]
+    expected = None
+    self.assertRaises(AssertionError, self.applyAndAssert, container, inp,
+                      expected)
+
   def testEmptyContainer(self):
     container = []
     chgdesc = []
-    instance._ApplyContainerMods("test", container, chgdesc, [], None, None,
-                                 None)
+    instance_utils.ApplyContainerMods("test", container, chgdesc, [], None,
+                                      None, None, None, None)
     self.assertEqual(container, [])
     self.assertEqual(chgdesc, [])
 
-  def testAdd(self):
-    container = []
-    chgdesc = []
-    mods = instance._PrepareContainerMods([
-      (constants.DDM_ADD, -1, "Hello"),
-      (constants.DDM_ADD, -1, "World"),
-      (constants.DDM_ADD, 0, "Start"),
-      (constants.DDM_ADD, -1, "End"),
-      ], None)
-    instance._ApplyContainerMods("test", container, chgdesc, mods,
-                                 None, None, None)
-    self.assertEqual(container, ["Start", "Hello", "World", "End"])
-    self.assertEqual(chgdesc, [])
-
-    mods = instance._PrepareContainerMods([
-      (constants.DDM_ADD, 0, "zero"),
-      (constants.DDM_ADD, 3, "Added"),
-      (constants.DDM_ADD, 5, "four"),
-      (constants.DDM_ADD, 7, "xyz"),
-      ], None)
-    instance._ApplyContainerMods("test", container, chgdesc, mods,
-                                 None, None, None)
-    self.assertEqual(container,
-                     ["zero", "Start", "Hello", "Added", "World", "four",
-                      "End", "xyz"])
-    self.assertEqual(chgdesc, [])
-
-    for idx in [-2, len(container) + 1]:
-      mods = instance._PrepareContainerMods([
-        (constants.DDM_ADD, idx, "error"),
-        ], None)
-      self.assertRaises(IndexError, instance._ApplyContainerMods,
-                        "test", container, None, mods, None, None, None)
-
-  def testRemoveError(self):
-    for idx in [0, 1, 2, 100, -1, -4]:
-      mods = instance._PrepareContainerMods([
-        (constants.DDM_REMOVE, idx, None),
-        ], None)
-      self.assertRaises(IndexError, instance._ApplyContainerMods,
-                        "test", [], None, mods, None, None, None)
-
-    mods = instance._PrepareContainerMods([
-      (constants.DDM_REMOVE, 0, object()),
-      ], None)
-    self.assertRaises(AssertionError, instance._ApplyContainerMods,
-                      "test", [""], None, mods, None, None, None)
+  def testAddSuccess(self):
+    self._insertContainerSuccessFn(constants.DDM_ADD)
 
   def testAddError(self):
-    for idx in range(-100, -1) + [100]:
-      mods = instance._PrepareContainerMods([
-        (constants.DDM_ADD, idx, None),
-        ], None)
-      self.assertRaises(IndexError, instance._ApplyContainerMods,
-                        "test", [], None, mods, None, None, None)
+    self._insertContainerErrorFn(constants.DDM_ADD)
 
-  def testRemove(self):
-    container = ["item 1", "item 2"]
-    mods = instance._PrepareContainerMods([
-      (constants.DDM_ADD, -1, "aaa"),
-      (constants.DDM_REMOVE, -1, None),
-      (constants.DDM_ADD, -1, "bbb"),
-      ], None)
-    chgdesc = []
-    instance._ApplyContainerMods("test", container, chgdesc, mods,
-                                 None, None, None)
-    self.assertEqual(container, ["item 1", "item 2", "bbb"])
-    self.assertEqual(chgdesc, [
-      ("test/2", "remove"),
-      ])
+  def testAttachSuccess(self):
+    self._insertContainerSuccessFn(constants.DDM_ATTACH)
+
+  def testAttachError(self):
+    self._insertContainerErrorFn(constants.DDM_ATTACH)
+
+  def testRemoveSuccess(self):
+    self._extractContainerSuccessFn(constants.DDM_REMOVE)
+
+  def testRemoveError(self):
+    self._extractContainerErrorFn(constants.DDM_REMOVE)
+
+  def testDetachSuccess(self):
+    self._extractContainerSuccessFn(constants.DDM_DETACH)
+
+  def testDetachError(self):
+    self._extractContainerErrorFn(constants.DDM_DETACH)
 
   def testModify(self):
     container = ["item 1", "item 2"]
-    mods = instance._PrepareContainerMods([
+    mods = instance_utils.PrepareContainerMods([
       (constants.DDM_MODIFY, -1, "a"),
       (constants.DDM_MODIFY, 0, "b"),
       (constants.DDM_MODIFY, 1, "c"),
       ], None)
     chgdesc = []
-    instance._ApplyContainerMods("test", container, chgdesc, mods,
-                                 None, None, None)
+    instance_utils.ApplyContainerMods("test", container, chgdesc, mods,
+                                      None, None, None, None, None)
     self.assertEqual(container, ["item 1", "item 2"])
     self.assertEqual(chgdesc, [])
 
     for idx in [-2, len(container) + 1]:
-      mods = instance._PrepareContainerMods([
+      mods = instance_utils.PrepareContainerMods([
         (constants.DDM_MODIFY, idx, "error"),
         ], None)
-      self.assertRaises(IndexError, instance._ApplyContainerMods,
-                        "test", container, None, mods, None, None, None)
+      self.assertRaises(IndexError, instance_utils.ApplyContainerMods,
+                        "test", container, None, mods, None, None, None, None,
+                        None)
 
   @staticmethod
   def _CreateTestFn(idx, params, private):
@@ -942,6 +1177,13 @@
       ])
 
   @staticmethod
+  def _AttachTestFn(idx, params, private):
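+    # Attach callback stub; mirrors _CreateTestFn but records an "attach" op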
+    private.data = ("attach", idx, params)
+    return ((100 * idx, params), [
+      ("test/%s" % idx, hex(idx)),
+      ])
+
+  @staticmethod
   def _ModifyTestFn(idx, item, params, private):
     private.data = ("modify", idx, params)
     return [
@@ -952,10 +1194,14 @@
   def _RemoveTestFn(idx, item, private):
     private.data = ("remove", idx, item)
 
+  @staticmethod
+  def _DetachTestFn(idx, item, private):
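+    # Detach callback stub; mirrors _RemoveTestFn but records a "detach" op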
+    private.data = ("detach", idx, item)
+
   def testAddWithCreateFunction(self):
     container = []
     chgdesc = []
-    mods = instance._PrepareContainerMods([
+    mods = instance_utils.PrepareContainerMods([
       (constants.DDM_ADD, -1, "Hello"),
       (constants.DDM_ADD, -1, "World"),
       (constants.DDM_ADD, 0, "Start"),
@@ -964,14 +1210,17 @@
       (constants.DDM_MODIFY, -1, "foobar"),
       (constants.DDM_REMOVE, 2, None),
       (constants.DDM_ADD, 1, "More"),
+      (constants.DDM_DETACH, -1, None),
+      (constants.DDM_ATTACH, 0, "Hello"),
       ], mock.Mock)
-    instance._ApplyContainerMods("test", container, chgdesc, mods,
-                                 self._CreateTestFn, self._ModifyTestFn,
-                                 self._RemoveTestFn)
+    instance_utils.ApplyContainerMods("test", container, chgdesc, mods,
+                                      self._CreateTestFn, self._AttachTestFn,
+                                      self._ModifyTestFn, self._RemoveTestFn,
+                                      self._DetachTestFn)
     self.assertEqual(container, [
+      (000, "Hello"),
       (000, "Start"),
       (100, "More"),
-      (000, "Hello"),
       ])
     self.assertEqual(chgdesc, [
       ("test/0", "0x0"),
@@ -981,7 +1230,9 @@
       ("test/2", "remove"),
       ("test/2", "modify foobar"),
       ("test/2", "remove"),
-      ("test/1", "0x1")
+      ("test/1", "0x1"),
+      ("test/2", "detach"),
+      ("test/0", "0x0"),
       ])
     self.assertTrue(compat.all(op == private.data[0]
                                for (op, _, _, private) in mods))
@@ -994,6 +1245,8 @@
       ("modify", 2, "foobar"),
       ("remove", 2, (300, "End")),
       ("add", 1, "More"),
+      ("detach", 2, (000, "Hello")),
+      ("attach", 0, "Hello"),
       ])
 
 
@@ -1009,7 +1262,7 @@
   def GenerateUniqueID(self, ec_id):
     return "ec%s-uq%s" % (ec_id, self._unique_id.next())
 
-  def AllocateDRBDMinor(self, nodes, instance):
+  def AllocateDRBDMinor(self, nodes, disk):
     return [self._drbd_minor.next()
             for _ in nodes]
 
@@ -1036,7 +1289,7 @@
     return copy.deepcopy(constants.DISK_DT_DEFAULTS)
 
   def testWrongDiskTemplate(self):
-    gdt = instance.GenerateDiskTemplate
+    gdt = instance_storage.GenerateDiskTemplate
     disk_template = "##unknown##"
 
     assert disk_template not in constants.DISK_TEMPLATES
@@ -1047,7 +1300,7 @@
                       self.GetDiskParams())
 
   def testDiskless(self):
-    gdt = instance.GenerateDiskTemplate
+    gdt = instance_storage.GenerateDiskTemplate
 
     result = gdt(self.lu, constants.DT_DISKLESS, "inst27734.example.com",
                  "node30113.example.com", [], [],
@@ -1058,7 +1311,7 @@
   def _TestTrivialDisk(self, template, disk_info, base_index, exp_dev_type,
                        file_storage_dir=NotImplemented,
                        file_driver=NotImplemented):
-    gdt = instance.GenerateDiskTemplate
+    gdt = instance_storage.GenerateDiskTemplate
 
     map(lambda params: utils.ForceDictType(params,
                                            constants.IDISK_PARAMS_TYPES),
@@ -1110,6 +1363,9 @@
       ("xenvg", "ec1-uq0.disk3"),
       ("othervg", "ec1-uq1.disk4"),
       ])
+    self.assertEqual(map(operator.attrgetter("nodes"), result), [
+                     ["node21741.example.com"], ["node21741.example.com"]])
 
   def testFile(self):
     # anything != DT_FILE would do here
@@ -1144,7 +1400,17 @@
                     for x in (2,3,4)]
         self.assertEqual(map(operator.attrgetter("logical_id"), result),
                          expected)
+        self.assertEqual(map(operator.attrgetter("nodes"), result), [
+          [], [], []])
       else:
+        if disk_template == constants.DT_FILE:
+          self.assertEqual(map(operator.attrgetter("nodes"), result), [
+            ["node21741.example.com"], ["node21741.example.com"],
+            ["node21741.example.com"]])
+        else:
+          self.assertEqual(map(operator.attrgetter("nodes"), result), [
+            [], [], []])
+
         for (idx, disk) in enumerate(result):
           (file_driver, file_storage_dir) = disk.logical_id
           dir_fmt = r"^/tmp/.*\.%s\.disk%d$" % (disk_template, idx + 2)
@@ -1165,6 +1431,7 @@
     self.assertEqual(map(operator.attrgetter("logical_id"), result), [
       (constants.BLOCKDEV_DRIVER_MANUAL, "/tmp/some/block/dev"),
       ])
+    self.assertEqual(map(operator.attrgetter("nodes"), result), [[]])
 
   def testRbd(self):
     disk_info = [{
@@ -1182,9 +1449,10 @@
       ("rbd", "ec1-uq0.rbd.disk0"),
       ("rbd", "ec1-uq1.rbd.disk1"),
       ])
+    self.assertEqual(map(operator.attrgetter("nodes"), result), [[], []])
 
   def testDrbd8(self):
-    gdt = instance.GenerateDiskTemplate
+    gdt = instance_storage.GenerateDiskTemplate
     drbd8_defaults = constants.DISK_LD_DEFAULTS[constants.DT_DRBD8]
     drbd8_default_metavg = drbd8_defaults[constants.LDP_DEFAULT_METAVG]
 
@@ -1214,6 +1482,8 @@
         (drbd8_default_metavg, "ec1-uq2.disk2_meta"),
       ]]
 
+    exp_nodes = ["node1334.example.com", "node12272.example.com"]
+
     assert len(exp_logical_ids) == len(disk_info)
 
     map(lambda params: utils.ForceDictType(params,
@@ -1241,9 +1511,11 @@
         self.assertTrue(isinstance(disk, objects.Disk))
         self.assertEqual(child.dev_type, constants.DT_PLAIN)
         self.assertTrue(child.children is None)
+        self.assertEqual(child.nodes, exp_nodes)
 
       self.assertEqual(map(operator.attrgetter("logical_id"), disk.children),
                        exp_logical_ids[idx])
+      self.assertEqual(disk.nodes, exp_nodes)
 
       self.assertEqual(len(disk.children), 2)
       self.assertEqual(disk.children[0].size, disk.size)
@@ -1361,7 +1633,7 @@
                             disk_template=constants.DT_PLAIN,
                             disks=[d.uuid for d in disks])
 
-    self.assertRaises(errors.OpExecError, instance.WipeDisks, lu, inst)
+    self.assertRaises(errors.OpExecError, instance_create.WipeDisks, lu, inst)
 
   def _FailingWipeCb(self, (disk, _), offset, size):
     # This should only ever be called for the first disk
@@ -1390,7 +1662,7 @@
                             disks=[d.uuid for d in disks])
 
     try:
-      instance.WipeDisks(lu, inst)
+      instance_create.WipeDisks(lu, inst)
     except errors.OpExecError, err:
       self.assertTrue(str(err), "Could not wipe disk 0 at offset 0 ")
     else:
@@ -1435,7 +1707,7 @@
 
     (lu, inst, pauset, progresst) = self._PrepareWipeTest(0, disks)
 
-    instance.WipeDisks(lu, inst)
+    instance_create.WipeDisks(lu, inst)
 
     self.assertEqual(pauset.history, [
       ("disk0", 1024, True),
@@ -1465,8 +1737,8 @@
         self._PrepareWipeTest(start_offset, disks)
 
       # Test start offset with only one disk
-      instance.WipeDisks(lu, inst,
-                         disks=[(1, disks[1], start_offset)])
+      instance_create.WipeDisks(lu, inst,
+                                disks=[(1, disks[1], start_offset)])
 
       # Only the second disk may have been paused and wiped
       self.assertEqual(pauset.history, [
@@ -1492,12 +1764,12 @@
     return op
 
   def testMissingAttributes(self):
-    self.assertRaises(AttributeError, instance._CheckOpportunisticLocking,
+    self.assertRaises(AttributeError, instance.CheckOpportunisticLocking,
                       object())
 
   def testDefaults(self):
     op = self._MakeOp()
-    instance._CheckOpportunisticLocking(op)
+    instance.CheckOpportunisticLocking(op)
 
   def test(self):
     for iallocator in [None, "something", "other"]:
@@ -1506,9 +1778,9 @@
                           opportunistic_locking=opplock)
         if opplock and not iallocator:
           self.assertRaises(errors.OpPrereqError,
-                            instance._CheckOpportunisticLocking, op)
+                            instance.CheckOpportunisticLocking, op)
         else:
-          instance._CheckOpportunisticLocking(op)
+          instance.CheckOpportunisticLocking(op)
 
 
 class TestLUInstanceRemove(CmdlibTestCase):
@@ -1644,6 +1916,8 @@
   def setUp(self):
     super(TestLUInstanceRename, self).setUp()
 
+    self.MockOut(instance_utils, 'netutils', self.netutils_mod)
+
     self.inst = self.cfg.AddNewInstance()
 
     self.op = opcodes.OpInstanceRename(instance_name=self.inst.name,
@@ -1792,7 +2066,11 @@
   def setUp(self):
     super(TestLUInstanceSetParams, self).setUp()
 
-    self.inst = self.cfg.AddNewInstance()
+    self.MockOut(instance_set_params, 'netutils', self.netutils_mod)
+    self.MockOut(instance_utils, 'netutils', self.netutils_mod)
+
+    self.dev_type = constants.DT_PLAIN
+    self.inst = self.cfg.AddNewInstance(disk_template=self.dev_type)
     self.op = opcodes.OpInstanceSetParams(instance_name=self.inst.name)
 
     self.running_inst = \
@@ -1800,6 +2078,16 @@
     self.running_op = \
       opcodes.OpInstanceSetParams(instance_name=self.running_inst.name)
 
+    ext_disks = [self.cfg.CreateDisk(dev_type=constants.DT_EXT,
+                                     params={
+                                       constants.IDISK_PROVIDER: "pvdr"
+                                     })]
+    self.ext_storage_inst = \
+      self.cfg.AddNewInstance(disk_template=constants.DT_EXT,
+                              disks=ext_disks)
+    self.ext_storage_op = \
+      opcodes.OpInstanceSetParams(instance_name=self.ext_storage_inst.name)
+
     self.snode = self.cfg.AddNewNode()
 
     self.mocked_storage_type = constants.ST_LVM_VG
@@ -1814,6 +2102,9 @@
     self.mocked_running_inst_state = "running"
     self.mocked_running_inst_time = 10938474
 
+    self.mocked_disk_uuid = "mock_uuid_1134"
+    self.mocked_disk_name = "mock_disk_1134"
+
     bootid = "mock_bootid"
     storage_info = [
       {
@@ -1839,7 +2130,7 @@
         .Build()
 
     def _InstanceInfo(_, instance, __, ___):
-      if instance == self.inst.name:
+      if instance in [self.inst.name, self.ext_storage_inst.name]:
         return self.RpcResultsBuilder() \
           .CreateSuccessfulNodeResult(self.master, None)
       elif instance == self.running_inst.name:
@@ -2046,6 +2337,12 @@
                          nics=[(constants.DDM_ADD, -1, {})])
     self.ExecOpCode(op)
 
+  def testAttachNICs(self):
+    msg = "Attach operation is not supported for NICs"
+    op = self.CopyOpCode(self.op,
+                         nics=[(constants.DDM_ATTACH, -1, {})])
+    self.ExecOpCodeExpectOpPrereqError(op, msg)
+
   def testNoHotplugSupport(self):
     op = self.CopyOpCode(self.op,
                          nics=[(constants.DDM_ADD, -1, {})],
@@ -2188,6 +2485,12 @@
                          nics=[(constants.DDM_REMOVE, 0, {})])
     self.ExecOpCode(op)
 
+  def testDetachNICs(self):
+    msg = "Detach operation is not supported for NICs"
+    op = self.CopyOpCode(self.op,
+                         nics=[(constants.DDM_DETACH, -1, {})])
+    self.ExecOpCodeExpectOpPrereqError(op, msg)
+
   def testHotRemoveNic(self):
     inst = self.cfg.AddNewInstance(nics=[self.cfg.CreateNic(),
                                          self.cfg.CreateNic()])
@@ -2236,6 +2539,15 @@
     self.ExecOpCodeExpectException(
       op, errors.TypeEnforcementError, "is not a valid size")
 
+  def testAddDiskUnknownParam(self):
+    op = self.CopyOpCode(self.op,
+                         disks=[[constants.DDM_ADD, -1,
+                                 {
+                                   "uuid": self.mocked_disk_uuid
+                                 }]])
+    self.ExecOpCodeExpectException(
+      op, errors.TypeEnforcementError, "Unknown parameter 'uuid'")
+
   def testAddDiskRunningInstanceNoWaitForSync(self):
     op = self.CopyOpCode(self.running_op,
                          disks=[[constants.DDM_ADD, -1,
@@ -2253,9 +2565,48 @@
                                    constants.IDISK_SIZE: 1024
                                  }]])
     self.ExecOpCode(op)
-
     self.assertTrue(self.rpc.call_blockdev_shutdown.called)
 
+  def testAddDiskIndexBased(self):
+    SPECIFIC_SIZE = 435 * 4
+    insertion_index = len(self.inst.disks)
+    op = self.CopyOpCode(self.op,
+                         disks=[[constants.DDM_ADD, insertion_index,
+                                 {
+                                   constants.IDISK_SIZE: SPECIFIC_SIZE
+                                 }]])
+    self.ExecOpCode(op)
+    self.assertEqual(len(self.inst.disks), insertion_index + 1)
+    new_disk = self.cfg.GetDisk(self.inst.disks[insertion_index])
+    self.assertEqual(new_disk.size, SPECIFIC_SIZE)
+
+  def testAddDiskHugeIndex(self):
+    op = self.CopyOpCode(self.op,
+                         disks=[[constants.DDM_ADD, 5,
+                                 {
+                                   constants.IDISK_SIZE: 1024
+                                 }]])
+    self.ExecOpCodeExpectException(
+      op, IndexError, "Got disk index.*but there are only.*"
+    )
+
+  def testAddExtDisk(self):
+    op = self.CopyOpCode(self.ext_storage_op,
+                         disks=[[constants.DDM_ADD, -1,
+                                 {
+                                   constants.IDISK_SIZE: 1024
+                                 }]])
+    self.ExecOpCodeExpectOpPrereqError(op,
+                                       "Missing provider for template 'ext'")
+
+    op = self.CopyOpCode(self.ext_storage_op,
+                         disks=[[constants.DDM_ADD, -1,
+                                 {
+                                   constants.IDISK_SIZE: 1024,
+                                   constants.IDISK_PROVIDER: "bla"
+                                 }]])
+    self.ExecOpCode(op)
+
   def testAddDiskDownInstanceNoWaitForSync(self):
     op = self.CopyOpCode(self.op,
                          disks=[[constants.DDM_ADD, -1,
@@ -2265,7 +2616,7 @@
                          wait_for_sync=False)
     self.ExecOpCodeExpectOpPrereqError(
       op, "Can't add a disk to an instance with deactivated disks"
-          " and --no-wait-for-sync given.")
+          " and --no-wait-for-sync given")
 
   def testAddDiskRunningInstance(self):
     op = self.CopyOpCode(self.running_op,
@@ -2307,6 +2658,144 @@
     self.assertTrue(self.rpc.call_blockdev_assemble.called)
     self.assertTrue(self.rpc.call_hotplug_device.called)
 
+  def testAttachDiskWrongParams(self):
+    msg = "Only one argument is permitted in attach op, either name or uuid"
+    op = self.CopyOpCode(self.op,
+                         disks=[[constants.DDM_ATTACH, -1,
+                                 {
+                                   constants.IDISK_SIZE: 1134
+                                 }]],
+                         )
+    self.ExecOpCodeExpectOpPrereqError(op, msg)
+    op = self.CopyOpCode(self.op,
+                         disks=[[constants.DDM_ATTACH, -1,
+                                 {
+                                   'uuid': "1134",
+                                   constants.IDISK_NAME: "1134",
+                                 }]],
+                         )
+    self.ExecOpCodeExpectOpPrereqError(op, msg)
+    op = self.CopyOpCode(self.op,
+                         disks=[[constants.DDM_ATTACH, -1,
+                                 {
+                                   'uuid': "1134",
+                                   constants.IDISK_SIZE: 1134,
+                                 }]],
+                         )
+    self.ExecOpCodeExpectOpPrereqError(op, msg)
+
+  def testAttachDiskWrongTemplate(self):
+    msg = "Instance has '%s' template while disk has '%s' template" % \
+      (constants.DT_PLAIN, constants.DT_BLOCK)
+    self.cfg.AddOrphanDisk(name=self.mocked_disk_name,
+                           dev_type=constants.DT_BLOCK)
+    op = self.CopyOpCode(self.op,
+                         disks=[[constants.DDM_ATTACH, -1,
+                                 {
+                                   constants.IDISK_NAME: self.mocked_disk_name
+                                 }]],
+                         )
+    self.ExecOpCodeExpectOpPrereqError(op, msg)
+
+  def testAttachDiskWrongNodes(self):
+    msg = "Disk nodes are \['mock_node_1134'\]"
+
+    self.cfg.AddOrphanDisk(name=self.mocked_disk_name,
+                           primary_node="mock_node_1134")
+    op = self.CopyOpCode(self.op,
+                         disks=[[constants.DDM_ATTACH, -1,
+                                 {
+                                   constants.IDISK_NAME: self.mocked_disk_name
+                                 }]],
+                         )
+    self.ExecOpCodeExpectOpPrereqError(op, msg)
+
+  def testAttachDiskRunningInstance(self):
+    self.cfg.AddOrphanDisk(name=self.mocked_disk_name,
+                           primary_node=self.master.uuid)
+    self.rpc.call_blockdev_assemble.return_value = \
+      self.RpcResultsBuilder() \
+        .CreateSuccessfulNodeResult(self.master,
+                                    ("/dev/mocked_path",
+                                     "/var/run/ganeti/instance-disks/mocked_d",
+                                     None))
+    op = self.CopyOpCode(self.running_op,
+                         disks=[[constants.DDM_ATTACH, -1,
+                                 {
+                                   constants.IDISK_NAME: self.mocked_disk_name
+                                 }]],
+                         )
+    self.ExecOpCode(op)
+    self.assertTrue(self.rpc.call_blockdev_assemble.called)
+    self.assertFalse(self.rpc.call_blockdev_shutdown.called)
+
+  def testAttachDiskRunningInstanceNoWaitForSync(self):
+    self.cfg.AddOrphanDisk(name=self.mocked_disk_name,
+                           primary_node=self.master.uuid)
+    self.rpc.call_blockdev_assemble.return_value = \
+      self.RpcResultsBuilder() \
+        .CreateSuccessfulNodeResult(self.master,
+                                    ("/dev/mocked_path",
+                                     "/var/run/ganeti/instance-disks/mocked_d",
+                                     None))
+    op = self.CopyOpCode(self.running_op,
+                         disks=[[constants.DDM_ATTACH, -1,
+                                 {
+                                   constants.IDISK_NAME: self.mocked_disk_name
+                                 }]],
+                         wait_for_sync=False)
+    self.ExecOpCode(op)
+    self.assertTrue(self.rpc.call_blockdev_assemble.called)
+    self.assertFalse(self.rpc.call_blockdev_shutdown.called)
+
+  def testAttachDiskDownInstance(self):
+    self.cfg.AddOrphanDisk(name=self.mocked_disk_name,
+                           primary_node=self.master.uuid)
+    op = self.CopyOpCode(self.op,
+                         disks=[[constants.DDM_ATTACH, -1,
+                                 {
+                                   constants.IDISK_NAME: self.mocked_disk_name
+                                 }]])
+    self.ExecOpCode(op)
+
+    self.assertTrue(self.rpc.call_blockdev_assemble.called)
+    self.assertTrue(self.rpc.call_blockdev_shutdown.called)
+
+  def testAttachDiskDownInstanceNoWaitForSync(self):
+    self.cfg.AddOrphanDisk(name=self.mocked_disk_name)
+    op = self.CopyOpCode(self.op,
+                         disks=[[constants.DDM_ATTACH, -1,
+                                 {
+                                   constants.IDISK_NAME: self.mocked_disk_name
+                                 }]],
+                         wait_for_sync=False)
+    self.ExecOpCodeExpectOpPrereqError(
+      op, "Can't attach a disk to an instance with deactivated disks"
+          " and --no-wait-for-sync given.")
+
+  def testHotAttachDisk(self):
+    self.cfg.AddOrphanDisk(name=self.mocked_disk_name,
+                           primary_node=self.master.uuid)
+    self.rpc.call_blockdev_assemble.return_value = \
+      self.RpcResultsBuilder() \
+        .CreateSuccessfulNodeResult(self.master,
+                                    ("/dev/mocked_path",
+                                     "/var/run/ganeti/instance-disks/mocked_d",
+                                     None))
+    op = self.CopyOpCode(self.op,
+                         disks=[[constants.DDM_ATTACH, -1,
+                                 {
+                                   constants.IDISK_NAME: self.mocked_disk_name
+                                 }]],
+                         hotplug=True)
+    self.rpc.call_hotplug_supported.return_value = \
+      self.RpcResultsBuilder() \
+        .CreateSuccessfulNodeResult(self.master)
+    self.ExecOpCode(op)
+    self.assertTrue(self.rpc.call_hotplug_supported.called)
+    self.assertTrue(self.rpc.call_blockdev_assemble.called)
+    self.assertTrue(self.rpc.call_hotplug_device.called)
+
   def testHotRemoveDisk(self):
     inst = self.cfg.AddNewInstance(disks=[self.cfg.CreateDisk(),
                                           self.cfg.CreateDisk()])
@@ -2324,6 +2813,254 @@
     self.assertTrue(self.rpc.call_blockdev_shutdown.called)
     self.assertTrue(self.rpc.call_blockdev_remove.called)
 
+  def testHotDetachDisk(self):
+    inst = self.cfg.AddNewInstance(disks=[self.cfg.CreateDisk(),
+                                          self.cfg.CreateDisk()])
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst.name,
+                         disks=[[constants.DDM_DETACH, -1,
+                                 {}]],
+                         hotplug=True)
+    self.rpc.call_hotplug_supported.return_value = \
+      self.RpcResultsBuilder() \
+        .CreateSuccessfulNodeResult(self.master)
+    self.ExecOpCode(op)
+    self.assertTrue(self.rpc.call_hotplug_supported.called)
+    self.assertTrue(self.rpc.call_hotplug_device.called)
+    self.assertTrue(self.rpc.call_blockdev_shutdown.called)
+
+  def testDetachAttachFileBasedDisk(self):
+    """Detach and re-attach a disk from a file-based instance."""
+    # Create our disk and calculate the path where it is stored, its name, as
+    # well as the expected path where it will be moved.
+    mock_disk = self.cfg.CreateDisk(
+      name='mock_disk_1134', dev_type=constants.DT_FILE,
+      logical_id=('loop', '/tmp/instance/disk'), primary_node=self.master.uuid)
+
+    # Create a file-based instance
+    file_disk = self.cfg.CreateDisk(
+      dev_type=constants.DT_FILE,
+      logical_id=('loop', '/tmp/instance/disk2'))
+    inst = self.cfg.AddNewInstance(name='instance',
+                                   disk_template=constants.DT_FILE,
+                                   disks=[file_disk, mock_disk],
+                                  )
+
+    # Detach the disk and assert that it has been moved to the upper directory
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst.name,
+                         disks=[[constants.DDM_DETACH, -1,
+                                 {}]],
+                         )
+    self.ExecOpCode(op)
+    mock_disk = self.cfg.GetDiskInfo(mock_disk.uuid)
+    self.assertEqual('/tmp/disk', mock_disk.logical_id[1])
+
+    # Re-attach the disk and assert that it has been moved to the original
+    # directory
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst.name,
+                         disks=[[constants.DDM_ATTACH, -1,
+                                 {
+                                   constants.IDISK_NAME: self.mocked_disk_name
+                                 }]],
+                         )
+    self.ExecOpCode(op)
+    mock_disk = self.cfg.GetDiskInfo(mock_disk.uuid)
+    self.assertIn('/tmp/instance', mock_disk.logical_id[1])
+
+  def testAttachDetachDisk(self):
+    """Check if the disks can be attached and detached in sequence.
+
+    Also, check if the operations succeed both with name and uuid.
+    """
+    disk1 = self.cfg.CreateDisk(uuid=self.mocked_disk_uuid,
+                                primary_node=self.master.uuid)
+    disk2 = self.cfg.CreateDisk(name="mock_name_1134",
+                                primary_node=self.master.uuid)
+
+    inst = self.cfg.AddNewInstance(disks=[disk1, disk2])
+
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst.name,
+                         disks=[[constants.DDM_DETACH, self.mocked_disk_uuid,
+                                 {}]])
+    self.ExecOpCode(op)
+    self.assertEqual([disk2], self.cfg.GetInstanceDisks(inst.uuid))
+
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst.name,
+                         disks=[[constants.DDM_ATTACH, 0,
+                                 {
+                                   'uuid': self.mocked_disk_uuid
+                                 }]])
+    self.ExecOpCode(op)
+    self.assertEqual([disk1, disk2], self.cfg.GetInstanceDisks(inst.uuid))
+
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst.name,
+                         disks=[[constants.DDM_DETACH, 1,
+                                 {}]])
+    self.ExecOpCode(op)
+    self.assertEqual([disk1], self.cfg.GetInstanceDisks(inst.uuid))
+
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst.name,
+                         disks=[[constants.DDM_ATTACH, 0,
+                                 {
+                                   constants.IDISK_NAME: "mock_name_1134"
+                                 }]])
+    self.ExecOpCode(op)
+    self.assertEqual([disk2, disk1], self.cfg.GetInstanceDisks(inst.uuid))
+
+  def testDetachAndAttachToDisklessInstance(self):
+    """Check if a disk can be detached and then re-attached if the instance is
+    diskless inbetween.
+
+    """
+    disk = self.cfg.CreateDisk(uuid=self.mocked_disk_uuid,
+                               primary_node=self.master.uuid)
+
+    inst = self.cfg.AddNewInstance(disks=[disk], primary_node=self.master)
+
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst.name,
+                         disks=[[constants.DDM_DETACH,
+                                 self.mocked_disk_uuid, {}]])
+
+    self.ExecOpCode(op)
+    self.assertEqual([], self.cfg.GetInstanceDisks(inst.uuid))
+
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst.name,
+                         disks=[[constants.DDM_ATTACH, 0,
+                                 {
+                                   'uuid': self.mocked_disk_uuid
+                                 }]])
+    self.ExecOpCode(op)
+    self.assertEqual([disk], self.cfg.GetInstanceDisks(inst.uuid))
+
+  def testDetachAttachDrbdDisk(self):
+    """Check if a DRBD disk can be detached and then re-attached.
+
+    """
+    disk = self.cfg.CreateDisk(uuid=self.mocked_disk_uuid,
+                               primary_node=self.master.uuid,
+                               secondary_node=self.snode.uuid,
+                               dev_type=constants.DT_DRBD8)
+
+    inst = self.cfg.AddNewInstance(disks=[disk], primary_node=self.master)
+
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst.name,
+                         disks=[[constants.DDM_DETACH,
+                                 self.mocked_disk_uuid, {}]])
+
+    self.ExecOpCode(op)
+    self.assertEqual([], self.cfg.GetInstanceDisks(inst.uuid))
+
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst.name,
+                         disks=[[constants.DDM_ATTACH, 0,
+                                 {
+                                   'uuid': self.mocked_disk_uuid
+                                 }]])
+    self.ExecOpCode(op)
+    self.assertEqual([disk], self.cfg.GetInstanceDisks(inst.uuid))
+
+  def testDetachAttachDrbdDiskWithWrongPrimaryNode(self):
+    """Check if disk attachment with a wrong primary node fails.
+
+    """
+    disk1 = self.cfg.CreateDisk(uuid=self.mocked_disk_uuid,
+                               primary_node=self.master.uuid,
+                               secondary_node=self.snode.uuid,
+                               dev_type=constants.DT_DRBD8)
+
+    inst1 = self.cfg.AddNewInstance(disks=[disk1], primary_node=self.master,
+                                    secondary_node=self.snode)
+
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst1.name,
+                         disks=[[constants.DDM_DETACH,
+                                 self.mocked_disk_uuid, {}]])
+
+    self.ExecOpCode(op)
+    self.assertEqual([], self.cfg.GetInstanceDisks(inst1.uuid))
+
+    disk2 = self.cfg.CreateDisk(uuid="mock_uuid_1135",
+                               primary_node=self.snode.uuid,
+                               secondary_node=self.master.uuid,
+                               dev_type=constants.DT_DRBD8)
+
+    inst2 = self.cfg.AddNewInstance(disks=[disk2], primary_node=self.snode,
+                                    secondary_node=self.master)
+
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst2.name,
+                         disks=[[constants.DDM_ATTACH, 0,
+                                 {
+                                   'uuid': self.mocked_disk_uuid
+                                 }]])
+
+    self.assertRaises(errors.OpExecError, self.ExecOpCode, op)
+
+  def testDetachAttachExtDisk(self):
+    """Check attach/detach functionality of ExtStorage disks.
+
+    """
+    disk = self.cfg.CreateDisk(uuid=self.mocked_disk_uuid,
+                               dev_type=constants.DT_EXT,
+                               params={
+                                 constants.IDISK_PROVIDER: "pvdr"
+                               })
+
+    inst = self.cfg.AddNewInstance(disks=[disk], primary_node=self.master)
+
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst.name,
+                         disks=[[constants.DDM_DETACH,
+                                 self.mocked_disk_uuid, {}]])
+
+    self.ExecOpCode(op)
+    self.assertEqual([], self.cfg.GetInstanceDisks(inst.uuid))
+
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst.name,
+                         disks=[[constants.DDM_ATTACH, 0,
+                                 {
+                                   'uuid': self.mocked_disk_uuid
+                                 }]])
+    self.ExecOpCode(op)
+    self.assertEqual([disk], self.cfg.GetInstanceDisks(inst.uuid))
+
+  def testRemoveDiskRemovesStorageDir(self):
+    inst = self.cfg.AddNewInstance(disks=[self.cfg.CreateDisk(dev_type='file')])
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst.name,
+                         disks=[[constants.DDM_REMOVE, -1,
+                                 {}]])
+    self.rpc.call_file_storage_dir_remove.return_value = \
+      self.RpcResultsBuilder() \
+        .CreateSuccessfulNodeResult(self.master)
+    self.ExecOpCode(op)
+    self.rpc.call_file_storage_dir_remove.assert_called_with(
+        self.master.uuid, '/file/storage')
+
+  def testRemoveDiskKeepsStorageForRemaining(self):
+    inst = self.cfg.AddNewInstance(disks=[self.cfg.CreateDisk(dev_type='file'),
+                                          self.cfg.CreateDisk(dev_type='file')])
+    op = self.CopyOpCode(self.op,
+                         instance_name=inst.name,
+                         disks=[[constants.DDM_REMOVE, -1,
+                                 {}]])
+    self.rpc.call_file_storage_dir_remove.return_value = \
+      self.RpcResultsBuilder() \
+        .CreateSuccessfulNodeResult(self.master)
+    self.ExecOpCode(op)
+    self.assertFalse(self.rpc.call_file_storage_dir_remove.called)
+
   def testModifyDiskWithSize(self):
     op = self.CopyOpCode(self.op,
                          disks=[[constants.DDM_MODIFY, 0,
@@ -2352,9 +3089,22 @@
                                   }]])
     self.ExecOpCode(op)
 
+  def testModifyExtDiskProvider(self):
+    mod = [[constants.DDM_MODIFY, 0,
+             {
+               constants.IDISK_PROVIDER: "anything"
+             }]]
+    op = self.CopyOpCode(self.op, disks=mod)
+    self.ExecOpCodeExpectException(op, errors.TypeEnforcementError,
+                                   "Unknown parameter 'provider'")
+
+    op = self.CopyOpCode(self.ext_storage_op, disks=mod)
+    self.ExecOpCodeExpectOpPrereqError(op, "Disk 'provider' parameter change"
+                                           " is not possible")
+
   def testSetOldDiskTemplate(self):
     op = self.CopyOpCode(self.op,
-                         disk_template=self.inst.disk_template)
+                         disk_template=self.dev_type)
     self.ExecOpCodeExpectOpPrereqError(
       op, "Instance already has disk template")
 
@@ -2380,15 +3130,7 @@
           " template, not .*")
 
   def testConvertToExtWithSameProvider(self):
-    for disk_uuid in self.inst.disks:
-      self.cfg.RemoveInstanceDisk(self.inst.uuid, disk_uuid)
-    disk = self.cfg.CreateDisk(dev_type=constants.DT_EXT,
-                               params={constants.IDISK_PROVIDER: "pvdr"})
-    self.cfg.AddInstanceDisk(self.inst.uuid, disk)
-    self.inst.disk_template = constants.DT_EXT
-
-    op = self.CopyOpCode(self.op,
-                         instance_name=self.inst.name,
+    op = self.CopyOpCode(self.ext_storage_op,
                          disk_template=constants.DT_EXT,
                          ext_params={constants.IDISK_PROVIDER: "pvdr"})
     self.ExecOpCodeExpectOpPrereqError(
@@ -2454,30 +3196,6 @@
                          disk_template=constants.DT_PLAIN)
     self.ExecOpCode(op)
 
-  def testConvertDisklessDRBDToPlain(self):
-    self.cfg.SetIPolicyField(
-      constants.ISPECS_MIN, constants.ISPEC_DISK_COUNT, 0)
-    self.inst.disks = []
-    self.inst.disk_template = constants.DT_DRBD8
-
-    op = self.CopyOpCode(self.op,
-                         disk_template=constants.DT_PLAIN)
-    self.ExecOpCode(op)
-
-    self.assertEqual(self.inst.disk_template, constants.DT_PLAIN)
-
-  def testConvertDisklessPlainToDRBD(self):
-    self.cfg.SetIPolicyField(
-      constants.ISPECS_MIN, constants.ISPEC_DISK_COUNT, 0)
-    self.inst.disks = []
-    self.inst.disk_template = constants.DT_PLAIN
-
-    op = self.CopyOpCode(self.op,
-                         disk_template=constants.DT_DRBD8,
-                         remote_node=self.snode.name)
-    self.ExecOpCode(op)
-
-    self.assertEqual(self.inst.disk_template, constants.DT_DRBD8)
 
 class TestLUInstanceChangeGroup(CmdlibTestCase):
   def setUp(self):
diff --git a/test/py/cmdlib/node_unittest.py b/test/py/cmdlib/node_unittest.py
index d5b50e6..8dcad78 100644
--- a/test/py/cmdlib/node_unittest.py
+++ b/test/py/cmdlib/node_unittest.py
@@ -33,11 +33,13 @@
 """
 
 from collections import defaultdict
+import mock
 
 from ganeti import compat
 from ganeti import constants
 from ganeti import objects
 from ganeti import opcodes
+from ganeti.cmdlib import node
 
 from testsupport import *
 
@@ -278,5 +280,53 @@
     self.ExecOpCodeExpectOpPrereqError(op, "Can't get version information from"
                                        " node %s" % self.node_add.name)
 
+
+class TestLUNodeSetParams(CmdlibTestCase):
+  def setUp(self):
+    super(TestLUNodeSetParams, self).setUp()
+
+    self.MockOut(node, 'netutils', self.netutils_mod)
+    node.netutils.TcpPing.return_value = True
+
+    self.node = self.cfg.AddNewNode(
+        primary_ip='192.168.168.191',
+        secondary_ip='192.168.168.192',
+        master_candidate=True, uuid='blue_bunny')
+
+    self.snode = self.cfg.AddNewNode(
+        primary_ip='192.168.168.193',
+        secondary_ip='192.168.168.194',
+        master_candidate=True, uuid='pink_bunny')
+
+  def testSetSecondaryIp(self):
+    self.instance = self.cfg.AddNewInstance(primary_node=self.node,
+                                            secondary_node=self.snode,
+                                            disk_template='drbd')
+    op = opcodes.OpNodeSetParams(node_name=self.node.name,
+                                 secondary_ip='254.254.254.254')
+    self.ExecOpCode(op)
+
+    self.assertEqual('254.254.254.254', self.node.secondary_ip)
+    self.assertEqual(sorted(self.wconfd.all_locks.items()), [
+        ('cluster/BGL', 'shared'),
+        ('instance/mock_inst_1.example.com', 'shared'),
+        ('node-res/blue_bunny', 'exclusive'),
+        ('node/blue_bunny', 'exclusive')])
+
+  def testSetSecondaryIpNoLock(self):
+    self.instance = self.cfg.AddNewInstance(primary_node=self.node,
+                                            secondary_node=self.snode,
+                                            disk_template='file')
+    op = opcodes.OpNodeSetParams(node_name=self.node.name,
+                                 secondary_ip='254.254.254.254')
+    self.ExecOpCode(op)
+
+    self.assertEqual('254.254.254.254', self.node.secondary_ip)
+    self.assertEqual(sorted(self.wconfd.all_locks.items()), [
+        ('cluster/BGL', 'shared'),
+        ('node-res/blue_bunny', 'exclusive'),
+        ('node/blue_bunny', 'exclusive')])
+
+
 if __name__ == "__main__":
   testutils.GanetiTestProgram()
diff --git a/test/py/cmdlib/testsupport/__init__.py b/test/py/cmdlib/testsupport/__init__.py
index a1f0678..7dc5344 100644
--- a/test/py/cmdlib/testsupport/__init__.py
+++ b/test/py/cmdlib/testsupport/__init__.py
@@ -34,7 +34,7 @@
 
 from cmdlib.testsupport.cmdlib_testcase import CmdlibTestCase, \
   withLockedLU
-from cmdlib.testsupport.config_mock import ConfigMock
+from testutils.config_mock import ConfigMock
 from cmdlib.testsupport.iallocator_mock import patchIAllocator
 from cmdlib.testsupport.livelock_mock import LiveLockMock
 from cmdlib.testsupport.utils_mock import patchUtils
diff --git a/test/py/cmdlib/testsupport/cmdlib_testcase.py b/test/py/cmdlib/testsupport/cmdlib_testcase.py
index f245f45..3e776ee 100644
--- a/test/py/cmdlib/testsupport/cmdlib_testcase.py
+++ b/test/py/cmdlib/testsupport/cmdlib_testcase.py
@@ -35,8 +35,10 @@
 import mock
 import re
 import traceback
+import functools
+import sys
 
-from cmdlib.testsupport.config_mock import ConfigMock
+from testutils.config_mock import ConfigMock
 from cmdlib.testsupport.iallocator_mock import patchIAllocator
 from cmdlib.testsupport.livelock_mock import LiveLockMock
 from cmdlib.testsupport.netutils_mock import patchNetutils, \
@@ -144,6 +146,7 @@
       pass
 
     self.ResetMocks()
+    self._cleanups = []
 
   def _StopPatchers(self):
     if self._iallocator_patcher is not None:
@@ -164,6 +167,14 @@
 
     self._StopPatchers()
 
+    while self._cleanups:
+      f, args, kwargs = self._cleanups.pop(-1)
+      try:
+        # pylint: disable=W0142
+        f(*args, **kwargs)
+      except BaseException, e:
+        sys.stderr.write('Error in cleanup: %s\n' % e)
+
   def _GetTestModule(self):
     module = inspect.getsourcefile(self.__class__).split("/")[-1]
     suffix = "_unittest.py"
@@ -182,7 +193,8 @@
     self.cfg = ConfigMock()
     self.rpc = CreateRpcRunnerMock()
     self.ctx = GanetiContextMock(self)
-    self.mcpu = ProcessorMock(self.ctx)
+    self.wconfd = WConfdMock()
+    self.mcpu = ProcessorMock(self.ctx, self.wconfd)
 
     self._StopPatchers()
     try:
@@ -223,7 +235,7 @@
 
     """
     return MockLU(self.mcpu, mock.MagicMock(), self.ctx, self.cfg, self.rpc,
-                  (1234, "/tmp/mock/livelock"), WConfdMock())
+                  (1234, "/tmp/mock/livelock"), self.wconfd)
 
   def RpcResultsBuilder(self, use_node_names=False):
     """Creates a pre-configured L{RpcResultBuilder}
@@ -383,6 +395,23 @@
         return group
     assert False
 
+  def MockOut(self, *args, **kwargs):
+    """Immediately start mock.patch.object."""
+    patcher = mock.patch.object(*args, **kwargs)
+    mocked = patcher.start()
+    self.AddCleanup(patcher.stop)
+    return mocked
+
+  # Simplified backport of 2.7 feature
+  def AddCleanup(self, func, *args, **kwargs):
+    self._cleanups.append((func, args, kwargs))
+
+  def assertIn(self, first, second, msg=None):
+    if first not in second:
+      if msg is None:
+        msg = "%r not found in %r" % (first, second)
+      self.fail(msg)
+
 
 # pylint: disable=C0103
 def withLockedLU(func):
@@ -396,6 +425,7 @@
   the LU right before the test method is called.
 
   """
+  @functools.wraps(func)
   def wrapper(*args, **kwargs):
     test = args[0]
     assert isinstance(test, CmdlibTestCase)
diff --git a/test/py/cmdlib/testsupport/processor_mock.py b/test/py/cmdlib/testsupport/processor_mock.py
index 3f3b250..36a9919 100644
--- a/test/py/cmdlib/testsupport/processor_mock.py
+++ b/test/py/cmdlib/testsupport/processor_mock.py
@@ -36,8 +36,6 @@
 from ganeti import constants
 from ganeti import mcpu
 
-from cmdlib.testsupport.wconfd_mock import WConfdMock
-
 
 class LogRecordingCallback(mcpu.OpExecCbBase):
   """Helper class for log output recording.
@@ -77,11 +75,11 @@
 
   """
 
-  def __init__(self, context):
+  def __init__(self, context, wconfd):
     super(ProcessorMock, self).__init__(context, 1, True)
     self.log_entries = []
     self._lu_test_func = None
-    self.wconfd = WConfdMock()
+    self.wconfd = wconfd
 
   def ExecOpCodeAndRecordOutput(self, op):
     """Executes the given opcode and records the output for further inspection.
diff --git a/test/py/cmdlib/testsupport/wconfd_mock.py b/test/py/cmdlib/testsupport/wconfd_mock.py
index 9786f0f..2a5d07e 100644
--- a/test/py/cmdlib/testsupport/wconfd_mock.py
+++ b/test/py/cmdlib/testsupport/wconfd_mock.py
@@ -47,6 +47,7 @@
           del self.wconfdmock.mylocks[lockrq[0]]
       else:
         self.wconfdmock.mylocks[lockrq[0]] = lockrq[1]
+        self.wconfdmock.all_locks[lockrq[0]] = lockrq[1]
     return []
 
   def UpdateLocksWaiting(self, cid, _prio, req):
@@ -91,6 +92,7 @@
   """
   def __init__(self):
     self.mylocks = {}
+    self.all_locks = {}
 
   def Client(self):
     return MockClient(self)
diff --git a/test/py/ganeti.backend_unittest.py b/test/py/ganeti.backend_unittest.py
index 373adf0..68b2eee 100755
--- a/test/py/ganeti.backend_unittest.py
+++ b/test/py/ganeti.backend_unittest.py
@@ -48,7 +48,7 @@
 from ganeti import serializer
 from ganeti import ssh
 from ganeti import utils
-from cmdlib.testsupport.config_mock import ConfigMock
+from testutils.config_mock import ConfigMock
 
 
 class TestX509Certificates(unittest.TestCase):
diff --git a/test/py/ganeti.client.gnt_instance_unittest.py b/test/py/ganeti.client.gnt_instance_unittest.py
index 3349c1a..b908e03 100755
--- a/test/py/ganeti.client.gnt_instance_unittest.py
+++ b/test/py/ganeti.client.gnt_instance_unittest.py
@@ -129,26 +129,41 @@
 
 
 class TestConvertNicDiskModifications(unittest.TestCase):
-  def test(self):
+  def testErrorMods(self):
     fn = gnt_instance._ConvertNicDiskModifications
 
     self.assertEqual(fn([]), [])
 
     # Error cases
     self.assertRaises(errors.OpPrereqError, fn, [
-      (constants.DDM_REMOVE, { "param": "value", }),
+      (constants.DDM_REMOVE, {"param": "value", }),
       ])
     self.assertRaises(errors.OpPrereqError, fn, [
-      (0, { constants.DDM_REMOVE: True, "param": "value", }),
+      (0, {constants.DDM_REMOVE: True, "param": "value", }),
       ])
     self.assertRaises(errors.OpPrereqError, fn, [
+      (constants.DDM_DETACH, {"param": "value", }),
+      ])
+    self.assertRaises(errors.OpPrereqError, fn, [
+      (0, {constants.DDM_DETACH: True, "param": "value", }),
+      ])
+
+    self.assertRaises(errors.OpPrereqError, fn, [
       (0, {
         constants.DDM_REMOVE: True,
         constants.DDM_ADD: True,
         }),
       ])
+    self.assertRaises(errors.OpPrereqError, fn, [
+      (0, {
+        constants.DDM_DETACH: True,
+        constants.DDM_MODIFY: True,
+        }),
+      ])
 
-    # Legacy calls
+  def testLegacyCalls(self):
+    fn = gnt_instance._ConvertNicDiskModifications
+
     for action in constants.DDMS_VALUES:
       self.assertEqual(fn([
         (action, {}),
@@ -172,7 +187,9 @@
         }),
       ])
 
-    # New-style calls
+  def testNewStyleCalls(self):
+    fn = gnt_instance._ConvertNicDiskModifications
+
     self.assertEqual(fn([
       (2, {
         constants.IDISK_MODE: constants.DISK_RDWR,
@@ -213,7 +230,9 @@
         }),
       ])
 
-    # Names and UUIDs
+  def testNamesUUIDs(self):
+    fn = gnt_instance._ConvertNicDiskModifications
+
     self.assertEqual(fn([
       ('name', {
         constants.IDISK_MODE: constants.DISK_RDWR,
diff --git a/test/py/ganeti.config_unittest.py b/test/py/ganeti.config_unittest.py
index efcf9cb..47750fe 100755
--- a/test/py/ganeti.config_unittest.py
+++ b/test/py/ganeti.config_unittest.py
@@ -51,7 +51,7 @@
 import testutils
 import mocks
 import mock
-from cmdlib.testsupport.config_mock import ConfigMock
+from testutils.config_mock import ConfigMock
 
 
 def _StubGetEntResolver():
@@ -148,6 +148,77 @@
     self.assertEqual(all_nodes[0], iobj.primary_node,
                      msg="Primary node not first node in list")
 
+  def _CreateInstanceDisk(self, cfg):
+    # Construct instance and add a plain disk
+    inst = self._create_instance(cfg)
+    cfg.AddInstance(inst, "my-job")
+    disk = objects.Disk(dev_type=constants.DT_PLAIN, size=128,
+                        logical_id=("myxenvg", "disk25494"), uuid="disk0",
+                        name="name0")
+    cfg.AddInstanceDisk(inst.uuid, disk)
+
+    return inst, disk
+
+  def testDiskInfoByUUID(self):
+    """Check if the GetDiskInfo works with UUIDs."""
+    # Create mock config writer
+    cfg = self._get_object_mock()
+
+    # Create an instance and attach a disk to it
+    inst, disk = self._CreateInstanceDisk(cfg)
+
+    result = cfg.GetDiskInfo("disk0")
+    self.assertEqual(disk, result)
+
+  def testDiskInfoByName(self):
+    """Check if the GetDiskInfo works with names."""
+    # Create mock config writer
+    cfg = self._get_object_mock()
+
+    # Create an instance and attach a disk to it
+    inst, disk = self._CreateInstanceDisk(cfg)
+
+    result = cfg.GetDiskInfoByName("name0")
+    self.assertEqual(disk, result)
+
+  def testDiskInfoByWrongUUID(self):
+    """Assert that GetDiskInfo raises an exception when given a wrong UUID."""
+    # Create mock config writer
+    cfg = self._get_object_mock()
+
+    # Create an instance and attach a disk to it
+    inst, disk = self._CreateInstanceDisk(cfg)
+
+    result = cfg.GetDiskInfo("disk1134")
+    self.assertEqual(None, result)
+
+  def testDiskInfoByWrongName(self):
+    """Assert that GetDiskInfo returns None when given a wrong name."""
+    # Create mock config writer
+    cfg = self._get_object_mock()
+
+    # Create an instance and attach a disk to it
+    inst, disk = self._CreateInstanceDisk(cfg)
+
+    result = cfg.GetDiskInfoByName("name1134")
+    self.assertEqual(None, result)
+
+  def testDiskInfoDuplicateName(self):
+    """Assert that GetDiskInfo raises exception on duplicate names."""
+    # Create mock config writer
+    cfg = self._get_object_mock()
+
+    # Create an instance and attach a disk to it
+    inst, disk = self._CreateInstanceDisk(cfg)
+
+    # Create a disk with the same name and attach it to the instance.
+    disk = objects.Disk(dev_type=constants.DT_PLAIN, size=128,
+                        logical_id=("myxenvg", "disk25494"), uuid="disk1",
+                        name="name0")
+    cfg.AddInstanceDisk(inst.uuid, disk)
+
+    self.assertRaises(errors.ConfigurationError, cfg.GetDiskInfoByName, "name0")
+
   def testInstNodesNoDisks(self):
     """Test all_nodes/secondary_nodes when there are no disks"""
     # construct instance
@@ -232,20 +303,6 @@
       node2.uuid: ["myxenvg/disk0", "myxenvg/meta0"],
       })
 
-  def testSetInstanceDiskTemplate(self):
-    """Test that instance's disk template updates succcessfully"""
-    # Construct an instance with 'diskless' template
-    cfg = self._get_object_mock()
-    inst = self._create_instance(cfg)
-    cfg.AddInstance(inst, "my-job")
-    self.assertEqual(inst.disk_template, constants.DT_DISKLESS)
-
-    # Modify the disk template, e.g., 'plain'
-    cfg.SetInstanceDiskTemplate(inst.uuid, constants.DT_PLAIN)
-    # re-read the instance from configuration
-    inst = cfg.GetInstanceInfo(inst.uuid)
-    self.assertEqual(inst.disk_template, constants.DT_PLAIN)
-
   def testUpdateCluster(self):
     """Test updates on the cluster object"""
     cfg = self._get_object()
@@ -619,6 +676,54 @@
     cfg.RemoveNodeFromCandidateCerts(node_uuid, warn_fn=None)
     self.assertEqual(0, len(cfg.GetCandidateCerts()))
 
+  def testAttachDetachDisks(self):
+    """Test if the attach/detach wrappers work properly.
+
+    This test checks that the configuration remains in a consistent state
+    after a series of detach/attach operations.
+    """
+    # construct instance
+    cfg = self._get_object_mock()
+    inst = self._create_instance(cfg)
+    disk = objects.Disk(dev_type=constants.DT_PLAIN, size=128,
+                        logical_id=("myxenvg", "disk25494"), uuid="disk0")
+    cfg.AddInstance(inst, "my-job")
+    cfg.AddInstanceDisk(inst.uuid, disk)
+
+    # Detach disk from non-existent instance
+    self.assertRaises(errors.ConfigurationError, cfg.DetachInstanceDisk,
+                      "1134", "disk0")
+
+    # Detach non-existent disk
+    self.assertRaises(errors.ConfigurationError, cfg.DetachInstanceDisk,
+                      "test-uuid", "disk1")
+
+    # Detach disk
+    cfg.DetachInstanceDisk("test-uuid", "disk0")
+    instance_disks = cfg.GetInstanceDisks("test-uuid")
+    self.assertEqual(instance_disks, [])
+
+    # Detach disk again
+    self.assertRaises(errors.ProgrammerError, cfg.DetachInstanceDisk,
+                      "test-uuid", "disk0")
+
+    # Attach disk to non-existent instance
+    self.assertRaises(errors.ConfigurationError, cfg.AttachInstanceDisk,
+                      "1134", "disk0")
+
+    # Attach non-existent disk
+    self.assertRaises(errors.ConfigurationError, cfg.AttachInstanceDisk,
+                      "test-uuid", "disk1")
+
+    # Attach disk
+    cfg.AttachInstanceDisk("test-uuid", "disk0")
+    instance_disks = cfg.GetInstanceDisks("test-uuid")
+    self.assertEqual(instance_disks, [disk])
+
+    # Attach disk again
+    self.assertRaises(errors.ReservationError, cfg.AttachInstanceDisk,
+                      "test-uuid", "disk0")
+
 
 def _IsErrorInList(err_str, err_list):
   return any(map(lambda e: err_str in e, err_list))
diff --git a/test/py/ganeti.hypervisor.hv_kvm_unittest.py b/test/py/ganeti.hypervisor.hv_kvm_unittest.py
index c2145bd..4f0fbf6 100755
--- a/test/py/ganeti.hypervisor.hv_kvm_unittest.py
+++ b/test/py/ganeti.hypervisor.hv_kvm_unittest.py
@@ -50,8 +50,11 @@
 import ganeti.hypervisor.hv_kvm.netdev as netdev
 import ganeti.hypervisor.hv_kvm.monitor as monitor
 
+import mock
 import testutils
 
+from testutils.config_mock import ConfigMock
+
 
 class QmpStub(threading.Thread):
   """Stub for a QMP endpoint for a KVM instance
@@ -244,7 +247,7 @@
 
 
 class TestConsole(unittest.TestCase):
-  def _Test(self, instance, node, group, hvparams):
+  def MakeConsole(self, instance, node, group, hvparams):
     cons = hv_kvm.KVMHypervisor.GetInstanceConsole(instance, node, group,
                                                    hvparams, {})
     self.assertEqual(cons.Validate(), None)
@@ -261,7 +264,7 @@
       constants.HV_VNC_BIND_ADDRESS: None,
       constants.HV_KVM_SPICE_BIND: None,
       }
-    cons = self._Test(instance, node, group, hvparams)
+    cons = self.MakeConsole(instance, node, group, hvparams)
     self.assertEqual(cons.kind, constants.CONS_SSH)
     self.assertEqual(cons.host, node.name)
     self.assertEqual(cons.command[0], pathutils.KVM_CONSOLE_WRAPPER)
@@ -279,7 +282,7 @@
       constants.HV_VNC_BIND_ADDRESS: "192.0.2.1",
       constants.HV_KVM_SPICE_BIND: None,
       }
-    cons = self._Test(instance, node, group, hvparams)
+    cons = self.MakeConsole(instance, node, group, hvparams)
     self.assertEqual(cons.kind, constants.CONS_VNC)
     self.assertEqual(cons.host, "192.0.2.1")
     self.assertEqual(cons.port, constants.VNC_BASE_PORT + 10)
@@ -297,7 +300,7 @@
       constants.HV_VNC_BIND_ADDRESS: None,
       constants.HV_KVM_SPICE_BIND: "192.0.2.1",
       }
-    cons = self._Test(instance, node, group, hvparams)
+    cons = self.MakeConsole(instance, node, group, hvparams)
     self.assertEqual(cons.kind, constants.CONS_SPICE)
     self.assertEqual(cons.host, "192.0.2.1")
     self.assertEqual(cons.port, 11000)
@@ -314,78 +317,94 @@
       constants.HV_VNC_BIND_ADDRESS: None,
       constants.HV_KVM_SPICE_BIND: None,
       }
-    cons = self._Test(instance, node, group, hvparams)
+    cons = self.MakeConsole(instance, node, group, hvparams)
     self.assertEqual(cons.kind, constants.CONS_MESSAGE)
 
 
 class TestVersionChecking(testutils.GanetiTestCase):
-  def testParseVersion(self):
-    parse = hv_kvm.KVMHypervisor._ParseKVMVersion
-    help_112 = testutils.ReadTestData("kvm_1.1.2_help.txt")
-    help_10 = testutils.ReadTestData("kvm_1.0_help.txt")
-    help_01590 = testutils.ReadTestData("kvm_0.15.90_help.txt")
-    help_0125 = testutils.ReadTestData("kvm_0.12.5_help.txt")
-    help_091 = testutils.ReadTestData("kvm_0.9.1_help.txt")
-    self.assertEqual(parse(help_112), ("1.1.2", 1, 1, 2))
-    self.assertEqual(parse(help_10), ("1.0", 1, 0, 0))
-    self.assertEqual(parse(help_01590), ("0.15.90", 0, 15, 90))
-    self.assertEqual(parse(help_0125), ("0.12.5", 0, 12, 5))
-    self.assertEqual(parse(help_091), ("0.9.1", 0, 9, 1))
+  @staticmethod
+  def ParseTestData(name):
+    help = testutils.ReadTestData(name)
+    return hv_kvm.KVMHypervisor._ParseKVMVersion(help)
+
+  def testParseVersion112(self):
+    self.assertEqual(
+        self.ParseTestData("kvm_1.1.2_help.txt"), ("1.1.2", 1, 1, 2))
+
+  def testParseVersion10(self):
+    self.assertEqual(self.ParseTestData("kvm_1.0_help.txt"), ("1.0", 1, 0, 0))
+
+  def testParseVersion01590(self):
+    self.assertEqual(
+        self.ParseTestData("kvm_0.15.90_help.txt"), ("0.15.90", 0, 15, 90))
+
+  def testParseVersion0125(self):
+    self.assertEqual(
+        self.ParseTestData("kvm_0.12.5_help.txt"), ("0.12.5", 0, 12, 5))
+
+  def testParseVersion091(self):
+    self.assertEqual(
+        self.ParseTestData("kvm_0.9.1_help.txt"), ("0.9.1", 0, 9, 1))
 
 
 class TestSpiceParameterList(unittest.TestCase):
-  def test(self):
-    defaults = constants.HVC_DEFAULTS[constants.HT_KVM]
+  def setUp(self):
+    self.defaults = constants.HVC_DEFAULTS[constants.HT_KVM]
 
-    params = \
-      compat.UniqueFrozenset(getattr(constants, name)
-                             for name in dir(constants)
-                             if name.startswith("HV_KVM_SPICE_"))
+  def testAudioCompressionDefaultOn(self):
+    self.assertTrue(self.defaults[constants.HV_KVM_SPICE_AUDIO_COMPR])
 
-    # Parameters whose default value evaluates to True and don't need to be set
-    defaults_true = frozenset(filter(defaults.__getitem__, params))
+  def testVdAgentDefaultOn(self):
+    self.assertTrue(self.defaults[constants.HV_KVM_SPICE_USE_VDAGENT])
 
-    self.assertEqual(defaults_true, frozenset([
-      constants.HV_KVM_SPICE_AUDIO_COMPR,
-      constants.HV_KVM_SPICE_USE_VDAGENT,
-      constants.HV_KVM_SPICE_TLS_CIPHERS,
-      ]))
+  def testTlsCiphersDefaultOn(self):
+    self.assertTrue(self.defaults[constants.HV_KVM_SPICE_TLS_CIPHERS])
 
-    # HV_KVM_SPICE_BIND decides whether the other parameters must be set if
-    # their default evaluates to False
-    assert constants.HV_KVM_SPICE_BIND in params
-    assert constants.HV_KVM_SPICE_BIND not in defaults_true
+  def testBindDefaultOff(self):
+    self.assertFalse(self.defaults[constants.HV_KVM_SPICE_BIND])
 
-    # Exclude some parameters
-    params -= defaults_true | frozenset([
-      constants.HV_KVM_SPICE_BIND,
-      ])
-
-    self.assertEqual(hv_kvm._SPICE_ADDITIONAL_PARAMS, params)
+  def testAdditionalParams(self):
+    params = compat.UniqueFrozenset(
+        getattr(constants, name)
+        for name in dir(constants)
+        if name.startswith("HV_KVM_SPICE_"))
+    fixed = set([
+        constants.HV_KVM_SPICE_BIND, constants.HV_KVM_SPICE_TLS_CIPHERS,
+        constants.HV_KVM_SPICE_USE_VDAGENT, constants.HV_KVM_SPICE_AUDIO_COMPR])
+    self.assertEqual(hv_kvm._SPICE_ADDITIONAL_PARAMS, params - fixed)
 
 
 class TestHelpRegexps(testutils.GanetiTestCase):
-  def testBootRe(self):
-    """Check _BOOT_RE
+  """Check _BOOT_RE
 
-    It has too match -drive.*boot=on|off except if there is another dash-option
-    at the beginning of the line.
+  It has to match -drive.*boot=on|off except if there is another dash-option
+  at the beginning of the line.
 
-    """
+  """
+
+  @staticmethod
+  def SearchTestData(name):
     boot_re = hv_kvm.KVMHypervisor._BOOT_RE
-    help_112 = testutils.ReadTestData("kvm_1.1.2_help.txt")
-    help_10 = testutils.ReadTestData("kvm_1.0_help.txt")
-    help_01590 = testutils.ReadTestData("kvm_0.15.90_help.txt")
-    help_0125 = testutils.ReadTestData("kvm_0.12.5_help.txt")
-    help_091 = testutils.ReadTestData("kvm_0.9.1_help.txt")
-    help_091_fake = testutils.ReadTestData("kvm_0.9.1_help_boot_test.txt")
+    help = testutils.ReadTestData(name)
+    return boot_re.search(help)
 
-    self.assertTrue(boot_re.search(help_091))
-    self.assertTrue(boot_re.search(help_0125))
-    self.assertFalse(boot_re.search(help_091_fake))
-    self.assertFalse(boot_re.search(help_112))
-    self.assertFalse(boot_re.search(help_10))
-    self.assertFalse(boot_re.search(help_01590))
+  def testBootRe112(self):
+    self.assertFalse(self.SearchTestData("kvm_1.1.2_help.txt"))
+
+  def testBootRe10(self):
+    self.assertFalse(self.SearchTestData("kvm_1.0_help.txt"))
+
+  def testBootRe01590(self):
+    self.assertFalse(self.SearchTestData("kvm_0.15.90_help.txt"))
+
+  def testBootRe0125(self):
+    self.assertTrue(self.SearchTestData("kvm_0.12.5_help.txt"))
+
+  def testBootRe091(self):
+    self.assertTrue(self.SearchTestData("kvm_0.9.1_help.txt"))
+
+  def testBootRe091_fake(self):
+    self.assertFalse(self.SearchTestData("kvm_0.9.1_help_boot_test.txt"))
 
 
 class TestGetTunFeatures(unittest.TestCase):
@@ -485,5 +504,73 @@
     self.assertTrue(devinfo.pci==5)
 
 
+class PostfixMatcher(object):
+  def __init__(self, string):
+    self.string = string
+
+  def __eq__(self, other):
+    return other.endswith(self.string)
+
+  def __repr__(self):
+    return "<Postfix %s>" % self.string
+
+class TestKvmRuntime(testutils.GanetiTestCase):
+  """The _ExecuteKvmRuntime is at the core of all KVM operations."""
+
+  def setUp(self):
+    super(TestKvmRuntime, self).setUp()
+    kvm_class = 'ganeti.hypervisor.hv_kvm.KVMHypervisor'
+    self.MockOut('qmp', mock.patch('ganeti.hypervisor.hv_kvm.QmpConnection'))
+    self.MockOut('run_cmd', mock.patch('ganeti.utils.RunCmd'))
+    self.MockOut('ensure_dirs', mock.patch('ganeti.utils.EnsureDirs'))
+    self.MockOut('write_file', mock.patch('ganeti.utils.WriteFile'))
+    self.MockOut(mock.patch(kvm_class + '.ValidateParameters'))
+    self.MockOut(mock.patch('ganeti.hypervisor.hv_kvm.OpenTap',
+                            return_value=('test_nic', [], [])))
+    self.MockOut(mock.patch(kvm_class + '._ConfigureNIC'))
+    self.MockOut('pid_alive', mock.patch(kvm_class + '._InstancePidAlive',
+                                         return_value=('file', -1, False)))
+    self.MockOut(mock.patch(kvm_class + '._ExecuteCpuAffinity'))
+    self.MockOut(mock.patch(kvm_class + '._CallMonitorCommand'))
+
+    self.cfg = ConfigMock()
+    params = constants.HVC_DEFAULTS[constants.HT_KVM].copy()
+    beparams = constants.BEC_DEFAULTS.copy()
+    self.instance = self.cfg.AddNewInstance(name='name.example.com',
+                                            hypervisor='kvm',
+                                            hvparams=params,
+                                            beparams=beparams)
+
+  def testDirectoriesCreated(self):
+    hypervisor = hv_kvm.KVMHypervisor()
+    self.mocks['ensure_dirs'].assert_called_with([
+        (PostfixMatcher('/run/ganeti/kvm-hypervisor'), 0775),
+        (PostfixMatcher('/run/ganeti/kvm-hypervisor/pid'), 0775),
+        (PostfixMatcher('/run/ganeti/kvm-hypervisor/uid'), 0775),
+        (PostfixMatcher('/run/ganeti/kvm-hypervisor/ctrl'), 0775),
+        (PostfixMatcher('/run/ganeti/kvm-hypervisor/conf'), 0775),
+        (PostfixMatcher('/run/ganeti/kvm-hypervisor/nic'), 0775),
+        (PostfixMatcher('/run/ganeti/kvm-hypervisor/chroot'), 0775),
+        (PostfixMatcher('/run/ganeti/kvm-hypervisor/chroot-quarantine'), 0775),
+        (PostfixMatcher('/run/ganeti/kvm-hypervisor/keymap'), 0775)])
+
+  def testStartInstance(self):
+    hypervisor = hv_kvm.KVMHypervisor()
+    def RunCmd(cmd, **kwargs):
+      if '--help' in cmd:
+        return mock.Mock(
+            failed=False, output=testutils.ReadTestData("kvm_1.1.2_help.txt"))
+      if '-S' in cmd:
+        self.mocks['pid_alive'].return_value = ('file', -1, True)
+        return mock.Mock(failed=False)
+      elif '-M' in cmd:
+        return mock.Mock(failed=False, output='')
+      elif '-device' in cmd:
+        return mock.Mock(failed=False, output='name "virtio-blk-pci"')
+      else:
+        raise errors.ProgrammerError('Unexpected command: %s' % cmd)
+    self.mocks['run_cmd'].side_effect = RunCmd
+    hypervisor.StartInstance(self.instance, [], False)
+
 if __name__ == "__main__":
   testutils.GanetiTestProgram()
diff --git a/test/py/ganeti.mcpu_unittest.py b/test/py/ganeti.mcpu_unittest.py
index df95a8a..18fabd8 100755
--- a/test/py/ganeti.mcpu_unittest.py
+++ b/test/py/ganeti.mcpu_unittest.py
@@ -176,115 +176,5 @@
     self.assertEqual(op2.debug_level, 3)
 
 
-class _FakeLuWithLocks:
-  def __init__(self, needed_locks, share_locks):
-    self.needed_locks = needed_locks
-    self.share_locks = share_locks
-    self.locks = []
-
-  def owned_locks(self, *_):
-    return self.locks
-
-
-class TestVerifyLocks(unittest.TestCase):
-  def testNoLocks(self):
-    lu = _FakeLuWithLocks({}, {})
-    mcpu._VerifyLocks(lu,
-                      _mode_whitelist=NotImplemented,
-                      _nal_whitelist=NotImplemented)
-
-  def testNotAllSameMode(self):
-    for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
-      lu = _FakeLuWithLocks({
-        level: ["foo"],
-        }, {
-        level: 0,
-        locking.LEVEL_NODE_ALLOC: 0,
-        })
-      mcpu._VerifyLocks(lu, _mode_whitelist=[], _nal_whitelist=[])
-
-  def testDifferentMode(self):
-    for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
-      lu = _FakeLuWithLocks({
-        level: ["foo"],
-        }, {
-        level: 0,
-        locking.LEVEL_NODE_ALLOC: 1,
-        })
-      try:
-        mcpu._VerifyLocks(lu, _mode_whitelist=[], _nal_whitelist=[])
-      except AssertionError, err:
-        self.assertTrue("using the same mode as nodes" in str(err))
-      else:
-        self.fail("Exception not raised")
-
-      # Once more with the whitelist
-      mcpu._VerifyLocks(lu, _mode_whitelist=[_FakeLuWithLocks],
-                        _nal_whitelist=[])
-
-  def testSameMode(self):
-    for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
-      lu = _FakeLuWithLocks({
-        level: ["foo"],
-        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
-        }, {
-        level: 1,
-        locking.LEVEL_NODE_ALLOC: 1,
-        })
-
-      try:
-        mcpu._VerifyLocks(lu, _mode_whitelist=[_FakeLuWithLocks],
-                          _nal_whitelist=[])
-      except AssertionError, err:
-        self.assertTrue("whitelisted to use different modes" in str(err))
-      else:
-        self.fail("Exception not raised")
-
-      # Once more without the whitelist
-      mcpu._VerifyLocks(lu, _mode_whitelist=[], _nal_whitelist=[])
-
-  def testAllWithoutAllocLock(self):
-    for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
-      lu = _FakeLuWithLocks({
-        level: locking.ALL_SET,
-        }, {
-        level: 0,
-        locking.LEVEL_NODE_ALLOC: 0,
-        })
-
-      try:
-        mcpu._VerifyLocks(lu, _mode_whitelist=[], _nal_whitelist=[])
-      except AssertionError, err:
-        self.assertTrue("allocation lock must be used if" in str(err))
-      else:
-        self.fail("Exception not raised")
-
-      # Once more with the whitelist
-      mcpu._VerifyLocks(lu, _mode_whitelist=[],
-                        _nal_whitelist=[_FakeLuWithLocks])
-
-  def testAllWithAllocLock(self):
-    for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
-      lu = _FakeLuWithLocks({
-        level: locking.ALL_SET,
-        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
-        }, {
-        level: 0,
-        locking.LEVEL_NODE_ALLOC: 0,
-        })
-      lu.locks = [locking.NAL]
-
-      try:
-        mcpu._VerifyLocks(lu, _mode_whitelist=[],
-                          _nal_whitelist=[_FakeLuWithLocks])
-      except AssertionError, err:
-        self.assertTrue("whitelisted for not acquiring" in str(err))
-      else:
-        self.fail("Exception not raised")
-
-      # Once more without the whitelist
-      mcpu._VerifyLocks(lu, _mode_whitelist=[], _nal_whitelist=[])
-
-
 if __name__ == "__main__":
   testutils.GanetiTestProgram()
diff --git a/test/py/ganeti.objects_unittest.py b/test/py/ganeti.objects_unittest.py
index 5ac459d..4020751 100755
--- a/test/py/ganeti.objects_unittest.py
+++ b/test/py/ganeti.objects_unittest.py
@@ -266,6 +266,8 @@
     instance2 = objects.Instance()
     instance2.disk_template = constants.DT_RBD
     cfg.instances = { "myinstance1": instance1, "myinstance2": instance2 }
+    disk2 = objects.Disk(dev_type=constants.DT_RBD)
+    cfg.disks = { "pinkbunnydisk": disk2 }
     nodegroup = objects.NodeGroup()
     nodegroup.ipolicy = {}
     nodegroup.ipolicy[constants.IPOLICY_DTS] = [instance1.disk_template, \
diff --git a/test/py/ganeti.opcodes_unittest.py b/test/py/ganeti.opcodes_unittest.py
index 010ea0c..7abb9b3 100755
--- a/test/py/ganeti.opcodes_unittest.py
+++ b/test/py/ganeti.opcodes_unittest.py
@@ -377,7 +377,9 @@
   def _GenericTests(self, fn):
     self.assertTrue(fn([]))
     self.assertTrue(fn([(constants.DDM_ADD, {})]))
+    self.assertTrue(fn([(constants.DDM_ATTACH, {})]))
     self.assertTrue(fn([(constants.DDM_REMOVE, {})]))
+    self.assertTrue(fn([(constants.DDM_DETACH, {})]))
     for i in [0, 1, 2, 3, 9, 10, 1024]:
       self.assertTrue(fn([(i, {})]))
 
@@ -404,6 +406,7 @@
     for param in constants.IDISK_PARAMS:
       self.assertTrue(fn([[constants.DDM_ADD, {param: 0}]]))
       self.assertTrue(fn([[constants.DDM_ADD, {param: param}]]))
+      self.assertTrue(fn([[constants.DDM_ATTACH, {param: param}]]))
 
 
 if __name__ == "__main__":
diff --git a/test/py/ganeti.query_unittest.py b/test/py/ganeti.query_unittest.py
index 8482d41..1e2e27f 100755
--- a/test/py/ganeti.query_unittest.py
+++ b/test/py/ganeti.query_unittest.py
@@ -870,8 +870,8 @@
     instbyname = dict((inst.name, inst) for inst in instances)
 
     disk_usage = dict((inst.uuid,
-                       gmi.ComputeDiskSize(inst.disk_template,
-                                           [{"size": disk.size}
+                       gmi.ComputeDiskSize([{"size": disk.size,
+                                             "dev_type": disk.dev_type}
                                            for disk in inst.disks]))
                       for inst in instances)
 
diff --git a/test/py/ganeti.storage.filestorage_unittest.py b/test/py/ganeti.storage.filestorage_unittest.py
index 4938e7e..4ac5743 100755
--- a/test/py/ganeti.storage.filestorage_unittest.py
+++ b/test/py/ganeti.storage.filestorage_unittest.py
@@ -319,5 +319,15 @@
       env.volume.Remove()
       env.volume.Exists(assert_exists=False)
 
+  def testRenameFile(self):
+    """Test if we can rename a file."""
+    with TestFileDeviceHelper.TempEnvironment(create_file=True) as env:
+      new_path = os.path.join(env.subdirectory, 'middle')
+      env.volume.Move(new_path)
+      self.assertEqual(new_path, env.volume.path)
+      env.volume.Exists(assert_exists=True)
+      env.path = new_path # update the path for the context manager
+
+
 if __name__ == "__main__":
   testutils.GanetiTestProgram()
diff --git a/test/py/ganeti.utils_unittest.py b/test/py/ganeti.utils_unittest.py
index bc6b8af..5ebe78a 100755
--- a/test/py/ganeti.utils_unittest.py
+++ b/test/py/ganeti.utils_unittest.py
@@ -33,8 +33,11 @@
 import errno
 import fcntl
 import glob
+import mock
+import operator
 import os
 import os.path
+import random
 import re
 import shutil
 import signal
@@ -44,14 +47,13 @@
 import time
 import unittest
 import warnings
-import random
-import operator
 
 import testutils
 from ganeti import constants
 from ganeti import compat
 from ganeti import utils
 from ganeti import errors
+from ganeti import constants
 from ganeti.utils import RunCmd, \
      FirstFree, \
      RunParts
@@ -169,6 +171,7 @@
     self.failIf(f.NonMatching(["b12", "c"]))
     self.failUnless(f.NonMatching(["a", "1"]))
 
+
 class TestForceDictType(unittest.TestCase):
   """Test case for ForceDictType"""
   KEY_TYPES = {
@@ -399,5 +402,107 @@
                       "disk", disks)
 
 
+def Disk(dev_type):
+  return mock.Mock(dev_type=dev_type)
+
+
+def Drbd():
+  return Disk(constants.DT_DRBD8)
+
+
+def Rbd():
+  return Disk(constants.DT_RBD)
+
+
+class AllDiskTemplateTest(unittest.TestCase):
+  def testAllDiskless(self):
+    self.assertTrue(utils.AllDiskOfType([], [constants.DT_DISKLESS]))
+
+  def testOrDiskless(self):
+    self.assertTrue(utils.AllDiskOfType(
+        [], [constants.DT_DISKLESS, constants.DT_DRBD8]))
+
+  def testOrDrbd(self):
+    self.assertTrue(utils.AllDiskOfType(
+        [Drbd()], [constants.DT_DISKLESS, constants.DT_DRBD8]))
+
+  def testOrRbd(self):
+    self.assertTrue(utils.AllDiskOfType(
+        [Rbd()], [constants.DT_RBD, constants.DT_DRBD8]))
+
+  def testNotRbd(self):
+    self.assertFalse(utils.AllDiskOfType(
+        [Rbd()], [constants.DT_DRBD8]))
+
+  def testNotDiskless(self):
+    self.assertFalse(utils.AllDiskOfType(
+        [], [constants.DT_DRBD8]))
+
+  def testNotRbdDiskless(self):
+    self.assertFalse(utils.AllDiskOfType(
+        [Rbd()], [constants.DT_DISKLESS]))
+
+  def testHeterogeneous(self):
+    self.assertFalse(utils.AllDiskOfType(
+        [Rbd(), Drbd()], [constants.DT_DRBD8]))
+
+  def testHeterogeneousDiskless(self):
+    self.assertFalse(utils.AllDiskOfType(
+        [Rbd(), Drbd()], [constants.DT_DISKLESS]))
+
+
+class AnyDiskTemplateTest(unittest.TestCase):
+  def testAnyDiskless(self):
+    self.assertTrue(utils.AnyDiskOfType([], [constants.DT_DISKLESS]))
+
+  def testOrDiskless(self):
+    self.assertTrue(utils.AnyDiskOfType(
+        [], [constants.DT_DISKLESS, constants.DT_DRBD8]))
+
+  def testOrDrbd(self):
+    self.assertTrue(utils.AnyDiskOfType(
+        [Drbd()], [constants.DT_DISKLESS, constants.DT_DRBD8]))
+
+  def testOrRbd(self):
+    self.assertTrue(utils.AnyDiskOfType(
+        [Rbd()], [constants.DT_RBD, constants.DT_DRBD8]))
+
+  def testNotRbd(self):
+    self.assertFalse(utils.AnyDiskOfType(
+        [Rbd()], [constants.DT_DRBD8]))
+
+  def testNotDiskless(self):
+    self.assertFalse(utils.AnyDiskOfType(
+        [], [constants.DT_DRBD8]))
+
+  def testNotRbdDiskless(self):
+    self.assertFalse(utils.AnyDiskOfType(
+        [Rbd()], [constants.DT_DISKLESS]))
+
+  def testHeterogeneous(self):
+    self.assertTrue(utils.AnyDiskOfType(
+        [Rbd(), Drbd()], [constants.DT_DRBD8]))
+
+  def testHeterogeneousDiskless(self):
+    self.assertFalse(utils.AnyDiskOfType(
+        [Rbd(), Drbd()], [constants.DT_DISKLESS]))
+
+
+class GetDiskTemplateTest(unittest.TestCase):
+  def testUnique(self):
+    self.assertEqual(utils.GetDiskTemplate([Rbd()]), constants.DT_RBD)
+
+  def testDiskless(self):
+    self.assertEqual(utils.GetDiskTemplate([]), constants.DT_DISKLESS)
+
+  def testMultiple(self):
+    self.assertEqual(utils.GetDiskTemplate([Rbd(), Rbd()]),
+                     constants.DT_RBD)
+
+  def testMixed(self):
+    self.assertEqual(utils.GetDiskTemplate([Rbd(), Drbd()]),
+                     constants.DT_MIXED)
+
+
 if __name__ == "__main__":
   testutils.GanetiTestProgram()
diff --git a/test/py/testutils.py b/test/py/testutils/__init__.py
similarity index 95%
rename from test/py/testutils.py
rename to test/py/testutils/__init__.py
index fe425aa..27ca425 100644
--- a/test/py/testutils.py
+++ b/test/py/testutils/__init__.py
@@ -124,6 +124,14 @@
   """
   def setUp(self):
     self._temp_files = []
+    self.patches = {}
+    self.mocks = {}
+
+  def MockOut(self, name, patch=None):
+    if patch is None:
+      patch = name
+    self.patches[name] = patch
+    self.mocks[name] = patch.start()
 
   def tearDown(self):
     while self._temp_files:
@@ -132,6 +140,12 @@
       except EnvironmentError:
         pass
 
+    for patch in self.patches.values():
+      patch.stop()
+
+    self.patches = {}
+    self.mocks = {}
+
   def assertFileContent(self, file_name, expected_content):
     """Checks that the content of a file is what we expect.
 
diff --git a/test/py/cmdlib/testsupport/config_mock.py b/test/py/testutils/config_mock.py
similarity index 95%
rename from test/py/cmdlib/testsupport/config_mock.py
rename to test/py/testutils/config_mock.py
index 12d9be9..0ca0dc7 100644
--- a/test/py/cmdlib/testsupport/config_mock.py
+++ b/test/py/testutils/config_mock.py
@@ -284,7 +284,6 @@
                             admin_state_source=admin_state_source,
                             nics=nics,
                             disks=[],
-                            disk_template=disk_template,
                             disks_active=disks_active,
                             network_port=network_port)
     self.AddInstance(inst, None)
@@ -345,6 +344,10 @@
     self.AddNetwork(net, None)
     return net
 
+  def AddOrphanDisk(self, **params):
+    disk = self.CreateDisk(**params)  # pylint: disable=W0142
+    self._UnlockedAddDisk(disk)
+
   def ConnectNetworkToGroup(self, net, group, netparams=None):
     """Connect the given network to the group.
 
@@ -385,6 +388,7 @@
                  dev_type=constants.DT_PLAIN,
                  logical_id=None,
                  children=None,
+                 nodes=None,
                  iv_name=None,
                  size=1024,
                  mode=constants.DISK_RDWR,
@@ -435,12 +439,20 @@
         meta_child = self.CreateDisk(dev_type=constants.DT_PLAIN,
                                      size=constants.DRBD_META_SIZE)
         children = [data_child, meta_child]
+
+      if nodes is None:
+        nodes = [pnode_uuid, snode_uuid]
     elif dev_type == constants.DT_PLAIN:
       if logical_id is None:
         logical_id = ("mockvg", "mock_disk_%d" % disk_id)
+      if nodes is None and primary_node is not None:
+        nodes = [primary_node]
     elif dev_type in constants.DTS_FILEBASED:
       if logical_id is None:
         logical_id = (constants.FD_LOOP, "/file/storage/disk%d" % disk_id)
+      if (nodes is None and primary_node is not None and
+          dev_type == constants.DT_FILE):
+        nodes = [primary_node]
     elif dev_type == constants.DT_BLOCK:
       if logical_id is None:
         logical_id = (constants.BLOCKDEV_DRIVER_MANUAL,
@@ -455,6 +467,8 @@
       raise NotImplementedError
     if children is None:
       children = []
+    if nodes is None:
+      nodes = []
     if iv_name is None:
       iv_name = "disk/%d" % instance_disk_index
 
@@ -463,6 +477,7 @@
                         dev_type=dev_type,
                         logical_id=logical_id,
                         children=children,
+                        nodes=nodes,
                         iv_name=iv_name,
                         size=size,
                         mode=mode,
@@ -581,10 +596,10 @@
   def ComputeDRBDMap(self):
     return dict((node_uuid, {}) for node_uuid in self._ConfigData().nodes)
 
-  def AllocateDRBDMinor(self, node_uuids, inst_uuid):
+  def AllocateDRBDMinor(self, node_uuids, disk_uuid):
     return map(lambda _: 0, node_uuids)
 
-  def _UnlockedReleaseDRBDMinors(self, inst_uuid):
+  def _UnlockedReleaseDRBDMinors(self, disk_uuid):
     pass
 
   def SetIPolicyField(self, category, field, value):
@@ -666,10 +681,10 @@
     self._default_group = self.AddNewNodeGroup(name="default")
     self._master_node = self.AddNewNode(uuid=master_node_uuid)
 
-  def _OpenConfig(self, _accept_foreign):
+  def _OpenConfig(self, _accept_foreign, force=False):
     self._config_data = self._mocked_config_store
 
-  def _WriteConfig(self, destination=None):
+  def _WriteConfig(self, destination=None, releaselock=False):
     self._mocked_config_store = self._ConfigData()
 
   def _GetRpc(self, _address_list):
@@ -833,3 +848,20 @@
     """
     if net_uuid:
       return self._UnlockedReserveIp(net_uuid, address, ec_id, check)
+
+  def AddInstance(self, instance, ec_id, replace=False):
+    """Add an instance to the config.
+
+    """
+    instance.serial_no = 1
+    instance.ctime = instance.mtime = time.time()
+    self._ConfigData().instances[instance.uuid] = instance
+    self._ConfigData().cluster.serial_no += 1 # pylint: disable=E1103
+    self._UnlockedReleaseDRBDMinors(instance.uuid)
+    self._UnlockedCommitTemporaryIps(ec_id)
+
+  def GetDisk(self, disk_uuid):
+    """Retrieves a disk object if present.
+
+    """
+    return self._ConfigData().disks[disk_uuid]
diff --git a/tools/cfgupgrade b/tools/cfgupgrade
index 6789a56..31da116 100755
--- a/tools/cfgupgrade
+++ b/tools/cfgupgrade
@@ -35,730 +35,17 @@
 
 """
 
-
-import os
-import os.path
-import sys
-import optparse
-import logging
-import time
-from cStringIO import StringIO
-
-from ganeti import constants
-from ganeti import serializer
-from ganeti import utils
-from ganeti import cli
-from ganeti import bootstrap
-from ganeti import config
-from ganeti import netutils
-from ganeti import pathutils
-
-from ganeti.utils import version
-
-
-options = None
-args = None
-
-
-#: Target major version we will upgrade to
-TARGET_MAJOR = 2
-#: Target minor version we will upgrade to
-TARGET_MINOR = 13
-#: Target major version for downgrade
-DOWNGRADE_MAJOR = 2
-#: Target minor version for downgrade
-DOWNGRADE_MINOR = 12
-
-# map of legacy device types
-# (mapping differing old LD_* constants to new DT_* constants)
-DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
-# (mapping differing new DT_* constants to old LD_* constants)
-DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
-
-
-class Error(Exception):
-  """Generic exception"""
-  pass
-
-
-def SetupLogging():
-  """Configures the logging module.
-
-  """
-  formatter = logging.Formatter("%(asctime)s: %(message)s")
-
-  stderr_handler = logging.StreamHandler()
-  stderr_handler.setFormatter(formatter)
-  if options.debug:
-    stderr_handler.setLevel(logging.NOTSET)
-  elif options.verbose:
-    stderr_handler.setLevel(logging.INFO)
-  else:
-    stderr_handler.setLevel(logging.WARNING)
-
-  root_logger = logging.getLogger("")
-  root_logger.setLevel(logging.NOTSET)
-  root_logger.addHandler(stderr_handler)
-
-
-def CheckHostname(path):
-  """Ensures hostname matches ssconf value.
-
-  @param path: Path to ssconf file
-
-  """
-  ssconf_master_node = utils.ReadOneLineFile(path)
-  hostname = netutils.GetHostname().name
-
-  if ssconf_master_node == hostname:
-    return True
-
-  logging.warning("Warning: ssconf says master node is '%s', but this"
-                  " machine's name is '%s'; this tool must be run on"
-                  " the master node", ssconf_master_node, hostname)
-  return False
-
-
-def _FillIPolicySpecs(default_ipolicy, ipolicy):
-  if "minmax" in ipolicy:
-    for (key, spec) in ipolicy["minmax"][0].items():
-      for (par, val) in default_ipolicy["minmax"][0][key].items():
-        if par not in spec:
-          spec[par] = val
-
-
-def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
-  minmax_keys = ["min", "max"]
-  if any((k in ipolicy) for k in minmax_keys):
-    minmax = {}
-    for key in minmax_keys:
-      if key in ipolicy:
-        if ipolicy[key]:
-          minmax[key] = ipolicy[key]
-        del ipolicy[key]
-    if minmax:
-      ipolicy["minmax"] = [minmax]
-  if isgroup and "std" in ipolicy:
-    del ipolicy["std"]
-  _FillIPolicySpecs(default_ipolicy, ipolicy)
-
-
-def UpgradeNetworks(config_data):
-  networks = config_data.get("networks", None)
-  if not networks:
-    config_data["networks"] = {}
-
-
-def UpgradeCluster(config_data):
-  cluster = config_data.get("cluster", None)
-  if cluster is None:
-    raise Error("Cannot find cluster")
-  ipolicy = cluster.setdefault("ipolicy", None)
-  if ipolicy:
-    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
-  ial_params = cluster.get("default_iallocator_params", None)
-  if not ial_params:
-    cluster["default_iallocator_params"] = {}
-  if not "candidate_certs" in cluster:
-    cluster["candidate_certs"] = {}
-  cluster["instance_communication_network"] = \
-    cluster.get("instance_communication_network", "")
-  cluster["install_image"] = \
-    cluster.get("install_image", "")
-  cluster["zeroing_image"] = \
-    cluster.get("zeroing_image", "")
-  cluster["compression_tools"] = \
-    cluster.get("compression_tools", constants.IEC_DEFAULT_TOOLS)
-  if "enabled_user_shutdown" not in cluster:
-    cluster["enabled_user_shutdown"] = False
-  cluster["data_collectors"] = cluster.get("data_collectors", {})
-  for name in constants.DATA_COLLECTOR_NAMES:
-    cluster["data_collectors"][name] = \
-      cluster["data_collectors"].get(
-          name, dict(active=True, interval=constants.MOND_TIME_INTERVAL * 1e6))
-
-
-def UpgradeGroups(config_data):
-  cl_ipolicy = config_data["cluster"].get("ipolicy")
-  for group in config_data["nodegroups"].values():
-    networks = group.get("networks", None)
-    if not networks:
-      group["networks"] = {}
-    ipolicy = group.get("ipolicy", None)
-    if ipolicy:
-      if cl_ipolicy is None:
-        raise Error("A group defines an instance policy but there is no"
-                    " instance policy at cluster level")
-      UpgradeIPolicy(ipolicy, cl_ipolicy, True)
-
-
-def GetExclusiveStorageValue(config_data):
-  """Return a conservative value of the exclusive_storage flag.
-
-  Return C{True} if the cluster or at least a nodegroup have the flag set.
-
-  """
-  ret = False
-  cluster = config_data["cluster"]
-  ndparams = cluster.get("ndparams")
-  if ndparams is not None and ndparams.get("exclusive_storage"):
-    ret = True
-  for group in config_data["nodegroups"].values():
-    ndparams = group.get("ndparams")
-    if ndparams is not None and ndparams.get("exclusive_storage"):
-      ret = True
-  return ret
-
-
-def RemovePhysicalId(disk):
-  if "children" in disk:
-    for d in disk["children"]:
-      RemovePhysicalId(d)
-  if "physical_id" in disk:
-    del disk["physical_id"]
-
-
-def ChangeDiskDevType(disk, dev_type_map):
-  """Replaces disk's dev_type attributes according to the given map.
-
-  This can be used for both, up or downgrading the disks.
-  """
-  if disk["dev_type"] in dev_type_map:
-    disk["dev_type"] = dev_type_map[disk["dev_type"]]
-  if "children" in disk:
-    for child in disk["children"]:
-      ChangeDiskDevType(child, dev_type_map)
-
-
-def UpgradeDiskDevType(disk):
-  """Upgrades the disks' device type."""
-  ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
-
-
-def _ConvertNicNameToUuid(iobj, network2uuid):
-  for nic in iobj["nics"]:
-    name = nic.get("network", None)
-    if name:
-      uuid = network2uuid.get(name, None)
-      if uuid:
-        print("NIC with network name %s found."
-              " Substituting with uuid %s." % (name, uuid))
-        nic["network"] = uuid
-
-
-def AssignUuid(disk):
-  if not "uuid" in disk:
-    disk["uuid"] = utils.io.NewUUID()
-  if "children" in disk:
-    for d in disk["children"]:
-      AssignUuid(d)
-
-
-def _ConvertDiskAndCheckMissingSpindles(iobj, instance):
-  missing_spindles = False
-  if "disks" not in iobj:
-    raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
-  disks = iobj["disks"]
-  if not all(isinstance(d, str) for d in disks):
-    #  Disks are not top level citizens
-    for idx, dobj in enumerate(disks):
-      RemovePhysicalId(dobj)
-
-      expected = "disk/%s" % idx
-      current = dobj.get("iv_name", "")
-      if current != expected:
-        logging.warning("Updating iv_name for instance %s/disk %s"
-                        " from '%s' to '%s'",
-                        instance, idx, current, expected)
-        dobj["iv_name"] = expected
-
-      if "dev_type" in dobj:
-        UpgradeDiskDevType(dobj)
-
-      if not "spindles" in dobj:
-        missing_spindles = True
-
-      AssignUuid(dobj)
-  return missing_spindles
-
-
-def UpgradeInstances(config_data):
-  """Upgrades the instances' configuration."""
-
-  network2uuid = dict((n["name"], n["uuid"])
-                      for n in config_data["networks"].values())
-  if "instances" not in config_data:
-    raise Error("Can't find the 'instances' key in the configuration!")
-
-  missing_spindles = False
-  for instance, iobj in config_data["instances"].items():
-    _ConvertNicNameToUuid(iobj, network2uuid)
-    if _ConvertDiskAndCheckMissingSpindles(iobj, instance):
-      missing_spindles = True
-    if "admin_state_source" not in iobj:
-      iobj["admin_state_source"] = constants.ADMIN_SOURCE
-
-  if GetExclusiveStorageValue(config_data) and missing_spindles:
-    # We cannot be sure that the instances that are missing spindles have
-    # exclusive storage enabled (the check would be more complicated), so we
-    # give a noncommittal message
-    logging.warning("Some instance disks could be needing to update the"
-                    " spindles parameter; you can check by running"
-                    " 'gnt-cluster verify', and fix any problem with"
-                    " 'gnt-cluster repair-disk-sizes'")
-
-
-def UpgradeRapiUsers():
-  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
-      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
-    if os.path.exists(options.RAPI_USERS_FILE):
-      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
-                  " already exists at %s" %
-                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
-    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
-                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
-    if not options.dry_run:
-      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
-                       mkdir=True, mkdir_mode=0750)
-
-  # Create a symlink for RAPI users file
-  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
-           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
-      os.path.isfile(options.RAPI_USERS_FILE)):
-    logging.info("Creating symlink from %s to %s",
-                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
-    if not options.dry_run:
-      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)
-
-
-def UpgradeWatcher():
-  # Remove old watcher state file if it exists
-  if os.path.exists(options.WATCHER_STATEFILE):
-    logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
-    if not options.dry_run:
-      utils.RemoveFile(options.WATCHER_STATEFILE)
-
-
-def UpgradeFileStoragePaths(config_data):
-  # Write file storage paths
-  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
-    cluster = config_data["cluster"]
-    file_storage_dir = cluster.get("file_storage_dir")
-    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
-    del cluster
-
-    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
-                 " for file storage; writing existing configuration values"
-                 " into '%s'",
-                 options.FILE_STORAGE_PATHS_FILE)
-
-    if file_storage_dir:
-      logging.info("File storage directory: %s", file_storage_dir)
-    if shared_file_storage_dir:
-      logging.info("Shared file storage directory: %s",
-                   shared_file_storage_dir)
-
-    buf = StringIO()
-    buf.write("# List automatically generated from configuration by\n")
-    buf.write("# cfgupgrade at %s\n" % time.asctime())
-    if file_storage_dir:
-      buf.write("%s\n" % file_storage_dir)
-    if shared_file_storage_dir:
-      buf.write("%s\n" % shared_file_storage_dir)
-    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
-                    data=buf.getvalue(),
-                    mode=0600,
-                    dry_run=options.dry_run,
-                    backup=True)
-
-
-def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
-  if old_key not in nodes_by_old_key:
-    logging.warning("Can't find node '%s' in configuration, assuming that it's"
-                    " already up-to-date", old_key)
-    return old_key
-  return nodes_by_old_key[old_key][new_key_field]
-
-
-def ChangeNodeIndices(config_data, old_key_field, new_key_field):
-  def ChangeDiskNodeIndices(disk):
-    # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
-    # considered when up/downgrading from/to any versions touching 2.9 on the
-    # way.
-    drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
-    if disk["dev_type"] in drbd_disk_types:
-      for i in range(0, 2):
-        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
-                                                disk["logical_id"][i],
-                                                new_key_field)
-    if "children" in disk:
-      for child in disk["children"]:
-        ChangeDiskNodeIndices(child)
-
-  nodes_by_old_key = {}
-  nodes_by_new_key = {}
-  for (_, node) in config_data["nodes"].items():
-    nodes_by_old_key[node[old_key_field]] = node
-    nodes_by_new_key[node[new_key_field]] = node
-
-  config_data["nodes"] = nodes_by_new_key
-
-  cluster = config_data["cluster"]
-  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
-                                           cluster["master_node"],
-                                           new_key_field)
-
-  for inst in config_data["instances"].values():
-    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
-                                           inst["primary_node"],
-                                           new_key_field)
-
-  for disk in config_data["disks"].values():
-    ChangeDiskNodeIndices(disk)
-
-
-def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
-  insts_by_old_key = {}
-  insts_by_new_key = {}
-  for (_, inst) in config_data["instances"].items():
-    insts_by_old_key[inst[old_key_field]] = inst
-    insts_by_new_key[inst[new_key_field]] = inst
-
-  config_data["instances"] = insts_by_new_key
-
-
-def UpgradeNodeIndices(config_data):
-  ChangeNodeIndices(config_data, "name", "uuid")
-
-
-def UpgradeInstanceIndices(config_data):
-  ChangeInstanceIndices(config_data, "name", "uuid")
-
-
-def UpgradeFilters(config_data):
-  filters = config_data.get("filters", None)
-  if not filters:
-    config_data["filters"] = {}
-
-
-def UpgradeTopLevelDisks(config_data):
-  """Upgrades the disks as config top level citizens."""
-  if "instances" not in config_data:
-    raise Error("Can't find the 'instances' key in the configuration!")
-
-  if "disks" in config_data:
-    # Disks are already top level citizens
-    return
-
-  config_data["disks"] = dict()
-  for iobj in config_data["instances"].values():
-    disk_uuids = []
-    for disk in iobj["disks"]:
-      duuid = disk["uuid"]
-      disk["serial_no"] = 1
-      # Instances may not have the ctime value, and the Haskell serialization
-      # will have set it to zero.
-      disk["ctime"] = disk["mtime"] = iobj.get("ctime", 0)
-      config_data["disks"][duuid] = disk
-      disk_uuids.append(duuid)
-    iobj["disks"] = disk_uuids
-
-
-def UpgradeAll(config_data):
-  config_data["version"] = version.BuildVersion(TARGET_MAJOR, TARGET_MINOR, 0)
-  UpgradeRapiUsers()
-  UpgradeWatcher()
-  UpgradeFileStoragePaths(config_data)
-  UpgradeNetworks(config_data)
-  UpgradeCluster(config_data)
-  UpgradeGroups(config_data)
-  UpgradeInstances(config_data)
-  UpgradeTopLevelDisks(config_data)
-  UpgradeNodeIndices(config_data)
-  UpgradeInstanceIndices(config_data)
-  UpgradeFilters(config_data)
-
-
-# DOWNGRADE ------------------------------------------------------------
-
-def DowngradeExtAccess(config_data):
-  # Remove 'access' for ext storage from cluster diskparams
-  cluster_extparams = config_data["cluster"]["diskparams"].get("ext", None)
-  if (cluster_extparams is not None and
-      "access" in cluster_extparams):
-    del cluster_extparams["access"]
-
-  # Remove 'access' for ext storage from nodegroup diskparams
-  for group in config_data["nodegroups"].values():
-    group_extparams = group["diskparams"].get("ext", None)
-    if (group_extparams is not None and
-        "access" in group_extparams):
-      del group_extparams["access"]
-
-
-def DowngradeDataCollectors(config_data):
-  cluster = config_data["cluster"]
-  if "data_collectors" in cluster:
-    del cluster["data_collectors"]
-
-
-def DowngradeFilters(config_data):
-  if "filters" in config_data:
-    del config_data["filters"]
-
-
-def DowngradeLxcParams(hvparams):
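-  # Strip LXC hypervisor parameters that the previous stable version does not
-  # know about.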
-  hv = "lxc"
-  if hv not in hvparams:
-    return
-
-  params_to_del = [
-    "devices",
-    "drop_capabilities",
-    "extra_cgroups",
-    "extra_config",
-    "num_ttys",
-    "startup_timeout",
-  ]
-  for param in params_to_del:
-    if param in hvparams[hv]:
-      del hvparams[hv][param]
-
-
-def DowngradeAllLxcParams(config_data):
-  cluster = config_data["cluster"]
-  if "hvparams" in cluster:
-    DowngradeLxcParams(cluster["hvparams"])
-
-  for iobj in config_data.get("instances", {}).values():
-    if "hvparams" in iobj:
-      DowngradeLxcParams(iobj["hvparams"])
-
-
-def DowngradeAll(config_data):
-  # Any code specific to a particular version should be labeled that way, so
-  # it can be removed when updating to the next version.
-  config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
-                                                DOWNGRADE_MINOR, 0)
-  DowngradeExtAccess(config_data)
-  DowngradeDataCollectors(config_data)
-  DowngradeFilters(config_data)
-  DowngradeAllLxcParams(config_data)
-
-
-def _ParseOptions():
-  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
-  parser.add_option("--dry-run", dest="dry_run",
-                    action="store_true",
-                    help="Try to do the conversion, but don't write"
-                         " output file")
-  parser.add_option(cli.FORCE_OPT)
-  parser.add_option(cli.DEBUG_OPT)
-  parser.add_option(cli.VERBOSE_OPT)
-  parser.add_option("--ignore-hostname", dest="ignore_hostname",
-                    action="store_true", default=False,
-                    help="Don't abort if hostname doesn't match")
-  parser.add_option("--path", help="Convert configuration in this"
-                    " directory instead of '%s'" % pathutils.DATA_DIR,
-                    default=pathutils.DATA_DIR, dest="data_dir")
-  parser.add_option("--confdir",
-                    help=("Use this directory instead of '%s'" %
-                          pathutils.CONF_DIR),
-                    default=pathutils.CONF_DIR, dest="conf_dir")
-  parser.add_option("--no-verify",
-                    help="Do not verify configuration after upgrade",
-                    action="store_true", dest="no_verify", default=False)
-  parser.add_option("--downgrade",
-                    help="Downgrade to the previous stable version",
-                    action="store_true", dest="downgrade", default=False)
-  return parser.parse_args()
-
-
-def _ComposePaths():
-  # We need to keep filenames locally because they might be renamed between
-  # versions.
-  options.data_dir = os.path.abspath(options.data_dir)
-  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
-  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
-  options.CLIENT_PEM_PATH = options.data_dir + "/client.pem"
-  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
-  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
-  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
-  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
-  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
-  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
-  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
-  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
-  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
-  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
-  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"
-
-
-def _AskUser():
-  if not options.force:
-    if options.downgrade:
-      usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
-                  " Some configuration data might be removed if they don't fit"
-                  " in the old format. Please make sure you have read the"
-                  " upgrade notes (available in the UPGRADE file and included"
-                  " in other documentation formats) to understand what they"
-                  " are. Continue with *DOWNGRADING* the configuration?" %
-                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
-    else:
-      usertext = ("Please make sure you have read the upgrade notes for"
-                  " Ganeti %s (available in the UPGRADE file and included"
-                  " in other documentation formats). Continue with upgrading"
-                  " configuration?" % constants.RELEASE_VERSION)
-    if not cli.AskUser(usertext):
-      sys.exit(constants.EXIT_FAILURE)
-
-
-def _Downgrade(config_major, config_minor, config_version, config_data,
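-  # Only configurations at the current target version (or already at the
-  # downgrade target) may be downgraded.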
-               config_revision):
-  if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
-          (config_major == DOWNGRADE_MAJOR and
-           config_minor == DOWNGRADE_MINOR)):
-    raise Error("Downgrade supported only from the latest version (%s.%s),"
-                " found %s (%s.%s.%s) instead" %
-                (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
-                 config_minor, config_revision))
-  DowngradeAll(config_data)
-
-
-def _TestLoadingConfigFile():
-  # test loading the config file
-  all_ok = True
-  if not (options.dry_run or options.no_verify):
-    logging.info("Testing the new config file...")
-    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
-                              accept_foreign=options.ignore_hostname,
-                              offline=True)
-    # if we reached this, it's all fine
-    vrfy = cfg.VerifyConfig()
-    if vrfy:
-      logging.error("Errors after conversion:")
-      for item in vrfy:
-        logging.error(" - %s", item)
-      all_ok = False
-    else:
-      logging.info("File loaded successfully after upgrading")
-    del cfg
-
-  if options.downgrade:
-    action = "downgraded"
-    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
-  else:
-    action = "upgraded"
-    out_ver = constants.RELEASE_VERSION
-  if all_ok:
-    cli.ToStderr("Configuration successfully %s to version %s.",
-                 action, out_ver)
-  else:
-    cli.ToStderr("Configuration %s to version %s, but there are errors."
-                 "\nPlease review the file.", action, out_ver)
-
-
-def main():
-  """Main program.
-
-  """
-  global options, args # pylint: disable=W0603
-
-  (options, args) = _ParseOptions()
-  _ComposePaths()
-
-  SetupLogging()
-
-  # Option checking
-  if args:
-    raise Error("No arguments expected")
-  if options.downgrade and not options.no_verify:
-    options.no_verify = True
-
-  # Check master name
-  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
-    logging.error("Aborting due to hostname mismatch")
-    sys.exit(constants.EXIT_FAILURE)
-
-  _AskUser()
-
-  # Check whether it's a Ganeti configuration directory
-  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
-          os.path.isfile(options.SERVER_PEM_PATH) and
-          os.path.isfile(options.KNOWN_HOSTS_PATH)):
-    raise Error(("%s does not seem to be a Ganeti configuration"
-                 " directory") % options.data_dir)
-
-  if not os.path.isdir(options.conf_dir):
-    raise Error("Not a directory: %s" % options.conf_dir)
-
-  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))
-
-  try:
-    config_version = config_data["version"]
-  except KeyError:
-    raise Error("Unable to determine configuration version")
-
-  (config_major, config_minor, config_revision) = \
-    version.SplitVersion(config_version)
-
-  logging.info("Found configuration version %s (%d.%d.%d)",
-               config_version, config_major, config_minor, config_revision)
-
-  if "config_version" in config_data["cluster"]:
-    raise Error("Inconsistent configuration: found config_version in"
-                " configuration file")
-
-  # Downgrade to the previous stable version
-  if options.downgrade:
-    _Downgrade(config_major, config_minor, config_version, config_data,
-               config_revision)
-
-  # Upgrade from 2.{0..12} to 2.13
-  elif config_major == 2 and config_minor in range(0, 13):
-    if config_revision != 0:
-      logging.warning("Config revision is %s, not 0", config_revision)
-    UpgradeAll(config_data)
-
-  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
-    logging.info("No changes necessary")
-
-  else:
-    raise Error("Configuration version %d.%d.%d not supported by this tool" %
-                (config_major, config_minor, config_revision))
-
-  try:
-    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
-    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
-                    data=serializer.DumpJson(config_data),
-                    mode=0600,
-                    dry_run=options.dry_run,
-                    backup=True)
-
-    if not options.dry_run:
-      # This creates the cluster certificate if it does not exist yet.
-      # In this case, we do not automatically create a client certificate
-      # as well, because if the cluster certificate did not exist before,
-      # no client certificate will exist on any node yet. In this case
-      # all client certificates should be renewed by 'gnt-cluster
-      # renew-crypto --new-node-certificates'. This will be enforced
-      # by a nagging warning in 'gnt-cluster verify'.
-      bootstrap.GenerateClusterCrypto(
-        False, False, False, False, False, False, None,
-        nodecert_file=options.SERVER_PEM_PATH,
-        rapicert_file=options.RAPI_CERT_FILE,
-        spicecert_file=options.SPICE_CERT_FILE,
-        spicecacert_file=options.SPICE_CACERT_FILE,
-        hmackey_file=options.CONFD_HMAC_KEY,
-        cds_file=options.CDS_FILE)
-
-  except Exception:
-    logging.critical("Writing configuration failed. It is probably in an"
-                     " inconsistent state and needs manual intervention.")
-    raise
-
-  _TestLoadingConfigFile()
+from ganeti.tools.cfgupgrade import CfgUpgrade, Error, ParseOptions
 
 
 if __name__ == "__main__":
-  main()
+  opts, args = ParseOptions()
+  try:
+    CfgUpgrade(opts, args).Run()
+  except Error as e:
+    if opts.debug:
+      # If debugging, we want to see the original stack trace.
+      raise
+    else:
+      # Otherwise print just the error message, without the traceback.
+      raise SystemExit(e)
diff --git a/tools/move-instance b/tools/move-instance
index 728b3bf..8913f62 100755
--- a/tools/move-instance
+++ b/tools/move-instance
@@ -468,6 +468,12 @@
     @param remote_import_fn: Callback for reporting received remote import
                              information
 
+    @return: Opcode results of the move job
+    @raise errors.JobLost: If job can't be found
+    @raise errors.OpExecError: If job didn't succeed
+
+    @see: L{ganeti.rapi.client_utils.PollJob}
+
     """
     return rapi.client_utils.PollJob(cl, job_id,
                                      MoveJobPollReportCb(self.CheckAbort,
@@ -697,9 +703,6 @@
     logging.info("Retrieving instance information from source cluster")
     instinfo = self._GetInstanceInfo(src_client, mrt.PollJob,
                                      mrt.move.src_instance_name)
-    if instinfo["disk_template"] in constants.DTS_FILEBASED:
-      raise Error("Inter-cluster move of file-based instances is not"
-                  " supported.")
 
     logging.info("Preparing export on source cluster")
     expinfo = self._PrepareExport(src_client, mrt.PollJob,