← Back to team overview

credativ team mailing list archive

[Merge] lp:~0k.io/openupgrade-server/8.0-openupgrade-base-2 into lp:openupgrade-server/8.0

 

Valentin Lab has proposed merging lp:~0k.io/openupgrade-server/8.0-openupgrade-base-2 into lp:openupgrade-server/8.0.

Requested reviews:
  OpenUpgrade Committers (openupgrade-committers)

For more details, see:
https://code.launchpad.net/~0k.io/openupgrade-server/8.0-openupgrade-base-2/+merge/214770
-- 
https://code.launchpad.net/~0k.io/openupgrade-server/8.0-openupgrade-base-2/+merge/214770
Your team OpenUpgrade Committers is requested to review the proposed merge of lp:~0k.io/openupgrade-server/8.0-openupgrade-base-2 into lp:openupgrade-server/8.0.
=== modified file 'openerp/addons/base/ir/ir_model.py'
--- openerp/addons/base/ir/ir_model.py	2014-03-12 18:06:14 +0000
+++ openerp/addons/base/ir/ir_model.py	2014-04-08 14:07:20 +0000
@@ -35,6 +35,8 @@
 from openerp.tools.translate import _
 from openerp.osv.orm import except_orm, browse_record, MAGIC_COLUMNS
 
+from openerp.openupgrade import openupgrade_log, openupgrade
+
 _logger = logging.getLogger(__name__)
 
 MODULE_UNINSTALL_FLAG = '_force_unlink'
@@ -145,6 +147,11 @@
 
     def _drop_table(self, cr, uid, ids, context=None):
         for model in self.browse(cr, uid, ids, context):
+            # OpenUpgrade: do not run the new table cleanup
+            openupgrade.message(
+                cr, 'Unknown', False, False,
+                "Not dropping the table or view of model %s", model.model)
+            continue
             model_pool = self.pool[model.model]
             cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
             result = cr.fetchone()
@@ -304,6 +311,12 @@
         for field in self.browse(cr, uid, ids, context):
             if field.name in MAGIC_COLUMNS:
                 continue
+            # OpenUpgrade: do not run the new column cleanup
+            openupgrade.message(
+                cr, 'Unknown', False, False,
+                "Not dropping the column of field %s of model %s", field.name, field.model)
+            continue
+
             model = self.pool[field.model]
             cr.execute('select relkind from pg_class where relname=%s', (model._table,))
             result = cr.fetchone()
@@ -951,6 +964,10 @@
         return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
 
     def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
+        #OpenUpgrade: log entry (used in csv import)
+        if xml_id:
+            openupgrade_log.log_xml_id(cr, module, xml_id)
+
         model_obj = self.pool[model]
         if not context:
             context = {}
@@ -1177,7 +1194,17 @@
             for (model, res_id) in to_unlink:
                 if model in self.pool:
                     _logger.info('Deleting %s@%s', res_id, model)
-                    self.pool[model].unlink(cr, uid, [res_id])
+                    try:
+                        cr.execute('SAVEPOINT ir_model_data_delete');
+                        self.pool[model].unlink(cr, uid, [res_id])
+                        cr.execute('RELEASE SAVEPOINT ir_model_data_delete')
+                    except Exception:
+                        cr.execute('ROLLBACK TO SAVEPOINT ir_model_data_delete');
+                        _logger.warning(
+                            'Could not delete obsolete record with id: %d of model %s\n'
+                            'Please refer to the log message right above',
+                            res_id, model)
+
 
 class wizard_model_menu(osv.osv_memory):
     _name = 'wizard.ir.model.menu.create'

=== modified file 'openerp/addons/base/res/res_currency.py'
--- openerp/addons/base/res/res_currency.py	2013-11-15 13:25:53 +0000
+++ openerp/addons/base/res/res_currency.py	2014-04-08 14:07:20 +0000
@@ -106,9 +106,17 @@
         # we would allow duplicate "global" currencies (all having company_id == NULL) 
         cr.execute("""SELECT indexname FROM pg_indexes WHERE indexname = 'res_currency_unique_name_company_id_idx'""")
         if not cr.fetchone():
-            cr.execute("""CREATE UNIQUE INDEX res_currency_unique_name_company_id_idx
-                          ON res_currency
-                          (name, (COALESCE(company_id,-1)))""")
+            try:
+                cr.execute('SAVEPOINT index_currency');
+                cr.execute("""CREATE UNIQUE INDEX res_currency_unique_name_company_id_idx
+                           ON res_currency
+                           (name, (COALESCE(company_id,-1)))""")
+                cr.execute('RELEASE SAVEPOINT index_currency');
+            except Exception, e:
+                cr.execute('ROLLBACK TO SAVEPOINT index_currency');
+                import logging
+                logging.getLogger('OpenUpgrade').debug(
+                    'Could not create currency unique index: %s', e)
 
     def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
         res = super(res_currency, self).read(cr, user, ids, fields, context, load)

=== modified file 'openerp/modules/graph.py'
--- openerp/modules/graph.py	2013-03-27 17:06:39 +0000
+++ openerp/modules/graph.py	2014-04-08 14:07:20 +0000
@@ -92,12 +92,23 @@
             force = []
         packages = []
         len_graph = len(self)
+
+        # force additional dependencies for the upgrade process if given
+        # in config file
+        forced_deps = tools.config.get_misc('openupgrade', 'force_deps', '{}')
+        forced_deps = tools.config.get_misc('openupgrade',
+                                            'force_deps_' + release.version,
+                                            forced_deps)
+        forced_deps = tools.safe_eval.safe_eval(forced_deps)
+
         for module in module_list:
             # This will raise an exception if no/unreadable descriptor file.
             # NOTE The call to load_information_from_description_file is already
             # done by db.initialize, so it is possible to not do it again here.
             info = openerp.modules.module.load_information_from_description_file(module)
+
             if info and info['installable']:
+                info['depends'].extend(forced_deps.get(module, []))
                 packages.append((module, info)) # TODO directly a dict, like in get_modules_with_version
             else:
                 _logger.warning('module %s: not installable, skipped', module)

=== modified file 'openerp/modules/loading.py'
--- openerp/modules/loading.py	2014-03-17 15:18:10 +0000
+++ openerp/modules/loading.py	2014-04-08 14:07:20 +0000
@@ -43,11 +43,13 @@
 from openerp.modules.module import initialize_sys_path, \
     load_openerp_module, init_module_models, adapt_version
 
+from openerp.openupgrade import openupgrade_loading
+
 _logger = logging.getLogger(__name__)
 _test_logger = logging.getLogger('openerp.tests')
 
 
-def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=None, report=None):
+def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=None, report=None, upg_registry=None):
     """Migrates+Updates or Installs all module nodes from ``graph``
        :param graph: graph of module nodes to load
        :param status: status dictionary for keeping track of progress
@@ -121,6 +123,9 @@
     if status is None:
         status = {}
 
+    if skip_modules is None:
+        skip_modules = []
+
     processed_modules = []
     loaded_modules = []
     registry = openerp.registry(cr.dbname)
@@ -135,12 +140,18 @@
     for field in cr.dictfetchall():
         registry.fields_by_model.setdefault(field['model'], []).append(field)
 
+    # suppress commits to have the upgrade of one module in just one transaction
+    cr.commit_org = cr.commit
+    cr.commit = lambda *args: None
+    cr.rollback_org = cr.rollback
+    cr.rollback = lambda *args: None
+
     # register, instantiate and initialize models for each modules
     for index, package in enumerate(graph):
         module_name = package.name
         module_id = package.id
 
-        if skip_modules and module_name in skip_modules:
+        if module_name in skip_modules or module_name in loaded_modules:
             continue
 
         _logger.debug('module %s: loading objects', package.name)
@@ -151,6 +162,13 @@
 
         loaded_modules.append(package.name)
         if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
+            # OpenUpgrade: add this module's models to the registry
+            local_registry = {}
+            for model in models:
+                openupgrade_loading.log_model(model, local_registry)
+            openupgrade_loading.compare_registries(
+                cr, package.name, upg_registry, local_registry)
+
             init_module_models(cr, package.name, models)
         registry._init_modules.add(package.name)
         status['progress'] = float(index) / len(graph)
@@ -179,7 +197,13 @@
                 _load_data(cr, module_name, idref, mode, kind='demo')
                 cr.execute('update ir_module_module set demo=%s where id=%s', (True, module_id))
 
-            migrations.migrate_module(package, 'post')
+            # OpenUpgrade: add 'try' block for logging exceptions
+            # as errors in post scripts seem to be dropped
+            try:
+                migrations.migrate_module(package, 'post')
+            except Exception, e:
+                _logger.error('Error executing post migration script for module %s: %s', package, e)
+                raise
 
             if has_demo:
                 # launch tests only in demo mode, allowing tests to use demo data.
@@ -206,12 +230,13 @@
                 if hasattr(package, kind):
                     delattr(package, kind)
 
-        cr.commit()
+        cr.commit_org()
 
     # The query won't be valid for models created later (i.e. custom model
     # created after the registry has been loaded), so empty its result.
     registry.fields_by_model = None
-    
+
+    cr.commit = cr.commit_org
     cr.commit()
 
     return loaded_modules, processed_modules
@@ -230,16 +255,17 @@
             incorrect_names = mod_names.difference([x['name'] for x in cr.dictfetchall()])
             _logger.warning('invalid module names, ignored: %s', ", ".join(incorrect_names))
 
-def load_marked_modules(cr, graph, states, force, progressdict, report, loaded_modules, perform_checks):
+def load_marked_modules(cr, graph, states, force, progressdict, report, loaded_modules, perform_checks, upg_registry):
     """Loads modules marked with ``states``, adding them to ``graph`` and
        ``loaded_modules`` and returns a list of installed/upgraded modules."""
     processed_modules = []
     while True:
         cr.execute("SELECT name from ir_module_module WHERE state IN %s" ,(tuple(states),))
         module_list = [name for (name,) in cr.fetchall() if name not in graph]
+        module_list = openupgrade_loading.add_module_dependencies(cr, module_list)
         graph.add_modules(cr, module_list, force)
         _logger.debug('Updating graph with %d more modules', len(module_list))
-        loaded, processed = load_module_graph(cr, graph, progressdict, report=report, skip_modules=loaded_modules, perform_checks=perform_checks)
+        loaded, processed = load_module_graph(cr, graph, progressdict, report=report, skip_modules=loaded_modules, perform_checks=perform_checks, upg_registry=upg_registry)
         processed_modules.extend(processed)
         loaded_modules.extend(loaded)
         if not processed: break
@@ -255,6 +281,7 @@
     if force_demo:
         force.append('demo')
 
+    upg_registry = {}
     cr = db.cursor()
     try:
         if not openerp.modules.db.is_initialized(cr):
@@ -282,7 +309,7 @@
         # processed_modules: for cleanup step after install
         # loaded_modules: to avoid double loading
         report = registry._assertion_report
-        loaded_modules, processed_modules = load_module_graph(cr, graph, status, perform_checks=update_module, report=report)
+        loaded_modules, processed_modules = load_module_graph(cr, graph, status, perform_checks=update_module, report=report, upg_registry=upg_registry)
 
         if tools.config['load_language']:
             for lang in tools.config['load_language'].split(','):
@@ -331,11 +358,11 @@
             previously_processed = len(processed_modules)
             processed_modules += load_marked_modules(cr, graph,
                 ['installed', 'to upgrade', 'to remove'],
-                force, status, report, loaded_modules, update_module)
+                force, status, report, loaded_modules, update_module, upg_registry)
             if update_module:
                 processed_modules += load_marked_modules(cr, graph,
                     ['to install'], force, status, report,
-                    loaded_modules, update_module)
+                    loaded_modules, update_module, upg_registry)
 
         # load custom models
         cr.execute('select model from ir_model where state=%s', ('manual',))

=== modified file 'openerp/modules/migration.py'
--- openerp/modules/migration.py	2014-01-10 16:27:05 +0000
+++ openerp/modules/migration.py	2014-04-08 14:07:20 +0000
@@ -165,14 +165,16 @@
                         try:
                             mod = imp.load_source(name, pyfile, fp2)
                             _logger.info('module %(addon)s: Running migration %(version)s %(name)s' % mergedict({'name': mod.__name__}, strfmt))
-                            migrate = mod.migrate
                         except ImportError:
                             _logger.exception('module %(addon)s: Unable to load %(stage)s-migration file %(file)s' % mergedict({'file': pyfile}, strfmt))
                             raise
-                        except AttributeError:
+
+                        _logger.info('module %(addon)s: Running migration %(version)s %(name)s' % mergedict({'name': mod.__name__}, strfmt))
+
+                        if hasattr(mod, 'migrate'):
+                            mod.migrate(self.cr, pkg.installed_version)
+                        else:
                             _logger.error('module %(addon)s: Each %(stage)s-migration file must have a "migrate(cr, installed_version)" function' % strfmt)
-                        else:
-                            migrate(self.cr, pkg.installed_version)
                     finally:
                         if fp:
                             fp.close()

=== added directory 'openerp/openupgrade'
=== added file 'openerp/openupgrade/__init__.py'
=== added file 'openerp/openupgrade/openupgrade.py'
--- openerp/openupgrade/openupgrade.py	1970-01-01 00:00:00 +0000
+++ openerp/openupgrade/openupgrade.py	2014-04-08 14:07:20 +0000
@@ -0,0 +1,479 @@
+# -*- coding: utf-8 -*-
+##############################################################################
+#
+#    OpenERP, Open Source Management Solution
+#    This module copyright (C) 2011-2013 Therp BV (<http://therp.nl>)
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU Affero General Public License as
+#    published by the Free Software Foundation, either version 3 of the
+#    License, or (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU Affero General Public License for more details.
+#
+#    You should have received a copy of the GNU Affero General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+##############################################################################
+
+import os
+import inspect
+import logging
+from openerp import release, osv, pooler, tools, SUPERUSER_ID
+import openupgrade_tools
+
+# The server log level has not been set at this point
+# so to log at loglevel debug we need to set it
+# manually here. As a consequence, DEBUG messages from
+# this file are always logged
+logger = logging.getLogger('OpenUpgrade')
+logger.setLevel(logging.DEBUG)
+
+__all__ = [
+    'migrate',
+    'load_data',
+    'rename_columns',
+    'rename_tables',
+    'rename_models',
+    'rename_xmlids',
+    'drop_columns',
+    'delete_model_workflow',
+    'warn_possible_dataloss',
+    'set_defaults',
+    'logged_query',
+    'column_exists',
+    'table_exists',
+    'update_module_names',
+    'add_ir_model_fields',
+    'get_legacy_name',
+    'm2o_to_m2m',
+    'message',
+]    
+
+def load_data(cr, module_name, filename, idref=None, mode='init'):
+    """
+    Load an xml or csv data file from your post script. The usual case for this is the
+    occurrence of newly added essential or useful data in the module that is
+    marked with "noupdate='1'" and without "forcecreate='1'" so that it will
+    not be loaded by the usual upgrade mechanism. Leaving the 'mode' argument to
+    its default 'init' will load the data from your migration script.
+    
+    Theoretically, you could simply load a stock file from the module, but be 
+    careful not to reinitialize any data that could have been customized.
+    Preferably, select only the newly added items. Copy these to a file
+    in your migrations directory and load that file.
+    Leave it to the user to actually delete existing resources that are
+    marked with 'noupdate' (other named items will be deleted
+    automatically).
+
+
+    :param module_name: the name of the module
+    :param filename: the path to the filename, relative to the module \
+    directory.
+    :param idref: optional dictionary used as a cache, mapping XML ids to database ids
+    :param mode: one of 'init', 'update', 'demo'. Always use 'init' for adding new items \
+    from files that are marked with 'noupdate'. Defaults to 'init'.
+
+    """
+
+    if idref is None:
+        idref = {}
+    logger.info('%s: loading %s' % (module_name, filename))
+    _, ext = os.path.splitext(filename)
+    pathname = os.path.join(module_name, filename)
+    fp = tools.file_open(pathname)
+    try:
+        if ext == '.csv':
+            noupdate = True
+            tools.convert_csv_import(cr, module_name, pathname, fp.read(), idref, mode, noupdate)
+        else:
+            tools.convert_xml_import(cr, module_name, fp, idref, mode=mode)
+    finally:
+        fp.close()
+
+# for backwards compatibility
+load_xml = load_data
+table_exists = openupgrade_tools.table_exists
+
+def rename_columns(cr, column_spec):
+    """
+    Rename table columns. Typically called in the pre script.
+
+    :param column_spec: a hash with table keys, with lists of tuples as values. \
+    Tuples consist of (old_name, new_name). Use None for new_name to trigger a \
+    conversion of old_name using get_legacy_name()
+    """
+    for table in column_spec.keys():
+        for (old, new) in column_spec[table]:
+            if new is None:
+                new = get_legacy_name(old)
+            logger.info("table %s, column %s: renaming to %s",
+                     table, old, new)
+            cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (table, old, new,))
+            cr.execute('DROP INDEX IF EXISTS "%s_%s_index"' % (table, old))
+
+def rename_tables(cr, table_spec):
+    """
+    Rename tables. Typically called in the pre script.
+    This function also renames the id sequence if it exists and if it is
+    not modified in the same run.
+
+    :param table_spec: a list of tuples (old table name, new table name).
+
+    """
+    # Append id sequences
+    to_rename = [x[0] for x in table_spec]
+    for old, new in list(table_spec):
+        if (table_exists(cr, old + '_id_seq') and
+            old + '_id_seq' not in to_rename): 
+            table_spec.append((old + '_id_seq', new + '_id_seq'))
+    for (old, new) in table_spec:
+        logger.info("table %s: renaming to %s",
+                    old, new)
+        cr.execute('ALTER TABLE "%s" RENAME TO "%s"' % (old, new,))
+
+def rename_models(cr, model_spec):
+    """
+    Rename models. Typically called in the pre script.
+    :param model_spec: a list of tuples (old model name, new model name).
+    
+    Use case: if a model changes name, but still implements equivalent
+    functionality you will want to update references in for instance
+    relation fields.
+
+    """
+    for (old, new) in model_spec:
+        logger.info("model %s: renaming to %s",
+                    old, new)
+        cr.execute('UPDATE ir_model SET model = %s '
+                   'WHERE model = %s', (new, old,))
+        cr.execute('UPDATE ir_model_fields SET relation = %s '
+                   'WHERE relation = %s', (new, old,))
+    # TODO: signal where the model occurs in references to ir_model
+
+def rename_xmlids(cr, xmlids_spec):
+    """
+    Rename XML IDs. Typically called in the pre script.
+    One usage example is when an ID changes module. In OpenERP 6 for example,
+    a number of res_groups IDs moved to module base from other modules (
+    although they were still being defined in their respective module).
+    """
+    for (old, new) in xmlids_spec:
+        if not old.split('.') or not new.split('.'):
+            logger.error(
+            'Cannot rename XMLID %s to %s: need the module '
+            'reference to be specified in the IDs' % (old, new))
+        else:
+            query = ("UPDATE ir_model_data SET module = %s, name = %s "
+                     "WHERE module = %s and name = %s")
+            logged_query(cr, query, tuple(new.split('.') + old.split('.')))
+
+def drop_columns(cr, column_spec):
+    """
+    Drop columns but perform an additional check if a column exists.
+    This covers the case of function fields that may or may not be stored.
+    Consider that this may not be obvious: an additional module can govern
+    a function fields' store properties.
+
+    :param column_spec: a list of (table, column) tuples
+    """
+    for (table, column) in column_spec:
+        logger.info("table %s: drop column %s",
+                    table, column)
+        if column_exists(cr, table, column):
+            cr.execute('ALTER TABLE "%s" DROP COLUMN "%s"' % 
+                       (table, column))
+        else:
+            logger.warn("table %s: column %s did not exist",
+                    table, column)
+
+def delete_model_workflow(cr, model):
+    """ 
+    Forcefully remove active workflows for obsolete models,
+    to prevent foreign key issues when the orm deletes the model.
+    """
+    logged_query(
+        cr,
+        "DELETE FROM wkf_workitem WHERE act_id in "
+        "( SELECT wkf_activity.id "
+        "  FROM wkf_activity, wkf "
+        "  WHERE wkf_id = wkf.id AND "
+        "  wkf.osv = %s"
+        ")", (model,))
+    logged_query(
+        cr,
+        "DELETE FROM wkf WHERE osv = %s", (model,))
+
+def warn_possible_dataloss(cr, pool, old_module, fields):
+    """
+    Use this function in the following case:
+    a field of a model was moved from module 'A' to module 'B'
+    (where 'B' depends on 'A').
+    This function will test whether 'B' is installed.
+    If not, it counts the number of distinct values and possibly warns the user.
+    Uses the ORM, so call it from the post script.
+    
+    :param old_module: name of the old module
+    :param fields: list of dictionary with the following keys :
+        'table' : name of the table where the field is.
+        'field' : name of the field that are moving.
+        'new_module' : name of the new module
+
+    .. versionadded:: 7.0
+    """
+    module_obj = pool.get('ir.module.module')
+    for field in fields: 
+        module_ids = module_obj.search(cr, SUPERUSER_ID, [
+                ('name', '=', field['new_module']),
+                ('state', 'in', ['installed', 'to upgrade', 'to install'])
+            ])
+        if not module_ids: 
+            cr.execute(
+                "SELECT count(*) FROM (SELECT %s from %s group by %s) "
+                "as tmp" % (
+                    field['field'], field['table'], field['field']))
+            row = cr.fetchone()
+            if row[0] == 1: 
+                # not a problem, that field wasn't used.
+                # Just a loss of functionality
+                logger.info(
+                    "Field '%s' from module '%s' was moved to module "
+                    "'%s' which is not installed: "
+                    "No dataloss detected, only loss of functionality"
+                    %(field['field'], old_module, field['new_module']))
+            else: 
+                # there is data loss after the migration.
+                message(
+                    cr, old_module,
+                    "Field '%s' was moved to module "
+                    "'%s' which is not installed: "
+                    "There were %s distinct values in this field.",
+                    field['field'], field['new_module'], row[0])
+
+def set_defaults(cr, pool, default_spec, force=False):
+    """
+    Set default value. Useful for fields that are newly required. Uses orm, so
+    call from the post script.
+    
+    :param default_spec: a hash with model names as keys. Values are lists of \
+    tuples (field, value). None as a value has a special meaning: it assigns \
+    the default value. If this value is provided by a function, the function is \
+    called as the user that created the resource.
+    :param force: overwrite existing values. To be used for assigning a non- \
+    default value (presumably in the case of a new column). The ORM assigns \
+    the default value as declared in the model in an earlier stage of the \
+    process. Beware of issues with resources loaded from new data that \
+    actually do require the model's default, in combination with the post \
+    script possible being run multiple times.
+    """
+
+    def write_value(ids, field, value):
+        logger.debug("model %s, field %s: setting default value of resources %s to %s",
+                 model, field, ids, unicode(value))
+        for res_id in ids:
+            # Iterating over ids here as a workaround for lp:1131653
+            obj.write(cr, SUPERUSER_ID, [res_id], {field: value})
+
+    for model in default_spec.keys():
+        obj = pool.get(model)
+        if not obj:
+            raise osv.except_osv("Migration: error setting default, no such model: %s" % model, "")
+
+        for field, value in default_spec[model]:
+            domain = not force and [(field, '=', False)] or []
+            ids = obj.search(cr, SUPERUSER_ID, domain)
+            if not ids:
+                continue
+            if value is None:
+                # Set the value by calling the _defaults of the object.
+                # Typically used for company_id on various models, and in that
+                # case the result depends on the user associated with the object.
+                # We retrieve create_uid for this purpose and need to call the _defaults
+                # function per resource. Otherwise, write all resources at once.
+                if field in obj._defaults:
+                    if not callable(obj._defaults[field]):
+                        write_value(ids, field, obj._defaults[field])
+                    else:
+                        # existence users is covered by foreign keys, so this is not needed
+                        # cr.execute("SELECT %s.id, res_users.id FROM %s LEFT OUTER JOIN res_users ON (%s.create_uid = res_users.id) WHERE %s.id IN %s" %
+                        #                     (obj._table, obj._table, obj._table, obj._table, tuple(ids),))
+                        cr.execute("SELECT id, COALESCE(create_uid, 1) FROM %s " % obj._table + "WHERE id in %s", (tuple(ids),))
+                        # Execute the function once per user_id
+                        user_id_map = {}
+                        for row in cr.fetchall():
+                            user_id_map.setdefault(row[1], []).append(row[0])
+                        for user_id in user_id_map:
+                            write_value(
+                                user_id_map[user_id], field,
+                                obj._defaults[field](obj, cr, user_id, None))
+                else:
+                    error = ("OpenUpgrade: error setting default, field %s with "
+                             "None default value not in %s' _defaults" % (
+                            field, model))
+                    logger.error(error)
+                    # this exception seems to get lost in a higher up try block
+                    osv.except_osv("OpenUpgrade", error)
+            else:
+                write_value(ids, field, value)
+    
+def logged_query(cr, query, args=None):
+    """
+    Logs query and affected rows at level DEBUG
+    """
+    if args is None:
+        args = []
+    res = cr.execute(query, args)
+    logger.debug('Running %s', query % tuple(args))
+    logger.debug('%s rows affected', cr.rowcount)
+    return cr.rowcount
+
+def column_exists(cr, table, column):
+    """ Check whether a certain column exists """
+    cr.execute(
+        'SELECT count(attname) FROM pg_attribute '
+        'WHERE attrelid = '
+        '( SELECT oid FROM pg_class WHERE relname = %s ) '
+        'AND attname = %s',
+        (table, column));
+    return cr.fetchone()[0] == 1
+
+def update_module_names(cr, namespec):
+    """
+    Deal with changed module names of certified modules
+    in order to prevent 'certificate not unique' error,
+    as well as updating the module reference in the
+    XML id.
+    
+    :param namespec: tuple of (old name, new name)
+    """
+    for (old_name, new_name) in namespec:
+        query = ("UPDATE ir_module_module SET name = %s "
+                 "WHERE name = %s")
+        logged_query(cr, query, (new_name, old_name))
+        query = ("UPDATE ir_model_data SET module = %s "
+                 "WHERE module = %s ")
+        logged_query(cr, query, (new_name, old_name))
+        query = ("UPDATE ir_module_module_dependency SET name = %s "
+                 "WHERE name = %s")
+        logged_query(cr, query, (new_name, old_name))
+
+def add_ir_model_fields(cr, columnspec):
+    """
+    Typically, new columns on ir_model_fields need to be added in a very
+    early stage in the upgrade process of the base module, in raw sql
+    as they need to be in place before any model gets initialized.
+    Do not use for fields with additional SQL constraints, such as a
+    reference to another table or the cascade constraint, but craft your
+    own statement taking them into account.
+    
+    :param columnspec: tuple of (column name, column type)
+    """
+    for column in columnspec:
+        query = 'ALTER TABLE ir_model_fields ADD COLUMN %s %s' % (
+            column)
+        logged_query(cr, query, [])
+
+def get_legacy_name(original_name):
+    """
+    Returns a versioned name for legacy tables/columns/etc
+    Use this function instead of some custom name to avoid
+    collisions with future or past legacy tables/columns/etc
+
+    :param original_name: the original name of the column
+    :param version: current version as passed to migrate()
+    """
+    return 'openupgrade_legacy_'+('_').join(
+        map(str, release.version_info[0:2]))+'_'+original_name
+
+def m2o_to_m2m(cr, model, table, field, source_field):
+    """
+    Recreate relations in many2many fields that were formerly
+    many2one fields. Use rename_columns in your pre-migrate
+    script to retain the column's old value, then call m2o_to_m2m
+    in your post-migrate script.
+
+    :param model: The target model pool object
+    :param table: The source table
+    :param field: The field name of the target model
+    :param source_field: the many2one column on the source table.
+
+    .. versionadded:: 7.0
+    """
+    cr.execute('SELECT id, %(field)s '
+               'FROM %(table)s '
+               'WHERE %(field)s is not null' % {
+                   'table': table,
+                   'field': source_field,
+                   })
+    for row in cr.fetchall():
+        model.write(cr, SUPERUSER_ID, row[0], {field: [(4, row[1])]})
+
+def message(cr, module, table, column,
+            message, *args, **kwargs):
+    """
+    Log handler for non-critical notifications about the upgrade.
+    To be extended with logging to a table for reporting purposes.
+
+    :param module: the module name that the message concerns
+    :param table: the model that this message concerns (may be False, \
+    but preferably not if 'column' is defined)
+    :param column: the column that this message concerns (may be False)
+
+    .. versionadded:: 7.0
+    """
+    argslist = list(args or [])
+    prefix = ': '
+    if column:
+        argslist.insert(0, column)
+        prefix = ', column %s' + prefix
+    if table:
+        argslist.insert(0, table)
+        prefix = ', table %s' + prefix
+    argslist.insert(0, module)
+    prefix = 'Module %s' + prefix
+
+    logger.warn(prefix + message, *argslist, **kwargs)
+
+def migrate():
+    """
+    This is the decorator for the migrate() function
+    in migration scripts.
+    Return early when the 'version' argument is not defined,
+    and log exceptions.
+    Retrieve debug context data from the frame above for
+    logging purposes.
+    """
+    def wrap(func):
+        def wrapped_function(cr, version):
+            stage =  'unknown'
+            module = 'unknown'
+            filename = 'unknown'
+            try:
+                frame = inspect.getargvalues(inspect.stack()[1][0])
+                stage = frame.locals['stage']
+                module = frame.locals['pkg'].name
+                filename = frame.locals['fp'].name
+            except Exception, e:
+                logger.error(
+                    "'migrate' decorator: failed to inspect "
+                    "the frame above: %s" % e)
+                pass
+            if not version:
+                return
+            logger.info(
+                "%s: %s-migration script called with version %s" %
+                (module, stage, version))
+            try:
+                # The actual function is called here
+                func(cr, version)
+            except Exception, e:
+                logger.error(
+                    "%s: error in migration script %s: %s" % 
+                    (module, filename, str(e).decode('utf8')))
+                logger.exception(e)
+                raise
+        return wrapped_function
+    return wrap

=== added file 'openerp/openupgrade/openupgrade_loading.py'
--- openerp/openupgrade/openupgrade_loading.py	1970-01-01 00:00:00 +0000
+++ openerp/openupgrade/openupgrade_loading.py	2014-04-08 14:07:20 +0000
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+##############################################################################
+#
+#    OpenERP, Open Source Management Solution
+#    This module copyright (C) 2011-2012 Therp BV (<http://therp.nl>)
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU Affero General Public License as
+#    published by the Free Software Foundation, either version 3 of the
+#    License, or (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU Affero General Public License for more details.
+#
+#    You should have received a copy of the GNU Affero General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+##############################################################################
+
+import types
+from openerp import release
+from openerp.osv.orm import TransientModel
+from openerp.osv import fields
+from openerp.openupgrade.openupgrade import table_exists
+from openerp.tools import config, safe_eval
+
+# A collection of functions used in 
+# openerp/modules/loading.py
+
+def add_module_dependencies(cr, module_list):
+    """
+    Select (new) dependencies from the modules in the list
+    so that we can inject them into the graph at upgrade
+    time. Used in the modified OpenUpgrade Server,
+    not to be called from migration scripts
+
+    Also take the OpenUpgrade configuration directives 'forced_deps'
+    and 'autoinstall' into account. From any additional modules
+    that these directives can add, the dependencies are added as
+    well (but these directives are not checked for the occurrence
+    of any of the dependencies).
+    """
+    if not module_list:
+        return module_list
+
+    forced_deps = safe_eval.safe_eval(
+        config.get_misc(
+            'openupgrade', 'forced_deps_' + release.version, 
+            config.get_misc('openupgrade', 'forced_deps', '{}')))
+
+    autoinstall = safe_eval.safe_eval(
+        config.get_misc(
+            'openupgrade', 'autoinstall_' + release.version, 
+            config.get_misc('openupgrade', 'autoinstall', '{}')))
+
+    for module in list(module_list):
+        module_list += forced_deps.get(module, [])
+        module_list += autoinstall.get(module, [])
+
+    cr.execute("""
+        SELECT ir_module_module_dependency.name
+        FROM
+            ir_module_module,
+            ir_module_module_dependency
+        WHERE
+            module_id = ir_module_module.id
+            AND ir_module_module.name in %s
+        """, (tuple(module_list),))
+
+    return list(set(
+            module_list + [x[0] for x in cr.fetchall()]
+            ))
+
+def log_model(model, local_registry):
+    """
+    OpenUpgrade: Store the characteristics of the BaseModel and its fields
+    in the local registry, so that we can compare changes with the
+    main registry
+    """
+
+    if not model._name: # new in 6.1
+        return
+
+    # persistent models only
+    if isinstance(model, TransientModel):
+        return
+
+    model_registry = local_registry.setdefault(
+            model._name, {})
+    if model._inherits:
+        model_registry['_inherits'] = {'_inherits': unicode(model._inherits)}
+    for k, v in model._columns.items():
+        properties = { 
+            'type': v._type,
+            'isfunction': (
+                isinstance(v, fields.function) and 'function' or ''),
+            'relation': (
+                v._type in ('many2many', 'many2one','one2many')
+                and v._obj or ''
+                ),
+            'required': v.required and 'required' or '',
+            'selection_keys': '',
+            'req_default': '',
+            'inherits': '',
+            }
+        if v._type == 'selection':
+            if hasattr(v.selection, "__iter__"):
+                properties['selection_keys'] = unicode(
+                    sorted([x[0] for x in v.selection]))
+            else:
+                properties['selection_keys'] = 'function'
+        if v.required and k in model._defaults:
+            if isinstance(model._defaults[k], types.FunctionType):
+                # todo: in OpenERP 5 (and in 6 as well),
+                # literals are wrapped in a lambda function
+                properties['req_default'] = 'function'
+            else:
+                properties['req_default'] = unicode(
+                    model._defaults[k])
+        for key, value in properties.items():
+            if value:
+                model_registry.setdefault(k, {})[key] = value
+
+def get_record_id(cr, module, model, field, mode):
+    """
+    OpenUpgrade: get or create the id from the record table matching
+    the key parameter values
+    """
+    cr.execute(
+        "SELECT id FROM openupgrade_record "
+        "WHERE module = %s AND model = %s AND "
+        "field = %s AND mode = %s AND type = %s",
+        (module, model, field, mode, 'field')
+        )
+    record = cr.fetchone()
+    if record:
+        return record[0]
+    cr.execute(
+        "INSERT INTO openupgrade_record "
+        "(module, model, field, mode, type) "
+        "VALUES (%s, %s, %s, %s, %s)",
+        (module, model, field, mode, 'field')
+        )
+    cr.execute(
+        "SELECT id FROM openupgrade_record "
+        "WHERE module = %s AND model = %s AND "
+        "field = %s AND mode = %s AND type = %s",
+        (module, model, field, mode, 'field')
+        )
+    return cr.fetchone()[0]
+
+def compare_registries(cr, module, registry, local_registry):
+    """
+    OpenUpgrade: Compare the local registry with the global registry,
+    log any differences and merge the local registry with
+    the global one.
+    """
+    if not table_exists(cr, 'openupgrade_record'):
+        return
+    for model, fields in local_registry.items():
+        registry.setdefault(model, {})
+        for field, attributes in fields.items():
+            old_field = registry[model].setdefault(field, {})
+            mode = old_field and 'modify' or 'create'
+            record_id = False
+            for key, value in attributes.items():
+                if key not in old_field or old_field[key] != value:
+                    if not record_id:
+                        record_id = get_record_id(
+                            cr, module, model, field, mode)
+                    cr.execute(
+                        "SELECT id FROM openupgrade_attribute "
+                        "WHERE name = %s AND value = %s AND "
+                        "record_id = %s",
+                        (key, value, record_id)
+                        )
+                    if not cr.fetchone():
+                        cr.execute(
+                            "INSERT INTO openupgrade_attribute "
+                            "(name, value, record_id) VALUES (%s, %s, %s)",
+                            (key, value, record_id)
+                            )
+                    old_field[key] = value

=== added file 'openerp/openupgrade/openupgrade_log.py'
--- openerp/openupgrade/openupgrade_log.py	1970-01-01 00:00:00 +0000
+++ openerp/openupgrade/openupgrade_log.py	2014-04-08 14:07:20 +0000
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+from openupgrade_tools import table_exists
+
+def log_xml_id(cr, module, xml_id):
+    """
+    Log xml_ids at load time in the records table.
+    Called from openerp/tools/convert.py:xml_import._test_xml_id()
+
+    # Gotchas
+    - The module needs to be loaded with 'init', or the calling method
+    won't be called. This can be brought about by installing the
+    module or updating the 'state' field of the module to 'to install'
+    or call the server with '--init <module>' and the database argument.
+    
+    - Do you get the right results immediately when installing the module?
+    No, sorry. This method retrieves the model from the ir_model_data table, but when
+    the xml id is encountered for the first time, this method is called
+    before the item is present in this table. Therefore, you will not
+    get any meaningful results until the *second* time that you 'init'
+    the module.
+
+    - The good news is that the openupgrade_records module that comes
+    with this distribution allows you to deal with all of this with
+    one click on the menu item Settings -> Customizations ->
+    Database Structure -> OpenUpgrade -> Generate Records
+
+    - You cannot reinitialize the modules in your production database
+    and expect to keep working on it happily ever after. Do not perform
+    this routine on your production database.
+
+    :param module: The module that contains the xml_id
+    :param xml_id: the xml_id, with or without 'module.' prefix
+    """
+    if not table_exists(cr, 'openupgrade_record'):
+        return
+    if not '.' in xml_id:
+        xml_id = '%s.%s' % (module, xml_id)
+    cr.execute(
+        "SELECT model FROM ir_model_data "
+        "WHERE module = %s AND name = %s",
+        xml_id.split('.'))
+    record = cr.fetchone()
+    if not record:
+        print "Cannot find xml_id %s" % xml_id
+        return
+    else:
+        cr.execute(
+            "SELECT id FROM openupgrade_record "
+            "WHERE module=%s AND model=%s AND name=%s AND type=%s",
+            (module, record[0], xml_id, 'xmlid'))
+        if not cr.fetchone():
+            cr.execute(
+                "INSERT INTO openupgrade_record "
+                "(module, model, name, type) values(%s, %s, %s, %s)",
+                (module, record[0], xml_id, 'xmlid'))
+

=== added file 'openerp/openupgrade/openupgrade_tools.py'
--- openerp/openupgrade/openupgrade_tools.py	1970-01-01 00:00:00 +0000
+++ openerp/openupgrade/openupgrade_tools.py	2014-04-08 14:07:20 +0000
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 -*-
+def table_exists(cr, table):
+    """ Check whether a certain table or view exists """
+    cr.execute(
+        'SELECT count(relname) FROM pg_class WHERE relname = %s',
+        (table,))
+    return cr.fetchone()[0] == 1
+

=== modified file 'openerp/osv/orm.py'
--- openerp/osv/orm.py	2014-03-18 12:41:12 +0000
+++ openerp/osv/orm.py	2014-04-08 14:07:20 +0000
@@ -1306,6 +1306,7 @@
 
         position = 0
         try:
+            cr.execute('SAVEPOINT convert_records')
             for res_id, xml_id, res, info in self._convert_records(cr, uid,
                             self._extract_records(cr, uid, fields, datas,
                                                   context=context, log=log),
@@ -1323,8 +1324,9 @@
                     if context.get('defer_parent_store_computation'):
                         self._parent_store_compute(cr)
                     cr.commit()
+            cr.execute('RELEASE SAVEPOINT convert_records')
         except Exception, e:
-            cr.rollback()
+            cr.execute('ROLLBACK TO SAVEPOINT convert_records')
             return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
 
         if context.get('defer_parent_store_computation'):
@@ -2827,11 +2829,15 @@
                                 # add the NOT NULL constraint
                                 cr.commit()
                                 try:
+                                    #use savepoints for openupgrade instead of transactions
+                                    cr.execute('SAVEPOINT add_constraint');
                                     cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
+                                    cr.execute('RELEASE SAVEPOINT add_constraint');
                                     cr.commit()
                                     _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
                                         self._table, k)
                                 except Exception:
+                                    cr.execute('ROLLBACK TO SAVEPOINT add_constraint');
                                     msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
                                         "If you want to have it, you should update the records and execute manually:\n"\
                                         "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
@@ -2909,11 +2915,14 @@
                                 cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                             if f.required:
                                 try:
-                                    cr.commit()
+                                    #use savepoints for openupgrade instead of transactions
+                                    cr.execute('SAVEPOINT add_constraint');
                                     cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
                                     _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
                                         self._table, k)
+                                    cr.execute('RELEASE SAVEPOINT add_constraint');
                                 except Exception:
+                                    cr.execute('ROLLBACK TO SAVEPOINT add_constraint');
                                     msg = "WARNING: unable to set column %s of table %s not null !\n"\
                                         "Try to re-run: openerp-server --update=module\n"\
                                         "If it doesn't work, update records and execute manually:\n"\
@@ -3104,12 +3113,14 @@
             sql_actions.sort(key=lambda x: x['order'])
             for sql_action in [action for action in sql_actions if action['execute']]:
                 try:
+                    #use savepoints for openupgrade instead of transactions
+                    cr.execute('SAVEPOINT add_constraint2');
                     cr.execute(sql_action['query'])
-                    cr.commit()
+                    cr.execute('RELEASE SAVEPOINT add_constraint2');
                     _schema.debug(sql_action['msg_ok'])
                 except:
                     _schema.warning(sql_action['msg_err'])
-                    cr.rollback()
+                    cr.execute('ROLLBACK TO SAVEPOINT add_constraint2');
 
 
     def _execute_sql(self, cr):

=== modified file 'openerp/tools/convert.py'
--- openerp/tools/convert.py	2014-01-16 09:17:16 +0000
+++ openerp/tools/convert.py	2014-04-08 14:07:20 +0000
@@ -66,6 +66,8 @@
 unsafe_eval = eval
 from safe_eval import safe_eval as eval
 
+from openerp.openupgrade import openupgrade_log
+
 class ParseError(Exception):
     def __init__(self, msg, text, filename, lineno):
         self.msg = msg
@@ -296,6 +298,7 @@
 
         if len(id) > 64:
             _logger.error('id: %s is to long (max: 64)', id)
+        openupgrade_log.log_xml_id(self.cr, self.module, xml_id)
 
     def _tag_delete(self, cr, rec, data_node=None):
         d_model = rec.get("model",'')

=== added file 'scripts/compare_noupdate_xml_records.py'
--- scripts/compare_noupdate_xml_records.py	1970-01-01 00:00:00 +0000
+++ scripts/compare_noupdate_xml_records.py	2014-04-08 14:07:20 +0000
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+##############################################################################
+#
+#    OpenERP, Open Source Management Solution
+#    This module copyright (C) 2013 Therp BV (<http://therp.nl>).
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU Affero General Public License as
+#    published by the Free Software Foundation, either version 3 of the
+#    License, or (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU Affero General Public License for more details.
+#
+#    You should have received a copy of the GNU Affero General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+##############################################################################
+
+import sys, os, ast, argparse
+from copy import deepcopy
+from lxml import etree
+
+def read_manifest(addon_dir):
+    with open(os.path.join(addon_dir, '__openerp__.py'), 'r') as f:
+        manifest_string = f.read()
+    return ast.literal_eval(manifest_string)
+
+# from openerp.tools
+def nodeattr2bool(node, attr, default=False):
+    if not node.get(attr):
+        return default
+    val = node.get(attr).strip()
+    if not val:
+        return default
+    return val.lower() not in ('0', 'false', 'off')
+
+def get_node_dict(element):
+    res = {}
+    for child in element:
+        if 'name' in child.attrib:
+            key = "./%s[@name='%s']" % (
+                child.tag, child.attrib['name'])
+            res[key] = child
+    return res
+
+def get_node_value(element):
+    if 'eval' in element.attrib.keys():
+        return element.attrib['eval']
+    if 'ref' in element.attrib.keys():
+        return element.attrib['ref']
+    if not len(element):
+        return element.text
+    return etree.tostring(element)
+
+def update_node(target, source):
+    for element in source:
+        if 'name' in element.attrib:
+            query = "./%s[@name='%s']" % (
+                element.tag, element.attrib['name'])
+        else:
+            # query = "./%s" % element.tag
+            continue
+        for existing in target.xpath(query):
+            target.remove(existing)
+        target.append(element)
+
+def get_records(addon_dir):
+    addon_dir = addon_dir.rstrip(os.sep)
+    addon_name = os.path.basename(addon_dir)
+    manifest = read_manifest(addon_dir)
+    # The order of the keys is important.
+    # Load files in the same order as in 
+    # module/loading.py:load_module_graph
+    keys = ['init_xml', 'update_xml', 'data']
+    records_update = {}
+    records_noupdate = {}
+
+    def process_data_node(data_node):
+        noupdate = nodeattr2bool(data_node, 'noupdate', False)
+        record_nodes = data_node.xpath("./record")
+        for record in record_nodes:
+            xml_id = record.get("id")
+            if ('.' in xml_id
+                    and xml_id.startswith(addon_name + '.')):
+                xml_id = xml_id[len(addon_name) + 1:]
+            for records in records_noupdate, records_update:
+                # records can occur multiple times in the same module
+                # with different noupdate settings
+                if xml_id in records:
+                    # merge records (overwriting an existing element
+                    # with the same tag). The order processing the
+                    # various directives from the manifest is
+                    # important here
+                    update_node(records[xml_id], record)
+                    break
+            else:
+                target_dict = (
+                    records_noupdate if noupdate else records_update)
+                target_dict[xml_id] = record
+
+    for key in keys:
+        if not manifest.get(key):
+            continue
+        for xml_file in manifest[key]:
+            xml_path = xml_file.split('/')
+            try:
+                tree = etree.parse(os.path.join(addon_dir, *xml_path))
+            except etree.XMLSyntaxError:
+                continue
+            for data_node in tree.xpath("/openerp/data"):
+                process_data_node(data_node)
+    return records_update, records_noupdate
+
+def main(argv=None):
+    """
+    Attempt to represent the differences in data records flagged with
+    'noupdate' between two different versions of the same OpenERP module.
+
+    Print out a complete XML data file that can be loaded in a post-migration
+    script using openupgrade::load_xml().
+
+    Known issues:
+    - Does not detect if a deleted value belongs to a field
+      which has been removed.
+    - Ignores forcecreate=False. This hardly occurs, but you should
+      check manually for new data records with this tag. Note that
+      'True' is the default value for data elements without this tag.
+    - Does not take csv data into account (obviously)
+    - Is not able to check cross module data
+    - etree's pretty_print is not *that* pretty
+    - Does not take translations into account (e.g. in the case of
+      email templates)
+    - Does not handle the shorthand records <menu>, <act_window> etc.,
+      although that could be done using the same expansion logic as
+      is used in their parsers in openerp/tools/convert.py 
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        'olddir', metavar='older_module_directory')
+    parser.add_argument(
+        'newdir', metavar='newer_module_directory')
+    arguments = parser.parse_args(argv)
+
+    old_update, old_noupdate = get_records(arguments.olddir)
+    new_update, new_noupdate = get_records(arguments.newdir)
+
+    data = etree.Element("data")
+
+    for xml_id, record_new in new_noupdate.items():
+        record_old = None
+        if xml_id in old_update:
+            record_old = old_update[xml_id]
+        elif xml_id in old_noupdate:
+            record_old = old_noupdate[xml_id]
+    
+        if record_old is None:
+            continue
+
+        element = etree.Element(
+            "record", id=xml_id, model=record_new.attrib['model'])
+        record_old_dict = get_node_dict(record_old)
+        record_new_dict = get_node_dict(record_new)
+        for key in record_old_dict.keys():
+            if not record_new.xpath(key):
+                # The element is no longer present.
+                # Overwrite an existing value with an
+                # empty one. Of course, we do not know
+                # if this field has actually been removed
+                attribs = deepcopy(record_old_dict[key]).attrib
+                for attr in ['eval', 'ref']:
+                    if attr in attribs:
+                        del attribs[attr]
+                element.append(etree.Element(record_old_dict[key].tag, attribs))
+            else:
+                oldrepr = get_node_value(record_old_dict[key])
+                newrepr = get_node_value(record_new_dict[key])
+            
+                if oldrepr != newrepr:
+                    element.append(deepcopy(record_new_dict[key]))
+
+        for key in record_new_dict.keys():
+            if not record_old.xpath(key):
+                element.append(deepcopy(record_new_dict[key]))
+
+        if len(element):
+            data.append(element)
+
+    openerp = etree.Element("openerp")
+    openerp.append(data)
+    document = etree.ElementTree(openerp)
+
+    print etree.tostring(
+        document, pretty_print=True, xml_declaration=True, encoding='utf-8')
+
+if __name__ == "__main__":
+    main()
+

=== added file 'scripts/migrate.py'
--- scripts/migrate.py	1970-01-01 00:00:00 +0000
+++ scripts/migrate.py	2014-04-08 14:07:20 +0000
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+
+import os
+import sys
+import StringIO
+import psycopg2
+import psycopg2.extensions
+from optparse import OptionParser
+from ConfigParser import SafeConfigParser
+from bzrlib.branch import Branch
+from bzrlib.repository import Repository
+from bzrlib.workingtree import WorkingTree
+import bzrlib.plugin
+import bzrlib.builtins
+import bzrlib.info
+
+migrations={
+  '7.0': {
+      'addons': {
+          'addons': 'lp:openupgrade-addons/7.0',
+          'web': {'url': 'lp:openerp-web/7.0', 'addons_dir': 'addons'},
+        },
+      'server': {
+          'url': 'lp:openupgrade-server/7.0', 
+          'addons_dir': os.path.join('openerp','addons'),
+          'root_dir': os.path.join(''),
+          'cmd': 'openerp-server --update=all --database=%(db)s '+
+            '--config=%(config)s --stop-after-init --no-xmlrpc --no-netrpc',
+        },
+    },
+  '6.1': {
+      'addons': {
+          'addons': 'lp:openupgrade-addons/6.1',
+          'web': {'url': 'lp:openerp-web/6.1', 'addons_dir': 'addons'},
+        },
+      'server': {
+          'url': 'lp:openupgrade-server/6.1', 
+          'addons_dir': os.path.join('openerp','addons'),
+          'root_dir': os.path.join(''),
+          'cmd': 'openerp-server --update=all --database=%(db)s '+
+            '--config=%(config)s --stop-after-init --no-xmlrpc --no-netrpc',
+        },
+    },
+  '6.0': {
+      'addons': {
+          'addons': 'lp:openupgrade-addons/6.0',
+        },
+      'server': {
+          'url': 'lp:openupgrade-server/6.0',
+          'addons_dir': os.path.join('bin','addons'),
+          'root_dir': os.path.join('bin'),
+          'cmd': 'bin/openerp-server.py --update=all --database=%(db)s '+
+            '--config=%(config)s --stop-after-init --no-xmlrpc --no-netrpc',
+        },
+    },
+}
+config = SafeConfigParser()
+parser = OptionParser(description="""Migrate script for the impatient or lazy.
+Makes a copy of your database, downloads the files necessary to migrate
+it as requested and runs the migration on the copy (so your original 
+database will not be touched). While the migration is running only errors are 
+shown, for a detailed log see ${branch-dir}/migration.log
+""")
+parser.add_option("-C", "--config", action="store", type="string", 
+        dest="config", 
+        help="current openerp config (required)")
+parser.add_option("-D", "--database", action="store", type="string", 
+        dest="database", 
+        help="current openerp database (required if not given in config)")
+parser.add_option("-B", "--branch-dir", action="store", type="string", 
+        dest="branch_dir", 
+        help="the directory to download openupgrade-server code to [%default]", 
+        default='/var/tmp/openupgrade')
+parser.add_option("-R", "--run-migrations", action="store", type="string", 
+        dest="migrations", 
+        help="comma separated list of migrations to run, ie. \""+
+                ','.join(sorted([a for a in migrations]))+
+                "\" (required)")
+parser.add_option("-A", "--add", action="store", type="string", dest="add",
+        help="load a python module that declares a dict 'migrations' which is "+
+        "merged with the one of this script (see the source for details). "
+        "You also can pass a string that evaluates to a dict. For the banking "
+        "addons, pass "
+        "\"{'6.1': {'addons': {'banking': 'lp:banking-addons/6.1'}}}\"")
+parser.add_option("-I", "--inplace", action="store_true", dest="inplace",
+        help="don't copy database before attempting upgrade (dangerous)")
+(options, args) = parser.parse_args()
+
+if (not options.config or not options.migrations 
+        or not reduce(lambda a,b: a and (b in migrations), 
+                        options.migrations.split(','), True)):
+  parser.print_help()
+  sys.exit()
+
+config.read(options.config)
+
+conn_parms = {}
+for parm in ('host', 'port', 'user', 'password'):                             
+    db_parm = 'db_' + parm
+    if config.has_option('options', db_parm):
+        conn_parms[parm] = config.get('options', db_parm)
+
+if not 'user' in conn_parms:
+    print 'No user found in configuration'
+    sys.exit()
+db_user = conn_parms['user']
+
+db_name=options.database or config.get('options', 'db_name')
+
+if not db_name or db_name=='' or db_name.isspace() or db_name.lower()=='false':
+  parser.print_help()
+  sys.exit()
+
+conn_parms['database'] = db_name
+
+if options.inplace:
+  db=db_name
+else:
+  db=db_name+'_migrated'
+
+if options.add:
+    merge_migrations={}
+    if os.path.isfile(options.add):
+        import imp
+        merge_migrations_mod=imp.load_source('merge_migrations_mod', 
+                options.add)
+        merge_migrations=merge_migrations_mod.migrations
+    else:
+        merge_migrations=eval(options.add)
+
+    def deep_update(dict1, dict2):
+        result={}
+        for (name,value) in dict1.iteritems():
+            if dict2.has_key(name):
+                if isinstance(dict1[name], dict) and isinstance(dict2[name], 
+                    dict):
+                    result[name]=deep_update(dict1[name], dict2[name])
+                else:
+                    result[name]=dict2[name]
+            else:
+                result[name]=dict1[name]
+        for (name,value) in dict2.iteritems():
+            if name not in dict1:
+                result[name]=value
+        return result
+
+    migrations=deep_update(migrations, merge_migrations)
+
+for version in options.migrations.split(','):
+    if version not in migrations:
+        print '%s is not a valid version! (valid verions are %s)' % (version,
+                ','.join(sorted([a for a in migrations])))
+
+bzrlib.plugin.load_plugins()
+bzrlib.trace.enable_default_logging()
+logfile=os.path.join(options.branch_dir,'migration.log')
+
+if not os.path.exists(options.branch_dir):
+    os.mkdir(options.branch_dir)
+
+for version in options.migrations.split(','):
+    if not os.path.exists(os.path.join(options.branch_dir,version)):
+        os.mkdir(os.path.join(options.branch_dir,version))
+    for (name,url) in dict(migrations[version]['addons'], 
+            server=migrations[version]['server']['url']).iteritems():
+        link=url.get('link', False) if isinstance(url, dict) else False
+        url=url['url'] if isinstance(url, dict) else url
+        if os.path.exists(os.path.join(options.branch_dir,version,name)):
+            if link:
+                continue
+            cmd_revno=bzrlib.builtins.cmd_revno()
+            cmd_revno.outf=StringIO.StringIO()
+            cmd_revno.run(location=os.path.join(options.branch_dir,version,
+                name))
+            print 'updating %s rev%s' %(os.path.join(version,name),
+                    cmd_revno.outf.getvalue().strip())
+            cmd_update=bzrlib.builtins.cmd_update()
+            cmd_update.outf=StringIO.StringIO()
+            cmd_update.outf.encoding='utf8'
+            cmd_update.run(dir=os.path.join(options.branch_dir,version,
+                name))
+            if hasattr(cmd_update, '_operation'):
+                cmd_update.cleanup_now()
+            print 'now at rev'+cmd_revno.outf.getvalue().strip()
+        else:
+            if link:
+                print 'linking %s to %s'%(url, 
+                        os.path.join(options.branch_dir,version,name))
+                os.symlink(url, os.path.join(options.branch_dir,version,name))
+            else:
+                print 'getting '+url
+                cmd_checkout=bzrlib.builtins.cmd_checkout()
+                cmd_checkout.outf=StringIO.StringIO()
+                cmd_checkout.run(url, os.path.join(options.branch_dir,version,
+                    name), lightweight=True)
+
+if not options.inplace:
+    print('copying database %(db_name)s to %(db)s...' % {'db_name': db_name, 
+                                                         'db': db})
+    conn = psycopg2.connect(**conn_parms)
+    conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+    cur=conn.cursor()
+    cur.execute('drop database if exists "%(db)s"' % {'db': db})
+    cur.execute('create database "%(db)s"' % {'db': db})
+    cur.close()
+
+    os.environ['PGUSER'] = db_user             
+    if ('host' in conn_parms and conn_parms['host']
+    and not os.environ.get('PGHOST')):
+        os.environ['PGHOST'] = conn_parms['host']             
+
+    if ('port' in conn_parms and conn_parms['port']
+    and not os.environ.get('PGPORT')):
+        os.environ['PGPORT'] = conn_parms['port']             
+
+    password_set = False
+    if ('password' in conn_parms and conn_parms['password']
+    and not os.environ.get('PGPASSWORD')):
+        os.environ['PGPASSWORD'] = conn_parms['password']             
+        password_set = True
+
+    os.system(
+        ('pg_dump --format=custom --no-password %(db_name)s ' +
+         '| pg_restore --no-password --dbname=%(db)s') %
+        {'db_name': db_name, 'db': db}
+    )
+
+    if password_set:
+        del os.environ['PGPASSWORD'] 
+
+for version in options.migrations.split(','):
+  print 'running migration for '+version
+  config.set('options', 'without_demo', 'True')
+  config.set('options', 'logfile', logfile)
+  config.set('options', 'port', 'False')
+  config.set('options', 'netport', 'False')
+  config.set('options', 'xmlrpc_port', 'False')
+  config.set('options', 'netrpc_port', 'False')
+  config.set('options', 'addons_path', 
+   ','.join([os.path.join(options.branch_dir,
+       version,'server',migrations[version]['server']['addons_dir'])] +
+       [
+           os.path.join(options.branch_dir,version,name,
+               url.get('addons_dir', '') if isinstance(url, dict) else '') 
+           for (name,url) in migrations[version]['addons'].iteritems()
+       ]
+       )
+   )
+  config.set('options', 'root_path', os.path.join(options.branch_dir,version,
+      'server', migrations[version]['server']['root_dir']))
+  config.write(open(
+      os.path.join(options.branch_dir,version,'server.cfg'), 'w+'))
+  os.system(
+          os.path.join(options.branch_dir,version,'server',
+              migrations[version]['server']['cmd'] % {
+                  'db': db, 
+                  'config': os.path.join(options.branch_dir,version,
+                      'server.cfg')
+                  }
+              )
+          )


Follow ups