
Re: document module enhancement 6.0/6.1 or backport 7.0

 

Dear Stéphane,

Thanks again for your hint. I have finalized Arnaud's work on avoiding
the unintentional deletion of files that are referenced by more than
one document.
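
To illustrate the core idea: the physical file is deleted only together
with the last attachment record that references it. Here is a minimal
standalone sketch (unlink_document, attachments and delete_stored_file
are illustrative names, not the actual OpenERP API):

    def unlink_document(doc_id, attachments, delete_stored_file):
        """Remove one attachment; delete the stored file only if no
        other attachment still references the same store_fname."""
        store_fname = attachments.pop(doc_id)['store_fname']
        if not any(a['store_fname'] == store_fname
                   for a in attachments.values()):
            delete_stored_file(store_fname)

For example, with two attachments pointing at the same store_fname,
unlinking the first one keeps the file, and unlinking the second one
deletes it.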

Storing two documents whose files have the same name but different
content has to be tackled at import time, by recognising the collision
and using a different filename for storage. This is on my list but will
be addressed later.
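
One way the import-time part could be handled, as a sketch only: derive
the storage filename from the content when a name collision with
different content is detected (the helper unique_store_fname is
hypothetical, not existing code):

    import hashlib
    import os

    def unique_store_fname(directory, fname, content):
        """Return a collision-free storage filename: reuse fname if it
        is free or already holds identical content, otherwise append a
        short content hash."""
        path = os.path.join(directory, fname)
        if not os.path.exists(path):
            return fname
        with open(path, 'rb') as existing:
            if existing.read() == content:
                return fname  # identical content, the file can be shared
        root, ext = os.path.splitext(fname)
        digest = hashlib.sha1(content).hexdigest()[:8]
        return '%s-%s%s' % (root, digest, ext)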

Please find attached two diffs: one vs.
https://code.launchpad.net/~openerp-dev/openobject-addons/trunk-bug-1062258-api
(an enhancement of that patch), and one vs. the community branch
~ocb/ocb-addons/6.1.

Best Regards

Etienne

On 16.07.2013 12:59, Bidoul, Stéphane wrote:
> Etienne,
> 
> You may want to look
> at https://bugs.launchpad.net/bugs/1062258 regarding file storage in 6.1.
> 
> -sbi
> 
> 
> On Mon, Jul 8, 2013 at 10:58 PM, Etienne Hirt <hirt@xxxxxxxxxxxxxxxxxxxx> wrote:
> 
>     Dear all,
> 
>     Looking at the external file storage in 6.0/6.1 I found that:
>     * the same file can be added several times without any notification;
>     this is often intentional.
>     * when deleting one of these (intentional) duplicates, the file itself
>     is deleted, too.
>     * when deleting a document from an object, that object might still be
>     linked to a message, so only the res_id/model relation should be
>     removed.
> 
>     The structure in 6.0/6.1 seems very complex compared to
>     ir_attachment/document in version 7.0. Additionally, 7.0 at least
>     solves the problem of deleting files that are still linked.
> 
>     Has anybody performed a backport as a basis for further work, or
>     otherwise worked on the features mentioned above that we are missing?
> 
>     Best Regards
> 
>     Etienne
> 
> 
> 
Index: /usr/share/pyshared/openerp-server/addons/document/document.py
===================================================================
--- /usr/share/pyshared/openerp-server/addons/document/document.py	(revision 15627)
+++ /usr/share/pyshared/openerp-server/addons/document/document.py	(revision 15628)
@@ -147,6 +147,9 @@
         # filename_uniq is not possible in pure SQL
     ]
     def _check_duplication(self, cr, uid, vals, ids=[], op='create'):
+        """
+        Return True if no attachment with the same filename is already linked to the object (res_model, res_id).
+        """
         name = vals.get('name', False)
         parent_id = vals.get('parent_id', False)
         res_model = vals.get('res_model', False)
@@ -291,19 +294,22 @@
             if vals.get('file_size'):
                 del vals['file_size']
         result = self._check_duplication(cr, uid, vals)
         if not result:
+            # A file with this name is already attached to the object: overwrite the file data only.
             domain = [
                 ('res_id', '=', vals['res_id']),
                 ('res_model', '=', vals['res_model']),
                 ('datas_fname', '=', vals['datas_fname']),
             ]
             attach_ids = self.search(cr, uid, domain, context=context)
+            # TODO: ask whether overwriting the existing data is intentional.
             super(document_file, self).write(cr, uid, attach_ids, 
                                              {'datas' : vals['datas']},
                                              context=context)
             result = attach_ids[0]
         else:
-            #raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))
+            # Attach a new document and write the file.
+            # TODO: check whether a file with identical content already exists in storage.
             result = super(document_file, self).create(cr, uid, vals, context)
         return result
 
@@ -324,18 +330,16 @@
         return False
 
     def unlink(self, cr, uid, ids, context=None):
         attachment_ref = self.pool.get('ir.attachment')
         stor = self.pool.get('document.storage')
-        unres = []
         # We have to do the unlink in 2 stages: prepare a list of actual
         # files to be unlinked, update the db (safer to do first, can be
         # rolled back) and then unlink the files. The list wouldn't exist
         # after we discard the objects
         ids = self.search(cr, uid, [('id','in',ids)])
-        canUnlink = 0   #If canUnlink is bigger than 1 it means that the document has more than 1 attachement.
-                        #We therefore cannot unlink that document.
         for f in self.browse(cr, uid, ids, context=context):
             # TODO: update the node cache
+            unres = []
             par = f.parent_id
             storage_id = None
             while par:
@@ -346,10 +350,11 @@
             #We get the ids of attachement that correspond to the document
             attachment_ids = attachment_ref.search(cr, uid, [('store_fname', '=', f.store_fname), ('parent_id.name', '=', f.parent_id.name)], context=context)
             #If we have more than 1 attachment for a same file, we will not unlink it.
-            for att in attachment_ref.browse(cr,uid,attachment_ids,context=context):
-                canUnlink += 1
-            #assert storage_id, "Strange, found file #%s w/o storage!" % f.id #TOCHECK: after run yml, it's fail
-            if canUnlink == 1:            
+            canUnlink = len(attachment_ids)
+            # If canUnlink is greater than 1, more than one attachment references the same file,
+            # so the stored file must not be deleted yet.
+            if canUnlink == 1:
+                #assert storage_id, "Strange, found file #%s w/o storage!" % f.id #TOCHECK: after run yml, it's fail            
                 if storage_id:
                     r = stor.prepare_unlink(cr, uid, storage_id, f)
                     if r:
@@ -357,8 +362,8 @@
                 else:
                     logging.getLogger('document').warning("Unlinking attachment #%s %s that has no storage",
                                                     f.id, f.name)
-        res = super(document_file, self).unlink(cr, uid, ids, context)
-        if canUnlink == 1:
+            # Do the unlink per document so that the stored file is deleted together with the last document that references it.
+            res = super(document_file, self).unlink(cr, uid, [f.id], context)
             stor.do_unlink(cr, uid, unres)
         return res
 
Index: /usr/share/pyshared/openerp-server/addons/document/document.py
===================================================================
--- /usr/share/pyshared/openerp-server/addons/document/document.py	(revision 15591)
+++ /usr/share/pyshared/openerp-server/addons/document/document.py	(revision 15628)
@@ -147,6 +147,9 @@
         # filename_uniq is not possible in pure SQL
     ]
     def _check_duplication(self, cr, uid, vals, ids=[], op='create'):
+        """
+        Return True if no attachment with the same filename is already linked to the object (res_model, res_id).
+        """
         name = vals.get('name', False)
         parent_id = vals.get('parent_id', False)
         res_model = vals.get('res_model', False)
@@ -291,19 +294,22 @@
             if vals.get('file_size'):
                 del vals['file_size']
         result = self._check_duplication(cr, uid, vals)
         if not result:
+            # A file with this name is already attached to the object: overwrite the file data only.
             domain = [
                 ('res_id', '=', vals['res_id']),
                 ('res_model', '=', vals['res_model']),
                 ('datas_fname', '=', vals['datas_fname']),
             ]
             attach_ids = self.search(cr, uid, domain, context=context)
+            # TODO: ask whether overwriting the existing data is intentional.
             super(document_file, self).write(cr, uid, attach_ids, 
                                              {'datas' : vals['datas']},
                                              context=context)
             result = attach_ids[0]
         else:
-            #raise osv.except_osv(_('ValidateError'), _('File name must be unique!'))
+            # Attach a new document and write the file.
+            # TODO: check whether a file with identical content already exists in storage.
             result = super(document_file, self).create(cr, uid, vals, context)
         return result
 
@@ -324,8 +330,8 @@
         return False
 
     def unlink(self, cr, uid, ids, context=None):
+        attachment_ref = self.pool.get('ir.attachment')
         stor = self.pool.get('document.storage')
-        unres = []
         # We have to do the unlink in 2 stages: prepare a list of actual
         # files to be unlinked, update the db (safer to do first, can be
         # rolled back) and then unlink the files. The list wouldn't exist
@@ -333,6 +339,7 @@
         ids = self.search(cr, uid, [('id','in',ids)])
         for f in self.browse(cr, uid, ids, context=context):
             # TODO: update the node cache
+            unres = []
             par = f.parent_id
             storage_id = None
             while par:
@@ -340,16 +347,24 @@
                     storage_id = par.storage_id
                     break
                 par = par.parent_id
-            #assert storage_id, "Strange, found file #%s w/o storage!" % f.id #TOCHECK: after run yml, it's fail
-            if storage_id:
-                r = stor.prepare_unlink(cr, uid, storage_id, f)
-                if r:
-                    unres.append(r)
-            else:
-                logging.getLogger('document').warning("Unlinking attachment #%s %s that has no storage",
-                                                f.id, f.name)
-        res = super(document_file, self).unlink(cr, uid, ids, context)
-        stor.do_unlink(cr, uid, unres)
+            #We get the ids of attachement that correspond to the document
+            attachment_ids = attachment_ref.search(cr, uid, [('store_fname', '=', f.store_fname), ('parent_id.name', '=', f.parent_id.name)], context=context)
+            #If we have more than 1 attachment for a same file, we will not unlink it.
+            canUnlink = len(attachment_ids)
+            # If canUnlink is greater than 1, more than one attachment references the same file,
+            # so the stored file must not be deleted yet.
+            if canUnlink == 1:
+                #assert storage_id, "Strange, found file #%s w/o storage!" % f.id #TOCHECK: after run yml, it's fail            
+                if storage_id:
+                    r = stor.prepare_unlink(cr, uid, storage_id, f)
+                    if r:
+                        unres.append(r)
+                else:
+                    logging.getLogger('document').warning("Unlinking attachment #%s %s that has no storage",
+                                                    f.id, f.name)
+            # Do the unlink per document so that the stored file is deleted together with the last document that references it.
+            res = super(document_file, self).unlink(cr, uid, [f.id], context)
+            stor.do_unlink(cr, uid, unres)
         return res
 
 document_file()
