[Merge] lp:~openerp-dev/openobject-addons/6.0-bug-744175-xrg into lp:openobject-addons/6.0
Stephane Wirtel (OpenERP) has proposed merging lp:~openerp-dev/openobject-addons/6.0-bug-744175-xrg into lp:openobject-addons/6.0.
Requested reviews:
OpenERP Core Team (openerp)
For more details, see:
https://code.launchpad.net/~openerp-dev/openobject-addons/6.0-bug-744175-xrg/+merge/56148
There is an OPW on this branch.
I have asked the customer to check whether this branch fixes their problem, and I am waiting for a reply.
--
https://code.launchpad.net/~openerp-dev/openobject-addons/6.0-bug-744175-xrg/+merge/56148
Your team OpenERP R&D Team is subscribed to branch lp:~openerp-dev/openobject-addons/6.0-bug-744175-xrg.
=== modified file 'document/document_directory.py'
--- document/document_directory.py 2011-01-14 09:34:28 +0000
+++ document/document_directory.py 2011-04-04 12:41:11 +0000
@@ -25,6 +25,7 @@
import nodes
from tools.translate import _
+import logging
class document_directory(osv.osv):
_name = 'document.directory'
@@ -124,18 +125,48 @@
res.append((d.id, s or d.name))
return res
- def get_full_path(self, cr, uid, dir_id, context=None):
+ def get_full_path(self, cr, uid, dir_id, res_vector=None, context=None):
""" Return the full path to this directory, in a list, root first
"""
if isinstance(dir_id, (tuple, list)):
assert len(dir_id) == 1
dir_id = dir_id[0]
-
+ _log = logging.getLogger('document')
def _parent(dir_id, path):
parent=self.browse(cr, uid, dir_id)
if parent.parent_id and not parent.ressource_parent_type_id:
_parent(parent.parent_id.id,path)
path.append(parent.name)
+ if parent.ressource_type_id:
+ # Here we try to fill in the names of dynamic nodes in the
+ # path leading to the directory.
+ # Since document.directory records do /not/ contain information
+ # (i.e. the vector) about the dynamic nodes, we have to trust
+ # some external variable. If it is not available, we place a
+ # (<model name>, False) item in the path. The caller must
+ # handle that case or fail.
+ if res_vector is None:
+ _log.debug("get_full_path: missing a vector for %s", parent.ressource_type_id.model)
+ elif res_vector and parent.ressource_type_id.model == res_vector[0]:
+ obj = self.pool.get(res_vector[0])
+ rbro = obj.browse(cr, uid, res_vector[1], context=context)
+ fld_name = (parent.resource_field and parent.resource_field.name) or 'name'
+
+ if parent.ressource_tree:
+ rev_names = []
+ while rbro:
+ par = getattr(rbro, obj._parent_name, False)
+ if not par:
+ break
+ rev_names.append(rbro[fld_name])
+ rev_names.reverse()
+ path += map(nodes.filter_res_name, rev_names)
+ else:
+ # print "appending for %s:%s: %s" %(res_vector[0], res_vector[1], rbro[fld_name])
+ path.append(nodes.filter_res_name(rbro[fld_name]))
+ else:
+ _log.debug("get_full_path: missing a vector for: %s (had %s) ", parent.ressource_type_id.model, res_vector[0])
+ path.append((parent.ressource_type_id.model, False))
else:
path.append(parent.name)
return path
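
For reference, a minimal sketch (outside OpenERP's ORM, with purely illustrative names) of how the new res_vector argument is meant to work: a (model_name, res_id) pair lets get_full_path() substitute the real record's name for a dynamic "ressource" directory, falling back to a (model, False) marker that the caller must handle:

def full_path(directory, res_vector=None, resolve_name=None):
    # `directory` is a plain dict standing in for a document.directory
    # record; `resolve_name` maps (model, res_id) to a display name.
    path = (full_path(directory['parent'], res_vector, resolve_name)
            if directory.get('parent') else [])
    path.append(directory['name'])
    if directory.get('res_model'):
        if res_vector and res_vector[0] == directory['res_model']:
            path.append(resolve_name(*res_vector))
        else:
            # as in the patch: a (model, False) marker for the caller
            path.append((directory['res_model'], False))
    return path

root = {'name': 'Documents'}
companies = {'name': 'Companies', 'parent': root,
             'res_model': 'res.company'}
names = {('res.company', 1): 'OpenERP S.A.'}
print full_path(companies, ('res.company', 1),
                lambda m, i: names[(m, i)])
# ['Documents', 'Companies', 'OpenERP S.A.']
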
=== modified file 'document/document_storage.py'
--- document/document_storage.py 2011-01-14 00:11:01 +0000
+++ document/document_storage.py 2011-04-04 12:41:11 +0000
@@ -142,9 +142,10 @@
fsize = os.stat(fname).st_size
cr.execute("UPDATE ir_attachment " \
" SET index_content = %s, file_type = %s, " \
- " file_size = %s " \
+ " file_size = %s, "\
+ " write_date = now(), write_uid = %s " \
" WHERE id = %s",
- (icont_u, mime, fsize, par.file_id))
+ (icont_u, mime, fsize, par.context.uid, par.file_id))
par.content_length = fsize
par.content_type = mime
cr.commit()
@@ -157,9 +158,10 @@
par = self._get_parent()
cr = pooler.get_db(par.context.dbname).cursor()
fsize = os.stat(fname).st_size
- cr.execute("UPDATE ir_attachment SET file_size = %s " \
+ cr.execute("UPDATE ir_attachment SET file_size = %s, " \
+ " write_date = now(), write_uid = %s " \
" WHERE id = %s",
- (fsize, par.file_id))
+ (fsize, par.context.uid, par.file_id))
par.content_length = fsize
cr.commit()
cr.close()
@@ -228,17 +230,19 @@
out = psycopg2.Binary(data)
cr.execute("UPDATE ir_attachment " \
"SET db_datas = %s, file_size=%s, " \
- " index_content= %s, file_type=%s " \
+ " index_content= %s, file_type=%s, " \
+ " write_date = now(), write_uid = %s " \
" WHERE id = %s",
- (out, len(data), icont_u, mime, par.file_id))
+ (out, len(data), icont_u, mime, par.context.uid, par.file_id))
elif self.mode == 'a':
data = self.getvalue()
out = psycopg2.Binary(data)
cr.execute("UPDATE ir_attachment " \
"SET db_datas = COALESCE(db_datas,'') || %s, " \
- " file_size = COALESCE(file_size, 0) + %s " \
+ " file_size = COALESCE(file_size, 0) + %s, " \
+ " write_date = now(), write_uid = %s " \
" WHERE id = %s",
- (out, len(data), par.file_id))
+ (out, len(data), par.context.uid, par.file_id))
cr.commit()
except Exception:
logging.getLogger('document.storage').exception('Cannot update db file #%d for close:', par.file_id)
@@ -306,18 +310,20 @@
icont_u = ''
cr.execute('UPDATE ir_attachment SET db_datas = %s::bytea, file_size=%s, ' \
- 'index_content = %s, file_type = %s ' \
+ 'index_content = %s, file_type = %s, ' \
+ ' write_date = now(), write_uid = %s ' \
'WHERE id = %s',
- (base64.encodestring(data), len(data), icont_u, mime, par.file_id))
+ (base64.encodestring(data), len(data), icont_u, mime, par.context.uid, par.file_id))
elif self.mode == 'a':
data = self.getvalue()
# Yes, we're obviously using the wrong representation for storing our
# data as base64-in-bytea
cr.execute("UPDATE ir_attachment " \
"SET db_datas = encode( (COALESCE(decode(encode(db_datas,'escape'),'base64'),'') || decode(%s, 'base64')),'base64')::bytea , " \
- " file_size = COALESCE(file_size, 0) + %s " \
+ " file_size = COALESCE(file_size, 0) + %s, " \
+ " write_date = now(), write_uid = %s " \
" WHERE id = %s",
- (base64.encodestring(data), len(data), par.file_id))
+ (base64.encodestring(data), len(data), par.context.uid, par.file_id))
cr.commit()
except Exception:
logging.getLogger('document.storage').exception('Cannot update db file #%d for close:', par.file_id)
@@ -627,8 +633,8 @@
# a hack: /assume/ that the calling write operation will not try
# to write the fname and size, and update them in the db concurrently.
# We cannot use a write() here, because we are already in one.
- cr.execute('UPDATE ir_attachment SET store_fname = %s, file_size = %s, index_content = %s, file_type = %s WHERE id = %s',
- (store_fname, filesize, icont_u, mime, file_node.file_id))
+ cr.execute('UPDATE ir_attachment SET store_fname = %s, file_size = %s, index_content = %s, file_type = %s, write_date = now(), write_uid = %s WHERE id = %s',
+ (store_fname, filesize, icont_u, mime, uid, file_node.file_id))
file_node.content_length = filesize
file_node.content_type = mime
return True
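
The pattern added throughout this file is the same in every hunk: each raw SQL UPDATE on ir_attachment now also sets write_date/write_uid, with the uid passed as a bound parameter. A standalone sketch of that pattern (connection details are placeholders, not the module's actual setup):

import psycopg2

conn = psycopg2.connect(dbname='openerp_test')  # placeholder DSN
cr = conn.cursor()
uid, file_id, fsize = 1, 42, 1024               # illustrative values
cr.execute("UPDATE ir_attachment "
           "   SET file_size = %s, "
           "       write_date = now(), write_uid = %s "
           " WHERE id = %s",
           (fsize, uid, file_id))
conn.commit()
cr.close()

Using SQL now() rather than a Python-side timestamp keeps the value on the database server's clock, consistent with the other columns maintained in SQL here.
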
=== modified file 'document/nodes.py'
--- document/nodes.py 2011-01-14 09:34:28 +0000
+++ document/nodes.py 2011-04-04 12:41:11 +0000
@@ -57,6 +57,17 @@
cre = cre[:fdot]
return time.mktime(time.strptime(cre,'%Y-%m-%d %H:%M:%S')) + frac
+def filter_res_name(name):
+ """ Filter forbidden chars from a resource name to node names
+
+ Resource names (eg. project.project.name) could contain any chars,
+ some of them being illegal for filesystem representation.
+ This is the central point to do this kind of filtering.
+ Remember that the substitution char must be '_', because in SQL ILIKE
+ it will match back the original (single) char.
+ """
+ return name.replace('/','_') # any other weird char?
+
def get_node_context(cr, uid, context):
return node_context(cr, uid, context)
@@ -866,7 +877,7 @@
# Yes! we can't do better but skip nameless records.
# Escape the name for characters not supported in filenames
- res_name = res_name.replace('/','_') # any other weird char?
+ res_name = filter_res_name(res_name)
if name and (res_name != name):
# we have matched _ to any character, but we only meant to match
@@ -1043,7 +1054,7 @@
res_name = getattr(bo, namefield)
if not res_name:
continue
- res_name = res_name.replace('/', '_')
+ res_name = filter_res_name(res_name)
if name and (res_name != name):
continue
# TODO Revise
@@ -1177,6 +1188,11 @@
self.write_date = fil.write_date or fil.create_date
self.content_length = fil.file_size
self.displayname = fil.name
+ self.res_vector = None
+ if fil.res_model:
+ self.res_vector = (fil.res_model, fil.res_id)
+ elif fil.partner_id:
+ self.res_vector = ('res.partner', fil.partner_id.id)
self.uidperms = 14
if parent:
@@ -1251,7 +1267,8 @@
dirpath = []
if fbro.parent_id:
dirobj = self.context._dirobj.pool.get('document.directory')
- dirpath = dirobj.get_full_path(cr, uid, fbro.parent_id.id, context=self.context.context)
+ dirpath = dirobj.get_full_path(cr, uid, fbro.parent_id.id,
+ res_vector=self.res_vector, context=self.context.context)
if fbro.datas_fname:
dirpath.append(fbro.datas_fname)
else:
@@ -1280,9 +1297,6 @@
def get_data_len(self, cr, fil_obj = None):
# TODO: verify with the storage object!
- bin_size = self.context.context.get('bin_size', False)
- if bin_size and not self.content_length:
- self.content_length = fil_obj.db_datas
return self.content_length
def set_data(self, cr, data, fil_obj = None):
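
The docstring's point about '_' can be checked in isolation: in SQL (I)LIKE, '_' is a single-character wildcard, so a name filtered for the filesystem still matches its original row. A small standalone illustration (the ilike() helper below only emulates the SQL operator for the demo):

import re

def filter_res_name(name):
    return name.replace('/', '_')   # as in the patch

def ilike(pattern, value):
    # translate (I)LIKE wildcards to a regex; assumes `pattern` has no
    # other regex metacharacters
    rx = pattern.replace('%', '.*').replace('_', '.')
    return re.match('^%s$' % rx, value, re.IGNORECASE) is not None

original = 'AB/2011/003'                 # a resource name containing '/'
node_name = filter_res_name(original)    # 'AB_2011_003'
assert ilike(node_name, original)        # the filtered name matches back
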
=== added file 'document/test/document_test3.yml'
--- document/test/document_test3.yml 1970-01-01 00:00:00 +0000
+++ document/test/document_test3.yml 2011-04-04 12:41:11 +0000
@@ -0,0 +1,115 @@
+-
+ I will now test the realstore functionality of DMS
+-
+ !assert {model: document.storage, id: storage_default }:
+ - id != False
+-
+ I create a realstore folder, with some arbitrary storage path
+-
+ !python {model: document.storage}: |
+ import tempfile
+ tdir = tempfile.mkdtemp()
+ print "I will be storing at %s" % tdir
+ context['tests_doc_tmpdir'] = tdir
+-
+ !record {model: document.storage, id: test_realstore_id }:
+ name: Realstore testing
+ type: realstore
+-
+ !python {model: document.storage }: |
+ id = ref('test_realstore_id')
+ self.write(cr, uid, [id,], {'path': context['tests_doc_tmpdir']})
+-
+ I create a "Testing Realstore" folder where all the test data will go.
+-
+ !record {model: document.directory, id: dir_tests_realstore }:
+ name: 'Testing Realstore'
+ parent_id: dir_root
+ storage_id: test_realstore_id
+-
+ I create an attachment into the realstore
+-
+ !record {model: ir.attachment, id: file_test_rs1 }:
+ name: Test file.txt
+ parent_id: dir_tests_realstore
+-
+ I delete the attachment from the root folder
+-
+ !python {model: ir.attachment}: |
+ self.unlink(cr, uid, [ref('file_test_rs1')])
+-
+ I create a second attachment into the Testing folder.
+-
+ !record {model: ir.attachment, id: file_test_rs2 }:
+ name: Test file 2
+ parent_id: dir_tests_realstore
+-
+ I update the attachment with data, namely "abcd"
+-
+ !record {model: ir.attachment, id: file_test_rs2 }:
+ datas: "YWJjZA==\n"
+-
+ I test that the datas of the attachment are correct
+-
+ !assert {model: ir.attachment, id: file_test_rs2 }:
+ - datas == "YWJjZA==\n"
+ - file_size == 4
+ - file_type == 'text/plain'
+-
+ I open the real file and check the data
+-
+ !python {model: ir.attachment}: |
+ import os
+ rpath = os.path.join(context['tests_doc_tmpdir'], 'Documents', 'Testing Realstore', 'Test file 2')
+ assert os.path.exists(rpath), "Cannot find %s!" % rpath
+ print "Found path:", rpath
+-
+ I now check for Realstore & Dynamic folders
+-
+ I create a dynamic folder for companies
+-
+ !record {model: document.directory, id: test_dynfolder_1 }:
+ name: Companies
+ parent_id: dir_tests_realstore
+ type: ressource
+ ressource_type_id: base.model_res_company
+ resource_find_all: False
+ company_id: False
+-
+ I attach one document for the dynamic folder of companies
+-
+ !record {model: ir.attachment, id: file_test_rs3 }:
+ name: Test file 3
+ parent_id: test_dynfolder_1
+ datas: "YWJjZA==\n"
+ res_model: res.company
+ res_id: !eval ref('base.main_company')
+-
+ I open the real dynamic file and check the data
+-
+ !python {model: ir.attachment}: |
+ import os
+ comp_obj = self.pool.get('res.company')
+ comp_name = comp_obj.browse(cr, uid, ref('base.main_company')).name
+ rpath = os.path.join(context['tests_doc_tmpdir'], 'Documents', 'Testing Realstore', \
+ 'Companies', comp_name, 'Test file 3')
+ assert os.path.exists(rpath), "Cannot find %s!" % rpath
+ print "Found path:", rpath
+-
+ I delete the attachments
+-
+ !python {model: ir.attachment}: |
+ self.unlink(cr, uid, [ref('file_test_rs2')])
+ self.unlink(cr, uid, [ref('file_test_rs3')])
+-
+ I delete the tests folder
+-
+ !python {model: document.directory}: |
+ self.unlink(cr, uid, [ref('dir_tests_realstore'), ref('test_dynfolder_1')])
+ cr.commit()
+-
+ I delete the realstore
+-
+ !python {model: document.storage}: |
+ self.unlink(cr, uid, [ref('test_realstore_id')])
+ cr.commit()
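
The datas value used in these records is plain base64: "YWJjZA==\n" decodes to "abcd", which is why the assertions expect file_size == 4. A quick check, runnable with the same Python 2 these modules use:

import base64

assert base64.decodestring("YWJjZA==\n") == "abcd"
assert base64.encodestring("abcd") == "YWJjZA==\n"
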
=== modified file 'document_ftp/ftpserver/abstracted_fs.py'
--- document_ftp/ftpserver/abstracted_fs.py 2011-01-14 09:34:28 +0000
+++ document_ftp/ftpserver/abstracted_fs.py 2011-04-04 12:41:11 +0000
@@ -1,6 +1,5 @@
# -*- encoding: utf-8 -*-
-import os
import time
from tarfile import filemode
import logging
@@ -33,6 +32,42 @@
from ftpserver import _to_decode, _to_unicode
+class ftp_path(object):
+ """Util functions for ftp (Unix) paths, instead of os.path
+
+ os.path will behave differently according to platform. For FTP paths
+ we always want the Unix behavior
+ """
+
+ @staticmethod
+ def join(*pathelems):
+ return '/'.join(pathelems)
+
+ @staticmethod
+ def isabs(path):
+ return path.startswith('/')
+
+ @staticmethod
+ def split(path):
+ return path.rsplit('/',1)
+
+ @staticmethod
+ def normpath(path):
+ if '//' not in path and '..' not in path and './' not in path:
+ return path
+
+ pathelems = path.split('/')
+ res = []
+ for p in pathelems:
+ if len(res) and not p:
+ continue
+ if p == '.':
+ continue
+ if p == '..' and len(res):
+ res.pop()
+ continue
+ res.append(p)
+ return '/'.join(res)
class abstracted_fs(object):
"""A class used to interact with the file system, providing a high
@@ -92,15 +127,10 @@
Pathname returned is relative!.
"""
- p = os.path.normpath(ftppath)
+ p = ftp_path.normpath(ftppath)
# normalize string in a standard web-path notation having '/'
# as separator. xrg: is that really in the spec?
p = p.replace("\\", "/")
- # os.path.normpath supports UNC paths (e.g. "//a/b/c") but we
- # don't need them. In case we get an UNC path we collapse
- # redundant separators appearing at the beginning of the string
- while p[:2] == '//':
- p = p[1:]
if p == '.':
return ''
return p
@@ -119,7 +149,7 @@
if node:
paths = node.full_path()
res = '/' + node.context.dbname + '/' + \
- _to_decode(os.path.join(*paths))
+ _to_decode(ftp_path.join(*paths))
return res
@@ -188,6 +218,7 @@
raise NotImplementedError # TODO
text = not 'b' in mode
+ node = None # for pyflakes
# for unique file , maintain version if duplicate file
if dir:
cr = dir.cr
@@ -255,16 +286,18 @@
"""
path = self.ftpnorm(line)
if self.cwd_node is None:
- if not os.path.isabs(path):
- path = os.path.join(self.root, path)
+ if not path:
+ path = self.root or '/'
+ elif not ftp_path.isabs(path):
+ path = ftp_path.join(self.root, path)
if path == '/' and mode in ('list', 'cwd'):
return (None, None, None )
- path = _to_unicode(os.path.normpath(path)) # again, for '/db/../ss'
+ path = _to_unicode(ftp_path.normpath(path)) # again, for '/db/../ss'
if path == '.': path = ''
- if os.path.isabs(path) and self.cwd_node is not None \
+ if ftp_path.isabs(path) and self.cwd_node is not None \
and path.startswith(self.cwd):
# make relative, so that cwd_node is used again
path = path[len(self.cwd):]
@@ -273,18 +306,19 @@
p_parts = path.split('/') # hard-code the unix sep here, by spec.
- assert '..' not in p_parts
rem_path = None
if mode in ('create',):
rem_path = p_parts[-1]
p_parts = p_parts[:-1]
+ assert rem_path != '..' # certainly invalid
- if os.path.isabs(path):
+ if ftp_path.isabs(path):
# we have to start from root, again
while p_parts and p_parts[0] == '':
p_parts = p_parts[1:]
# self._log.debug("Path parts: %r ", p_parts)
+ assert '..' not in p_parts
if not p_parts:
raise IOError(errno.EPERM, 'Cannot perform operation at root dir')
dbname = p_parts[0]
@@ -311,10 +345,32 @@
if p_parts and p_parts[-1] == '':
p_parts = p_parts[:-1]
cr, uid = self.get_node_cr_uid(self.cwd_node)
+ start_node = self.cwd_node
+ while p_parts and p_parts[0] == '..':
+ if start_node.parent:
+ p_parts = p_parts[1:]
+ if isinstance(start_node.path, (list, tuple)):
+ # node.parent is NOT a direct parent!
+ inm_path = list(start_node.path[:-1])
+ while p_parts and inm_path and p_parts[0] == '..':
+ inm_path = inm_path[:-1]
+ p_parts = p_parts[1:]
+ if inm_path:
+ p_parts = inm_path + p_parts
+ start_node = start_node.parent
+ else:
+ # node has no (known) parent
+ if len(p_parts) > 1:
+ raise IOError(errno.ENOENT, 'Path does not exist')
+ elif mode in ('list', 'cwd'):
+ return (None, None, None )
+ else:
+ raise IOError(errno.ENOENT, 'Invalid path for %s operation' % mode)
+ assert '..' not in p_parts
if p_parts:
- node = self.cwd_node.get_uri(cr, p_parts)
+ node = start_node.get_uri(cr, p_parts)
else:
- node = self.cwd_node
+ node = start_node
if node is False and mode not in ('???'):
cr.close()
raise IOError(errno.ENOENT, 'Path does not exist')
@@ -506,7 +562,7 @@
if not glob.has_magic(ftppath):
return self.get_list_dir(self.ftp2fs(rawline, datacr))
else:
- basedir, basename = os.path.split(ftppath)
+ basedir, basename = ftp_path.split(ftppath)
if glob.has_magic(basedir):
return iter(['Directory recursion not supported.\r\n'])
else:
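
Some quick checks of the behavior the new helper guarantees regardless of host platform (the class body is repeated here only so the snippet runs standalone):

class ftp_path(object):
    @staticmethod
    def join(*pathelems):
        return '/'.join(pathelems)

    @staticmethod
    def isabs(path):
        return path.startswith('/')

    @staticmethod
    def split(path):
        return path.rsplit('/', 1)

    @staticmethod
    def normpath(path):
        if '//' not in path and '..' not in path and './' not in path:
            return path
        res = []
        for p in path.split('/'):
            if res and not p:
                continue
            if p == '.':
                continue
            if p == '..' and res:
                res.pop()
                continue
            res.append(p)
        return '/'.join(res)

assert ftp_path.join('db', 'Documents', 'x.txt') == 'db/Documents/x.txt'
assert ftp_path.isabs('/db/Documents')
assert ftp_path.split('/db/Documents/x.txt') == ['/db/Documents', 'x.txt']
assert ftp_path.normpath('/db/../ss') == '/ss'      # the case cited in ftp2fs
assert ftp_path.normpath('a/./b//c') == 'a/b/c'

One caveat worth noting: unlike os.path.split(), ftp_path.split() returns a one-element list when the path contains no '/'; the call site above appears to always receive paths with a separator, but unpacking into (basedir, basename) would fail otherwise.
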
=== modified file 'document_ftp/test/document_ftp_test2.yml'
--- document_ftp/test/document_ftp_test2.yml 2011-01-14 00:11:01 +0000
+++ document_ftp/test/document_ftp_test2.yml 2011-04-04 12:41:11 +0000
@@ -222,6 +222,26 @@
ftp.close()
# TODO move
-
+ I check the functionality of the cd ".." command
+-
+ !python {model: ir.attachment}: |
+ from document_ftp import test_easyftp as te
+ ftp = te.get_ftp_folder(cr, uid, self, 'Documents/Test-Folder2')
+ pwd = ftp.pwd().rsplit('/',1)[-1]
+ assert pwd == 'Test-Folder2', pwd
+ try:
+ ftp.cwd('../Test-Folder3')
+ except Exception, e:
+ raise AssertionError("FTP error: " + str(e))
+ pwd = ftp.pwd().rsplit('/',1)[-1]
+ assert pwd == 'Test-Folder3', pwd
+ try:
+ ftp.cwd('..')
+ except Exception, e:
+ raise AssertionError("FTP error: " + str(e))
+ pwd = ftp.pwd().rsplit('/',1)[-1]
+ assert pwd == 'Documents', pwd
+-
I remove the 'Test-Folder3'
-
!python {model: ir.attachment}: |