banking-addons-team team mailing list archive
Message #00513
lp:~akretion-team/banking-addons/bank-statement-reconcile-61-ref-base-import into lp:banking-addons/bank-statement-reconcile-61
Benoit Guillot - http://www.akretion.com has proposed merging lp:~akretion-team/banking-addons/bank-statement-reconcile-61-ref-base-import into lp:banking-addons/bank-statement-reconcile-61.
Requested reviews:
Banking Addons Team (banking-addons-team)
For more details, see:
https://code.launchpad.net/~akretion-team/banking-addons/bank-statement-reconcile-61-ref-base-import/+merge/159104
In this merge proposal, I submit some changes to the module account_statement_base_import:
- Fix the keys_to_validate argument in the __init__ of the file parser (replace the mutable default [] with None)
- Add the possibility to force the dialect used by the CSV DictReader (a sketch of these two parser changes follows this list)
- Fix the import_type field of the account_statement_profile class and add a hook to register new types
- Remove trailing spaces at the end of lines, introduced by my IDE
- Fix a typo in the name of the method prepare_statement_lines_vals
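To make the first two points concrete, here is a minimal, hypothetical sketch (the SketchFileParser name and parse_csv helper are made up for illustration and are not the module's actual code): a mutable default such as keys_to_validate=[] is shared across calls, so None is used instead, and an explicit csv dialect can be forwarded to the DictReader rather than always sniffing.

import csv

class SketchFileParser(object):
    def __init__(self, parse_name, keys_to_validate=None, dialect=None):
        self.parse_name = parse_name
        # None default: every instance gets its own fresh list
        self.keys_to_validate = keys_to_validate or []
        self.dialect = dialect

    def parse_csv(self, lines):
        if self.dialect is None:
            # sniff the delimiter only when no dialect is forced
            dialect = csv.Sniffer().sniff(lines[0], delimiters=',;\t')
        else:
            dialect = self.dialect
        return list(csv.DictReader(lines, dialect=dialect))

# forcing the standard comma-separated dialect instead of sniffing it
rows = SketchFileParser('demo', dialect=csv.excel).parse_csv(['ref,label', '1,foo'])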
--
https://code.launchpad.net/~akretion-team/banking-addons/bank-statement-reconcile-61-ref-base-import/+merge/159104
Your team Banking Addons Team is requested to review the proposed merge of lp:~akretion-team/banking-addons/bank-statement-reconcile-61-ref-base-import into lp:banking-addons/bank-statement-reconcile-61.
=== modified file 'account_statement_base_import/parser/file_parser.py'
--- account_statement_base_import/parser/file_parser.py 2012-11-26 10:23:58 +0000
+++ account_statement_base_import/parser/file_parser.py 2013-04-16 08:36:25 +0000
@@ -32,13 +32,13 @@
"""
Generic abstract class for defining parser for .csv or .xls file format.
"""
-
- def __init__(self, parse_name, keys_to_validate=[], ftype='csv', convertion_dict=None, header=None, *args, **kwargs):
+
+ def __init__(self, parse_name, keys_to_validate=None, ftype='csv', convertion_dict=None, header=None, dialect=None, *args, **kwargs):
"""
:param char: parse_name : The name of the parser
:param list: keys_to_validate : contain the key that need to be present in the file
:param char ftype: extension of the file (could be csv or xls)
- :param: convertion_dict : keys and type to convert of every column in the file like
+ :param: convertion_dict : keys and type to convert of every column in the file like
{
'ref': unicode,
'label': unicode,
@@ -48,7 +48,7 @@
}
:param list: header : specify header fields if the csv file has no header
"""
-
+
super(FileParser, self).__init__(parse_name, *args, **kwargs)
if ftype in ('csv', 'xls'):
self.ftype = ftype
@@ -60,6 +60,7 @@
self._datemode = 0 # used only for xls documents,
# 0 means Windows mode (1900 based dates).
# Set in _parse_xls, from the contents of the file
+ self.dialect = dialect
def _custom_format(self, *args, **kwargs):
"""
@@ -78,7 +79,7 @@
Launch the parsing through .csv or .xls depending on the
given ftype
"""
-
+
res = None
if self.ftype == 'csv':
res = self._parse_csv()
@@ -94,7 +95,7 @@
We skip the validation step if the file header is provided separately
(in the field: fieldnames).
"""
- if self.fieldnames is None:
+ if self.fieldnames is None and self.keys_to_validate:
parsed_cols = self.result_row_list[0].keys()
for col in self.keys_to_validate:
if col not in parsed_cols:
@@ -116,8 +117,8 @@
csv_file = tempfile.NamedTemporaryFile()
csv_file.write(self.filebuffer)
csv_file.flush()
- with open(csv_file.name, 'rU') as fobj:
- reader = UnicodeDictReader(fobj, fieldnames=self.fieldnames)
+ with open(csv_file.name, 'rU') as fobj:
+ reader = UnicodeDictReader(fobj, fieldnames=self.fieldnames, dialect=self.dialect)
return list(reader)
def _parse_xls(self):
=== modified file 'account_statement_base_import/parser/parser.py'
--- account_statement_base_import/parser/parser.py 2012-11-23 16:27:24 +0000
+++ account_statement_base_import/parser/parser.py 2013-04-16 08:36:25 +0000
@@ -26,10 +26,14 @@
pos = utf8_data.tell()
sample_data = utf8_data.read(1024)
utf8_data.seek(pos)
- dialect = sniffer.sniff(sample_data, delimiters=',;\t')
+ if not kwargs.get('dialect'):
+ dialect = sniffer.sniff(sample_data, delimiters=',;\t')
+ del kwargs['dialect']
+ else:
+ dialect = kwargs.pop('dialect')
csv_reader = csv.DictReader(utf8_data, dialect=dialect, **kwargs)
for row in csv_reader:
- yield dict([(key, unicode(value, 'utf-8')) for key, value in row.iteritems()])
+ yield dict([(unicode(key, 'utf-8'), unicode(value, 'utf-8')) for key, value in row.iteritems()])
class BankStatementImportParser(object):
"""
@@ -38,7 +42,7 @@
own. If your file is a .csv or .xls format, you should consider inheirt
from the FileParser instead.
"""
-
+
def __init__(self, parser_name, *args, **kwargs):
# The name of the parser as it will be called
self.parser_name = parser_name
@@ -50,7 +54,7 @@
# Concatenate here the global commission taken by the bank/office
# for this statement.
self.commission_global_amount = None
-
+
@classmethod
def parser_for(cls, parser_name):
"""
@@ -58,17 +62,17 @@
return the good class from his name.
"""
return False
-
+
def _decode_64b_stream(self):
"""
Decode self.filebuffer in base 64 and override it
"""
self.filebuffer = base64.b64decode(self.filebuffer)
return True
-
+
def _format(self, decode_base_64=True, **kwargs):
"""
- Decode into base 64 if asked and Format the given filebuffer by calling
+ Decode into base 64 if asked and Format the given filebuffer by calling
_custom_format method.
"""
if decode_base_64:
@@ -83,43 +87,43 @@
"""
return NotImplementedError
-
+
def _pre(self, *args, **kwargs):
"""
- Implement a method in your parser to make a pre-treatment on datas before parsing
+ Implement a method in your parser to make a pre-treatment on datas before parsing
them, like concatenate stuff, and so... Work on self.filebuffer
"""
return NotImplementedError
def _parse(self, *args, **kwargs):
"""
- Implement a method in your parser to save the result of parsing self.filebuffer
+ Implement a method in your parser to save the result of parsing self.filebuffer
in self.result_row_list instance property.
"""
return NotImplementedError
-
+
def _validate(self, *args, **kwargs):
"""
Implement a method in your parser to validate the self.result_row_list instance
property and raise an error if not valid.
"""
return NotImplementedError
-
+
def _post(self, *args, **kwargs):
"""
Implement a method in your parser to make some last changes on the result of parsing
- the datas, like converting dates, computing commission, ...
+ the datas, like converting dates, computing commission, ...
Work on self.result_row_list and put the commission global amount if any
in the self.commission_global_amount one.
"""
return NotImplementedError
-
-
-
+
+
+
def get_st_line_vals(self, line, *args, **kwargs):
"""
- Implement a method in your parser that must return a dict of vals that can be
- passed to create method of statement line in order to record it. It is the responsibility
+ Implement a method in your parser that must return a dict of vals that can be
+ passed to create method of statement line in order to record it. It is the responsibility
of every parser to give this dict of vals, so each one can implement his
own way of recording the lines.
:param: line: a dict of vals that represent a line of result_row_list
@@ -133,17 +137,17 @@
}
"""
return NotImplementedError
-
+
def get_st_line_commision(self, *args, **kwargs):
"""
This is called by the importation method to create the commission line in
the bank statement. We will always create one line for the commission in the
- bank statement, but it could be computated from a value of each line, or given
+ bank statement, but it could be computated from a value of each line, or given
in a single line for the whole file.
return: float of the whole commission (self.commission_global_amount)
"""
return self.commission_global_amount
-
+
def parse(self, filebuffer, *args, **kwargs):
"""
This will be the method that will be called by wizard, button and so
@@ -151,7 +155,7 @@
that need to be define for each parser.
Return:
[] of rows as {'key':value}
-
+
Note: The row_list must contain only value that are present in the account.
bank.statement.line object !!!
"""
@@ -165,7 +169,7 @@
self._validate(*args, **kwargs)
self._post(*args, **kwargs)
return self.result_row_list
-
+
def itersubclasses(cls, _seen=None):
"""
itersubclasses(cls)
@@ -179,7 +183,7 @@
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
- >>>
+ >>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
@@ -204,7 +208,7 @@
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
-
+
def new_bank_statement_parser(parser_name, *args, **kwargs):
"""
Return an instance of the good parser class base on the providen name
@@ -215,4 +219,3 @@
if cls.parser_for(parser_name):
return cls(parser_name, *args, **kwargs)
raise ValueError
-
=== modified file 'account_statement_base_import/statement.py'
--- account_statement_base_import/statement.py 2012-09-25 08:05:34 +0000
+++ account_statement_base_import/statement.py 2013-04-16 08:36:25 +0000
@@ -30,33 +30,39 @@
import sys
import traceback
+
class AccountStatementProfil(Model):
_inherit = "account.statement.profile"
-
-
+
+
def get_import_type_selection(self, cr, uid, context=None):
"""
Has to be inherited to add parser
"""
return [('generic_csvxls_so', 'Generic .csv/.xls based on SO Name')]
-
-
+
+ def _get_import_type_selection(self, cr, uid, context=None):
+ """
+ Has to be inherited to add parser
+ """
+ return self.get_import_type_selection(cr, uid, context=context)
+
_columns = {
- 'launch_import_completion': fields.boolean("Launch completion after import",
+ 'launch_import_completion': fields.boolean("Launch completion after import",
help="Tic that box to automatically launch the completion on each imported\
file using this profile."),
'last_import_date': fields.datetime("Last Import Date"),
'rec_log': fields.text('log', readonly=True),
- 'import_type': fields.selection(get_import_type_selection, 'Type of import', required=True,
+ 'import_type': fields.selection(_get_import_type_selection, 'Type of import', required=True,
help = "Choose here the method by which you want to import bank statement for this profile."),
-
+
}
-
+
def write_logs_after_import(self, cr, uid, ids, statement_id, num_lines, context):
"""
- Write the log in the logger + in the log field of the profile to report the user about
+ Write the log in the logger + in the log field of the profile to report the user about
what has been done.
-
+
:param int/long statement_id: ID of the concerned account.bank.statement
:param int/long num_lines: Number of line that have been parsed
:return: True
@@ -71,11 +77,11 @@
+ _("Bank Statement ID %s have been imported with %s lines ") %(statement_id, num_lines)]
log = "\n".join(log_line)
self.write(cr, uid, id, {'rec_log' : log, 'last_import_date':import_date}, context=context)
- logger.notifyChannel('Bank Statement Import', netsvc.LOG_INFO,
+ logger.notifyChannel('Bank Statement Import', netsvc.LOG_INFO,
"Bank Statement ID %s have been imported with %s lines "%(statement_id, num_lines))
return True
-
- def prepare_global_commission_line_vals(self, cr, uid, parser,
+
+ def prepare_global_commission_line_vals(self, cr, uid, parser,
result_row_list, profile, statement_id, context):
"""
Prepare the global commission line if there is one. The global
@@ -110,15 +116,15 @@
'already_completed': True,
}
return comm_values
-
- def prepare_statetement_lines_vals(self, cursor, uid, parser_vals,
+
+ def prepare_statement_lines_vals(self, cursor, uid, parser_vals,
account_payable, account_receivable, statement_id, context):
"""
Hook to build the values of a line from the parser returned values. At
least it fullfill the statement_id and account_id. Overide it to add your
- own completion if needed.
-
- :param dict of vals from parser for account.bank.statement.line (called by
+ own completion if needed.
+
+ :param dict of vals from parser for account.bank.statement.line (called by
parser.get_st_line_vals)
:param int/long account_payable: ID of the receivable account to use
:param int/long account_receivable: ID of the payable account to use
@@ -136,7 +142,7 @@
account_payable
)
return values
-
+
def statement_import(self, cursor, uid, ids, profile_id, file_stream, ftype="csv", context=None):
"""
Create a bank statement with the given profile and parser. It will fullfill the bank statement
@@ -144,7 +150,7 @@
the right account). This will be done in a second step with the completion rules.
It will also create the commission line if it apply and record the providen file as
an attachement of the bank statement.
-
+
:param int/long profile_id: ID of the profile used to import the file
:param filebuffer file_stream: binary of the providen file
:param char: ftype represent the file exstension (csv by default)
@@ -160,7 +166,7 @@
_("No Profile !"),
_("You must provide a valid profile to import a bank statement !"))
prof = prof_obj.browse(cursor,uid,profile_id,context)
-
+
parser = new_bank_statement_parser(prof.import_type, ftype=ftype)
result_row_list = parser.parse(file_stream)
# Check all key are present in account.bank.statement.line !!
@@ -170,15 +176,15 @@
raise osv.except_osv(
_("Missing column !"),
_("Column %s you try to import is not present in the bank statement line !") %(col))
-
- statement_id = statement_obj.create(cursor,uid,{'profile_id':prof.id,},context)
+
+ statement_id = statement_obj.create(cursor,uid,{'profile_id':prof.id,},context)
account_receivable, account_payable = statement_obj.get_default_pay_receiv_accounts(cursor, uid, context)
try:
# Record every line in the bank statement and compute the global commission
# based on the commission_amount column
for line in result_row_list:
parser_vals = parser.get_st_line_vals(line)
- values = self.prepare_statetement_lines_vals(cursor, uid, parser_vals, account_payable,
+ values = self.prepare_statement_lines_vals(cursor, uid, parser_vals, account_payable,
account_receivable, statement_id, context)
# we finally create the line in system
statement_line_obj.create(cursor, uid, values, context=context)
@@ -186,7 +192,7 @@
comm_vals = self.prepare_global_commission_line_vals(cursor, uid, parser, result_row_list, prof, statement_id, context)
if comm_vals:
res = statement_line_obj.create(cursor, uid, comm_vals,context=context)
-
+
attachment_obj.create(
cursor,
uid,
@@ -199,16 +205,17 @@
'res_id': statement_id,
},
context=context
- )
+ )
# If user ask to launch completion at end of import, do it !
if prof.launch_import_completion:
statement_obj.button_auto_completion(cursor, uid, [statement_id], context)
-
+
# Write the needed log infos on profile
self.write_logs_after_import(cursor, uid, prof.id, statement_id,
len(result_row_list), context)
-
+
except Exception, exc:
+ # ??? unlink without committing the cursor, useful?
statement_obj.unlink(cursor, uid, [statement_id])
error_type, error_value, trbk = sys.exc_info()
st = "Error: %s\nDescription: %s\nTraceback:" % (error_type.__name__, error_value)
@@ -217,7 +224,6 @@
_("Statement import error"),
_("The statement cannot be created : %s") %(st))
return statement_id
-
class AccountStatementLine(Model):
@@ -229,7 +235,7 @@
_inherit = "account.bank.statement.line"
_columns={
- 'commission_amount': fields.sparse(type='float', string='Line Commission Amount',
+ 'commission_amount': fields.sparse(type='float', string='Line Commission Amount',
serialization_field='additionnal_bank_fields'),
}
=== modified file 'account_statement_base_import/statement_view.xml'
--- account_statement_base_import/statement_view.xml 2012-08-02 12:46:12 +0000
+++ account_statement_base_import/statement_view.xml 2013-04-16 08:36:25 +0000
@@ -16,7 +16,7 @@
<field name="import_type"/>
<button name="%(account_statement_base_import.statement_importer_action)d"
string="Import Bank Statement"
- type="action" icon="gtk-ok"
+ type="action" icon="gtk-ok"
colspan = "2"/>
<separator colspan="4" string="Import Logs"/>
<field name="rec_log" colspan="4" nolabel="1"/>
@@ -39,6 +39,5 @@
</field>
</record>
-
</data>
</openerp>
=== modified file 'account_statement_base_import/wizard/import_statement.py'
--- account_statement_base_import/wizard/import_statement.py 2012-11-27 10:51:47 +0000
+++ account_statement_base_import/wizard/import_statement.py 2013-04-16 08:36:25 +0000
@@ -29,7 +29,7 @@
class CreditPartnerStatementImporter(osv.osv_memory):
_name = "credit.statement.import"
-
+
def default_get(self, cr, uid, fields, context=None):
if context is None: context = {}
res = {}
@@ -41,7 +41,7 @@
other_vals = self.onchange_profile_id(cr, uid, [], res['profile_id'], context=context)
res.update(other_vals.get('value',{}))
return res
-
+
_columns = {
'profile_id': fields.many2one('account.statement.profile',
'Import configuration parameter',
@@ -63,23 +63,23 @@
),
'receivable_account_id': fields.many2one('account.account',
'Force Receivable/Payable Account'),
- 'force_partner_on_bank': fields.boolean('Force partner on bank move',
+ 'force_partner_on_bank': fields.boolean('Force partner on bank move',
help="Tic that box if you want to use the credit insitute partner\
in the counterpart of the treasury/banking move."
),
- 'balance_check': fields.boolean('Balance check',
+ 'balance_check': fields.boolean('Balance check',
help="Tic that box if you want OpenERP to control the start/end balance\
before confirming a bank statement. If don't ticked, no balance control will be done."
),
- }
-
+ }
+
def onchange_profile_id(self, cr, uid, ids, profile_id, context=None):
res={}
if profile_id:
c = self.pool.get("account.statement.profile").browse(cr,uid,profile_id)
res = {'value': {'partner_id': c.partner_id and c.partner_id.id or False,
'journal_id': c.journal_id and c.journal_id.id or False, 'commission_account_id': \
- c.commission_account_id and c.commission_account_id.id or False,
+ c.commission_account_id and c.commission_account_id.id or False,
'receivable_account_id': c.receivable_account_id and c.receivable_account_id.id or False,
'commission_a':c.commission_analytic_id and c.commission_analytic_id.id or False,
'force_partner_on_bank':c.force_partner_on_bank,