mirror of
https://github.com/bringout/oca-ocb-core.git
synced 2026-04-20 13:32:04 +02:00
19.0 vanilla
This commit is contained in:
parent
0a7ae8db93
commit
991d2234ca
416 changed files with 646602 additions and 300844 deletions
|
|
@ -0,0 +1,47 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import ast
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import typing
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from odoo.cli.upgrade_code import FileManager
|
||||
|
||||
|
||||
def upgrade(file_manager: FileManager):
    """Convert legacy ``_sql_constraints`` lists into ``models.Constraint`` fields.

    For every ``.py`` file, each ``_sql_constraints = [...]`` assignment is
    replaced by one ``_<name> = models.Constraint(...)`` attribute per
    constraint tuple.  Files where the pattern still matches after the
    substitution are reported with a warning.
    """
    log = logging.getLogger(__name__)
    # match the whole list literal assigned to _sql_constraints (no nested ']')
    sql_expression_re = re.compile(r"\b_sql_constraints\s*=\s*\[([^\]]+)]")
    ind = ' ' * 4

    def build_sql_object(match):
        # get the tuple of expressions
        try:
            constraints = ast.literal_eval('[' + match.group(1) + ']')
        except (SyntaxError, ValueError):
            # skip if we cannot parse; literal_eval raises ValueError for
            # non-literal expressions, which previously was not caught and
            # crashed the migration
            return match.group(0)
        result = []
        for name, definition, *messages in constraints:
            message = messages[0] if messages else ''
            constructor = 'Constraint'
            if message:
                # format on 2 lines
                message_repr = json.dumps(message)  # so that the message is in double quotes
                args = f"\n{ind * 2}{definition!r},\n{ind * 2}{message_repr},\n{ind}"
            elif len(definition) > 60:
                # long definitions go on their own line
                args = f"\n{ind * 2}{definition!r}"
            else:
                args = repr(definition)
            result.append(f"_{name} = models.{constructor}({args})")
        return f"\n{ind}".join(result)

    for file in file_manager:
        if file.path.suffix != '.py':
            continue
        content = file.content
        content = sql_expression_re.sub(build_sql_object, content)
        if sql_expression_re.search(content):
            # pattern survived the substitution: manual intervention needed
            log.warning("Failed to replace in file %s", file.path)
        file.content = content
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
def upgrade(file_manager):
    """Rename the JSON route type in controllers: ``type="json"`` → ``type="jsonrpc"``."""
    controller_files = [
        f for f in file_manager
        if 'controllers' in f.path.parts and f.path.suffix == '.py'
    ]
    total = len(controller_files)

    for index, file in enumerate(controller_files):
        # handle both quoting styles used across the codebase
        content = file.content
        content = content.replace('type="json",', 'type="jsonrpc",')
        content = content.replace("type='json',", "type='jsonrpc',")
        file.content = content
        file_manager.print_progress(index, total)
|
||||
|
|
@ -0,0 +1,185 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import csv
|
||||
import difflib
|
||||
import glob
|
||||
import re
|
||||
import typing
|
||||
from collections import defaultdict
|
||||
from io import StringIO
|
||||
|
||||
from lxml.builder import E
|
||||
import lxml.etree as etree
|
||||
import polib
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from odoo.cli.upgrade_code import FileManager
|
||||
|
||||
|
||||
# Translatable fields per model whose l10n_* translations are migrated from
# the .po files into the data files (as `fname@lang` CSV columns or
# <field name="fname@lang"> XML nodes).  Only these (model, field) pairs
# are touched by upgrade() below.
MODELS = {
    'account.account.tag': ['name'],
    'account.cash.rounding': ['name'],
    'account.disallowed.expenses.category': ['name'],
    'account.incoterms': ['name'],
    'account.journal': ['name'],
    'account.payment.method': ['name'],
    'account.report': ['name'],
    'account.report.column': ['name'],
    'account.report.line': ['name'],
    'account.tax.group': ['name'],
    'account.tax.report': ['name'],
    'hr.contract.salary.benefit': ['name', 'description', 'fold_label'],
    'hr.contract.salary.benefit.value': ['name'],
    'hr.contract.salary.personal.info': ['name', 'helper', 'value', 'placeholder'],
    'hr.leave.type': ['name'],
    'hr.payroll.dashboard.warning': ['name'],
    'hr.payroll.structure': ['payslip_name'],
    'hr.salary.rule': ['name'],
    'hr.salary.rule.category': ['name'],
    'hr.work.entry.type': ['name'],
    'l10n_br.operation.type': ['name'],
    'l10n_eg_edi.activity.type': ['name'],
    'l10n_eg_edi.uom.code': ['name'],
    'l10n_es_edi_facturae.ac_role_type': ['name'],
    'l10n_it.document.type': ['name'],
    'l10n_latam.document.type': ['name'],
    'l10n_latam.identification.type': ['name'],
    'l10n_mx_edi.res.locality': ['name'],
    'l10n_pe.res.city.district': ['name'],
    'l10n_ro_saft.account.asset.category': ['description'],
    'l10n_ro_saft.tax.type': ['description'],
    'product.template': ['name'],
    'res.city': ['name'],
    'res.country.state': ['name'],
    'res.currency': ['l10n_cl_short_name', 'l10n_cl_currency_code'],
    'res.partner.category': ['name'],
    'res.partner.title': ['name', 'shortcut'],
}
|
||||
|
||||
def parse_xmlid(xmlid: str, default_module: str) -> tuple[str, str]:
    """Split an XML id into ``(module, name)``.

    Unqualified ids (no dot) belong to *default_module*.
    """
    module, dot, name = xmlid.partition('.')
    if not dot:
        return default_module, xmlid
    return module, name
|
||||
|
||||
|
||||
def data_file_module_name(f):
    """Return the module owning data file *f*: the path part right before 'data'."""
    parts = f.path.parts
    return parts[parts.index('data') - 1]
|
||||
|
||||
|
||||
def upgrade(file_manager: FileManager):
    """Move l10n_* model translations from .po files into the data files.

    Translations of the fields listed in MODELS are collected from the addon
    ``i18n/*.po`` files, removed from there, and re-emitted next to the
    source value in the XML/CSV data files as ``<field name="fname@lang">``
    nodes or ``fname@lang`` CSV columns.
    """
    # translation files shipped by localization modules (l10n_*/i18n/*.po[t])
    translation_files = [
        f for f in file_manager
        if f.path.suffix in ('.po', '.pot')
        and f.path.parts[-3].startswith('l10n_')
        and f.path.parts[-2] == 'i18n'
    ]
    nb_translation_files = len(translation_files)
    # XML/CSV data files of localization modules
    data_files = [
        f for f in file_manager
        if f.path.suffix in ('.xml', '.csv')
        and 'data' in f.path.parts
        and data_file_module_name(f).startswith('l10n_')
    ]
    nb_data_files = len(data_files)

    translations = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(dict))))  # {module: {model: {xmlid: {fname: {lang: msgstr}}}}}
    for i, file in enumerate(translation_files):
        file_manager.print_progress(i, nb_translation_files + nb_data_files, file.path)
        module_name = file.path.parts[-3]
        lang = file.path.stem  # e.g. 'fr' from i18n/fr.po
        pofile = polib.pofile(str(file.path))
        # parsed a second time so we keep a pristine copy to detect changes
        original_pofile = polib.pofile(str(file.path))
        for entry in pofile:
            if file.path.suffix == '.po':
                # collect the translated terms for the tracked model fields
                for occurence in entry.occurrences:
                    if occurence[0].startswith('model:') or occurence[0].startswith('model_terms:'):
                        # occurrence format: "model:<model>,<fname>:<xmlid>"
                        xmlid = occurence[0].split(':')[2]
                        model, fname = occurence[0].split(':')[1].split(',')
                        if model in MODELS and fname in MODELS[model]:
                            translations[module_name][model][xmlid][fname][lang] = entry.msgstr
            # drop the occurrences that are now handled in the data files
            entry.occurrences = [
                occurence
                for occurence in entry.occurrences
                if not any(
                    occurence[0].startswith(f'model:{model},{fname}')
                    or occurence[0].startswith(f'model_terms:{model},{fname}')
                    for model in MODELS
                    for fname in MODELS[model]
                )
            ]
            if not entry.occurrences:
                # entry no longer referenced anywhere: flag it for removal
                entry.obsolete = True

        for entry in pofile.obsolete_entries():
            pofile.remove(entry)
        if pofile != original_pofile:
            file.content = str(pofile)

    for i, file in enumerate(data_files):
        file_manager.print_progress(nb_translation_files + i, nb_translation_files + nb_data_files, file.path)
        module_name = data_file_module_name(file)
        if file.path.suffix == '.xml':
            tree = etree.parse(str(file.path))
            # every <record> whose model is tracked in MODELS
            for record_node in tree.xpath(f"""//record[{' or '.join(f"@model='{m}'" for m in MODELS)}]"""):
                model = record_node.attrib['model']
                xmlid = '.'.join(parse_xmlid(record_node.attrib['id'], module_name))
                for fname in MODELS[model]:
                    base_node = record_node.find(f"field[@name='{fname}']")
                    if base_node is not None:
                        # reuse the tail of the first child so inserted nodes keep the indentation
                        default_tail = base_node.getparent().getchildren()[0].tail
                        translated_node = None
                        for lang, translated in translations[module_name][model][xmlid][fname].items():
                            # skip empty translations and already-present nodes
                            if translated and record_node.find(f"field[@name='{fname}@{lang}']") is None:
                                translated_node = E('field', translated, name=f'{fname}@{lang}')
                                translated_node.tail = default_tail
                                base_node.addnext(translated_node)
                        if translated_node is not None:
                            base_node.tail = default_tail

            # keep the original file bytes and splice in only the added lines
            file.content = ''.join(
                diff[2:]
                for diff in difflib.ndiff(
                    file.content.splitlines(keepends=True),
                    etree.tostring(tree, encoding="utf-8").decode().splitlines(keepends=True),
                )
                # avoid any diff generated by lxml and only keep diff for the lines added
                if diff.startswith((' ', '-'))
                or re.match(r"""\+\s*<field name="\w+@""", diff)
            )
        elif file.path.suffix == '.csv':
            model = file.path.stem  # CSV data files are named after the model
            csv_file = csv.DictReader(file.content.splitlines())
            csv_data = list(csv_file)
            first_row = csv_data[0]
            first_xmlid = '.'.join(parse_xmlid(first_row['id'], module_name))
            # tracked fields actually present in this file (False for untracked models)
            fnames = model in MODELS and sorted(set(first_row.keys()) & set(MODELS[model]))
            if fnames:
                # NOTE(review): languages are probed on the first record only —
                # assumes every record of the file is translated into the same set
                langs = sorted({
                    lang
                    for fname in fnames
                    for lang, val in translations[module_name][model][first_xmlid][fname].items()
                    if val
                })
                if langs:
                    buffer = StringIO()
                    writer = csv.DictWriter(
                        buffer,
                        fieldnames=csv_file.fieldnames + [
                            f'{fname}@{lang}'
                            for lang in langs
                            for fname in fnames
                            if f'{fname}@{lang}' not in csv_file.fieldnames
                        ],
                        delimiter=',',
                        quotechar='"',
                        quoting=csv.QUOTE_ALL
                    )
                    writer.writeheader()
                    for row in csv_data:
                        xmlid = '.'.join(parse_xmlid(row['id'], module_name))
                        for lang in langs:
                            for fname in fnames:
                                row[f'{fname}@{lang}'] = translations[module_name][model][xmlid][fname].get(lang, "")
                        writer.writerow(row)
                    file.content = buffer.getvalue()
|
||||
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,18 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import typing
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from odoo.cli.upgrade_code import FileManager
|
||||
|
||||
|
||||
def upgrade(file_manager: FileManager):
    """Rewrite the legacy model shortcuts ``._cr``/``._uid``/``._context``
    into their ``.env`` equivalents in every Python file."""
    legacy_attr_re = re.compile(r"\._(cr|uid|context)\b")

    for file in file_manager:
        if file.path.suffix == '.py':
            file.content = legacy_attr_re.sub(r'.env.\1', file.content)
|
||||
|
|
@ -0,0 +1,274 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import ast
|
||||
import contextlib
|
||||
import datetime
|
||||
import functools
|
||||
import io
|
||||
import logging
|
||||
import re
|
||||
import typing
|
||||
|
||||
from lxml import etree
|
||||
|
||||
_logger = logging.getLogger(__name__)  # module-level logger for this upgrade script
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from odoo.cli.upgrade_code import FileManager
|
||||
|
||||
|
||||
class NoChange(Exception):
    """Raised when a domain needs no rewrite or could not be rewritten."""
|
||||
|
||||
|
||||
class InvertUnaryTransformer(ast.NodeTransformer):
    """Fold a unary minus applied to a literal (``-X``) into the constant itself."""

    def visit_UnaryOp(self, node: ast.UnaryOp):
        operand = node.operand
        is_negated_constant = isinstance(node.op, ast.USub) and isinstance(operand, ast.Constant)
        if not is_negated_constant:
            return node
        operand.value = -operand.value
        return operand
|
||||
|
||||
|
||||
class UpgradeDomainTransformer(ast.NodeTransformer):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.log = None
|
||||
self._invert_transformer = InvertUnaryTransformer()
|
||||
|
||||
def transform(self, domain):
|
||||
self.log = None
|
||||
node = ast.parse(domain.strip(), mode='eval')
|
||||
node = self._invert_transformer.visit(node)
|
||||
result = self.visit(node)
|
||||
if self.log:
|
||||
raise NoChange(*self.log)
|
||||
elif self.log is None:
|
||||
raise NoChange()
|
||||
return ast.unparse(result)
|
||||
|
||||
def _cannot_parse(self, node, msg):
|
||||
if not self.log:
|
||||
self.log = []
|
||||
self.log.append(msg + ' ' + ast.unparse(node))
|
||||
return node
|
||||
|
||||
def visit_List(self, node: ast.List):
|
||||
# same implementation as for tuples
|
||||
return self.visit_Tuple(node)
|
||||
|
||||
def visit_Tuple(self, node):
|
||||
if len(node.elts) != 3 or not isinstance(node.elts[0], ast.Constant):
|
||||
return self.generic_visit(node)
|
||||
value_node = node.elts[2]
|
||||
if isinstance(value_node, (ast.Tuple, ast.List)):
|
||||
# convert values one by one
|
||||
value_node.elts = [
|
||||
self.visit_Tuple(ast.Tuple([ast.Constant('x'), ast.Constant('='), el])).elts[2]
|
||||
for el in value_node.elts
|
||||
]
|
||||
return node
|
||||
value = self.visit(value_node)
|
||||
if isinstance(value, str):
|
||||
# remove now
|
||||
value = value.removeprefix('now ')
|
||||
# remove today (if possible)
|
||||
if value.startswith('today ') and re.search(r'=\d+[dmy]|=[a-z]', value):
|
||||
value = value.removeprefix('today ')
|
||||
# update the operator?
|
||||
if '!' in value:
|
||||
value = value.replace('!', '')
|
||||
operator = node.elts[1].value
|
||||
if operator == '>':
|
||||
operator += '='
|
||||
elif operator == '<=':
|
||||
operator = operator[:-1]
|
||||
else:
|
||||
return self._cannot_parse(node)
|
||||
node.elts[1].value = operator
|
||||
node.elts[2] = ast.Constant(value)
|
||||
if not self.log:
|
||||
self.log = []
|
||||
return node
|
||||
|
||||
@staticmethod
|
||||
def parse_offset_keywords(kws: list[ast.keyword]):
|
||||
values = {kw.arg: kw.value.value for kw in kws if isinstance(kw.value, ast.Constant)}
|
||||
if len(values) != len(kws):
|
||||
return None
|
||||
result = ""
|
||||
|
||||
def build(value, suffix, eq=False):
|
||||
nonlocal result
|
||||
if eq:
|
||||
sign = '='
|
||||
elif value < 0:
|
||||
sign = '-'
|
||||
value = -value
|
||||
else:
|
||||
sign = '+'
|
||||
result += f" {sign}{value}{suffix}"
|
||||
|
||||
match values:
|
||||
case {'weekday': 0, 'days': days}:
|
||||
values.pop('weekday')
|
||||
result += ' =monday'
|
||||
days -= 1
|
||||
if days:
|
||||
values['days'] = days
|
||||
else:
|
||||
values.pop('days')
|
||||
|
||||
for name, suffix in (
|
||||
('days', 'd'),
|
||||
('months', 'm'),
|
||||
('years', 'y'),
|
||||
('weeks', 'w'),
|
||||
('hours', 'H'),
|
||||
('minutes', 'M'),
|
||||
('seconds', 'S'),
|
||||
):
|
||||
if value := values.pop(name, None):
|
||||
build(value, suffix)
|
||||
for name, suffix in (
|
||||
('day', 'd'),
|
||||
('month', 'm'),
|
||||
('year', 'y'),
|
||||
('hour', 'H'),
|
||||
('minute', 'M'),
|
||||
('second', 'S'),
|
||||
):
|
||||
if value := values.pop(name, None):
|
||||
build(value, suffix, eq=True)
|
||||
if values:
|
||||
# not everything was parsed
|
||||
return None
|
||||
return result
|
||||
|
||||
def visit_Call(self, node: ast.Call):
|
||||
value = None
|
||||
match node.func, node.args, node.keywords:
|
||||
case ast.Name(id='context_today'), [], []:
|
||||
return "now"
|
||||
case ast.Attribute(value=ast.Attribute(value=ast.Name(id='datetime'), attr='datetime'), attr='now'), [], []:
|
||||
return "now"
|
||||
case ast.Attribute(value=value_node, attr='to_utc'), [], []:
|
||||
value = self.visit(value_node)
|
||||
case ast.Attribute(value=value, attr='strftime'), [ast.Constant(value=format)], _:
|
||||
if isinstance(value, ast.Name) and value.id == 'time':
|
||||
# time.strftime is sometimes called directly
|
||||
value = "now"
|
||||
else:
|
||||
value = self.visit(value)
|
||||
if isinstance(value, str):
|
||||
if len(format) <= 10:
|
||||
value = value.replace('now', 'today')
|
||||
if '-01' in format: # some people format the date by setting day to 1
|
||||
value += ' =1d'
|
||||
case ast.Name(id='relativedelta'), [], kws:
|
||||
value = self.parse_offset_keywords(kws)
|
||||
case ast.Attribute(value=ast.Name(id='datetime'), attr='timedelta'), [], kws:
|
||||
value = self.parse_offset_keywords(kws)
|
||||
case (ast.Attribute(value=ast.Name(id='datetime'), attr='timedelta'), [const], []) if isinstance(const, ast.Constant):
|
||||
value = self.parse_offset_keywords([ast.keyword('days', const)])
|
||||
case ast.Attribute(value=ast.Attribute(value=ast.Name(id='datetime'), attr='datetime'), attr='combine'), [value_node, time_node], []:
|
||||
value = self.visit(value_node)
|
||||
time_value = self.visit(time_node)
|
||||
if isinstance(value, str) and isinstance(time_value, datetime.time):
|
||||
if time_value == datetime.time.min:
|
||||
return value.replace('now', 'today')
|
||||
if time_value == datetime.time(23, 59, 59):
|
||||
return value.replace('now', 'today') + " +1d!"
|
||||
return self._cannot_parse(node, "call_combine")
|
||||
case ast.Attribute(value=ast.Name(id='datetime'), attr='time'), args, []:
|
||||
with contextlib.suppress(ValueError):
|
||||
return datetime.time(*(n.value for n in args))
|
||||
|
||||
if isinstance(value, str):
|
||||
return value
|
||||
return self._cannot_parse(node, "call")
|
||||
|
||||
def visit_BinOp(self, node):
|
||||
left = self.visit(node.left)
|
||||
right = self.visit(node.right)
|
||||
if isinstance(left, str) and isinstance(right, str):
|
||||
if isinstance(node.op, ast.Add):
|
||||
return left + right
|
||||
if isinstance(node.op, ast.Sub):
|
||||
right = right.translate(str.maketrans({'+': '-', '-': '+'}))
|
||||
return left + right
|
||||
return self._cannot_parse(node, 'binop')
|
||||
return node
|
||||
|
||||
|
||||
def upgrade(file_manager: FileManager):
    """Rewrite dynamic date expressions in view/data XML domains.

    Parses each XML file, collects every domain attribute/field, runs it
    through UpgradeDomainTransformer, then substitutes the new domains back
    into the raw file content (to preserve the original formatting).
    """
    upgrade_domain = UpgradeDomainTransformer()
    # strip all whitespace so domains can be matched regardless of formatting
    no_whitespace = functools.partial(re.compile(r'\s', re.MULTILINE).sub, '')
    for file in file_manager:
        if not (file.path.parent.name in ('data', 'report', 'views') and file.path.suffix == '.xml'):
            continue
        content = file.content
        # tree = etree.fromstring(content) # does not support declarations
        try:
            tree = etree.parse(io.BytesIO(bytes(content, 'utf-8')))
        except Exception as e:  # noqa: BLE001
            _logger.info("Failed to parse the file %s: %s", file.path, e)
            continue
        replacements = {}
        # every place a domain can appear in view/data files
        all_domains = [el.attrib['domain'] for el in tree.findall('.//filter[@domain]')]
        all_domains.extend(el.text for el in tree.findall(".//field[@name='domain_force']"))
        all_domains.extend(el.text for el in tree.findall(".//field[@name='domain']"))
        for domain in all_domains:
            if not domain:
                continue
            try:
                new_domain = upgrade_domain.transform(domain)
                replacements[no_whitespace(domain)] = new_domain
            except NoChange as e:
                _logger.debug("No change %s", e)
            except Exception:  # noqa: BLE001
                # check if contains dynamic part (%(xmlid)s placeholders are expected to fail)
                level = logging.INFO if re.search(r"%\([a-z0-9\.]+\)[sd]", domain) else logging.WARNING
                _logger.log(level, "Failed to parse the domain %r", domain)
        if not replacements:
            continue

        def replacement_attr(match):
            # re-parse the attribute to get its unescaped value
            value = etree.fromstring(f"<x {match[0]} />").attrib["domain"]
            domain = replacements.get(no_whitespace(value))
            if not domain:
                return match[0]
            # re-escape XML special characters before injecting into raw text
            domain = domain.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
            raw_value = repr(domain).strip('"')
            return f"{match[1]}{raw_value}{match[3]}"

        def replacement_tag(match):
            # re-parse the element text to get its unescaped value
            value = etree.fromstring(f"<x>{match[2]}</x>").text
            domain = replacements.get(no_whitespace(value))
            if not domain:
                return match[0]
            # re-escape XML special characters before injecting into raw text
            domain = domain.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
            return f"{match[1]}{domain}{match[3]}"

        # substitute in the raw text so untouched lines keep their formatting
        content = re.sub(r'(domain=")(.+?)(")', replacement_attr, content, flags=re.MULTILINE | re.DOTALL)
        content = re.sub(r'(name="(?:domain|domain_force)"[^>]*>)(.+?)(<)', replacement_tag, content, flags=re.MULTILINE | re.DOTALL)
        file.content = content
|
||||
|
||||
|
||||
def test(domain, result=''):
    """Transform *domain* and assert the expected outcome (debug helper).

    With *result* given, the transformed domain must equal it exactly;
    otherwise the transformation only has to change something.
    """
    transformed = UpgradeDomainTransformer().transform(domain)
    _logger.debug("%s", transformed)
    if not result:
        assert transformed != domain, f"Failed to change {domain!r}"
        return
    assert transformed == result, f"Failed to parse {domain!r}; got {transformed!r} instead of {result!r}"
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke tests: run this module directly to sanity-check the
    # domain transformer against known conversions.
    logging.basicConfig(level=logging.DEBUG)
    test("[('dt', '>', context_today())]", "[('dt', '>', 'now')]")
    test("[('dt', '>', context_today() - relativedelta(days=3))]", "[('dt', '>', '-3d')]")
    test("[('dt', '>', (context_today() + relativedelta(months=-1)).strftime('%Y-%m-%d'))]", "[('dt', '>', 'today -1m')]")
    test("[('dt', '>=', context_today() - relativedelta(day=1))]", "[('dt', '>=', '=1d')]")
    test("[('dt', '>', (datetime.datetime.combine(context_today() + relativedelta(days=1,weekday=0), datetime.time(0,0,0)).to_utc()))]", "[('dt', '>', '=monday')]")
    # complex real-world domain: only require that it changes
    test("['|', ('start_date', 'in', [context_today().strftime('%Y-%m-01'), (context_today() - relativedelta(months=1)).strftime('%Y-%m-01')]), '&', '&', ('start_date', '>=', (context_today() - relativedelta(months=5)).strftime('%Y-%m-01')), ('end_date', '<', (context_today() + relativedelta(months=3)).strftime('%Y-%m-01')), ('periodicity', '=', 'trimester')]")
|
||||
|
|
@ -0,0 +1,207 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import csv
|
||||
import difflib
|
||||
import logging
|
||||
import re
|
||||
import typing
|
||||
from collections import defaultdict
|
||||
from io import StringIO
|
||||
|
||||
import lxml.etree as etree
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from odoo.cli.upgrade_code import FileManager
|
||||
|
||||
_logger = logging.getLogger(__name__)  # module-level logger for this upgrade script

# Manual sign overrides for report tags whose sign cannot be derived from the
# tax templates: {country xmlid: {tag formula: sign}}
manual = {
    'base.ng': {'100': -1},
}
|
||||
|
||||
|
||||
def template2country(template):
    """Map a chart-template code to its country xmlid (first two letters)."""
    country_code = template[:2]
    return f"base.{country_code}"
|
||||
|
||||
|
||||
def data_file_module_name(f):
    """Name of the module owning data file *f*: the path part before 'data'."""
    path_parts = f.path.parts
    data_index = path_parts.index('data')
    return path_parts[data_index - 1]
|
||||
|
||||
|
||||
def tax_grouper(row_iter):
    """Group consecutive CSV rows into one batch per tax.

    A new batch starts on every row that has a non-empty 'id' cell; the
    continuation rows (extra repartition lines) have an empty 'id'.

    Yields lists of rows.  An empty iterator yields nothing (the previous
    implementation called ``next()`` on it, which raises RuntimeError inside
    a generator — PEP 479).
    """
    current_batch = None
    for row in row_iter:
        if current_batch is None:
            # the very first row always opens the first batch
            current_batch = [row]
        elif row['id']:
            yield current_batch
            current_batch = [row]
        else:
            current_batch.append(row)
    if current_batch is not None:
        yield current_batch
|
||||
|
||||
|
||||
def tag_factor(tax_rows):
    """Sum the repartition factor applied to each tag, per document type.

    Returns ``{document_type: {tag: total_factor_percent}}``; an empty
    factor cell counts as 100%.
    """
    factors = defaultdict(lambda: defaultdict(float))
    for line in tax_rows:
        doc_type = line['repartition_line_ids/document_type']
        percent = float(line.get('repartition_line_ids/factor_percent') or 100)
        tag_cell = line.get('repartition_line_ids/tag_ids')
        if not tag_cell:
            continue
        for tag in tag_cell.split('||'):
            factors[doc_type][tag] += percent
    return factors
|
||||
|
||||
|
||||
def test_tag_signs(tag_signs):
    """Sanity-check detected report signs against known Belgian/Italian values.

    Debug helper (see the commented-out call in ``upgrade``); raises
    AssertionError when a detected sign disagrees with these references.
    """
    assert tag_signs['base.be']['03'] == -1, tag_signs['base.be']
    assert tag_signs['base.be']['49'] == 1, tag_signs['base.be']
    assert tag_signs['base.be']['54'] == -1, tag_signs['base.be']
    assert tag_signs['base.be']['62'] == 1, tag_signs['base.be']
    assert tag_signs['base.be']['64'] == 1, tag_signs['base.be']
    assert tag_signs['base.be']['81'] == 1, tag_signs['base.be']
    assert tag_signs['base.be']['85'] == -1, tag_signs['base.be']
    assert tag_signs['base.it']['4v'] == -1, tag_signs['base.it']
|
||||
|
||||
|
||||
def remove_sign(tag_string, tag_signs, type_tax_use, document_type, tag2factor):
    """Strip the ``+``/``-`` prefixes from a ``||``-separated tag list.

    Side effect: for 'sale'/'purchase' taxes, records the report sign deduced
    for each prefixed tag into *tag_signs* (``'error'`` when two occurrences
    disagree).  *tag2factor* is currently unused but kept in the signature.
    """
    if not tag_string:
        return tag_string

    cleaned = []
    for raw_tag in tag_string.split('||'):
        raw_tag = raw_tag.strip()
        if not raw_tag.startswith(('-', '+')):
            cleaned.append(raw_tag)
            continue
        prefix, bare_tag = raw_tag[0], raw_tag[1:]
        cleaned.append(bare_tag)

        if type_tax_use not in ('sale', 'purchase'):
            continue

        report_sign = 1 if prefix == '+' else -1  # tax_negate
        if (type_tax_use, document_type) in [('sale', 'invoice'), ('purchase', 'refund')]:  # tax_tag_invert
            report_sign *= -1

        if recorded_sign := tag_signs.get(bare_tag):
            if recorded_sign not in (report_sign, 'error'):
                tag_signs[bare_tag] = 'error'
        else:
            tag_signs[bare_tag] = report_sign

    return '||'.join(cleaned)
|
||||
|
||||
|
||||
def upgrade(file_manager: FileManager):
    """Move the +/- sign from tax-template tags onto the report formulas.

    First pass strips the sign prefixes from the ``account.tax-*.csv``
    template files while recording, per country, the report sign each tag
    implies.  Second pass negates the matching ``tax_tags`` formulas in the
    XML report definitions when a tag's sign was negative.
    """
    tax_template_files = [
        f for f in file_manager
        if f.path.suffix == '.csv'
        and f.path.parts[-2] == 'template'
        and f.path.stem.startswith('account.tax-')
    ]
    nb_template_files = len(tax_template_files)
    tax_report_files = [
        f for f in file_manager
        if f.path.suffix == '.xml'
        and 'data' in f.path.parts
        and data_file_module_name(f).startswith('l10n_')
    ]
    nb_report_files = len(tax_report_files)

    tag_signs = defaultdict(dict)  # {country xmlid: {tag: 1 | -1 | 'error'}}
    for i, file in enumerate(tax_template_files):
        file_manager.print_progress(i, nb_template_files + nb_report_files, file.path)
        # file stem is 'account.tax-<template>'; template gives the country
        country = template2country(file.path.stem.split('-', maxsplit=1)[1])
        country_tax_signs = tag_signs[country]
        csv_file = csv.DictReader(file.content.splitlines())
        csv_data = list(csv_file)
        if 'repartition_line_ids/document_type' not in csv_data[0]:
            # old-style file without repartition columns: nothing to migrate
            continue

        # map each child tax to the type_tax_use of its group tax, because
        # children are declared with type_tax_use 'none'
        group_data = {}
        for row in csv_data:
            if row.get('amount_type') == 'group':
                for xmlid in row['children_tax_ids'].split(","):
                    assert xmlid not in group_data or group_data[xmlid] == row['type_tax_use']
                    group_data[xmlid] = row['type_tax_use']

        buffer = StringIO()
        writer = csv.DictWriter(
            buffer,
            fieldnames=csv_file.fieldnames,
            delimiter=',',
            quotechar='"',
            quoting=csv.QUOTE_ALL,
            lineterminator='\n',
        )
        writer.writeheader()
        for tax_rows in tax_grouper(iter(csv_data)):
            type_tax_use = tax_rows[0]['type_tax_use']
            if type_tax_use == 'none':
                type_tax_use = group_data.get(tax_rows[0]['id']) or 'none'
            assert type_tax_use
            tag2factor = tag_factor(tax_rows)
            for row in tax_rows:
                document_type = row['repartition_line_ids/document_type']
                # rewrite only the tag column; everything else is copied verbatim
                writer.writerow({
                    fname: (
                        remove_sign(value, country_tax_signs, type_tax_use, document_type, tag2factor[document_type])
                        if fname == 'repartition_line_ids/tag_ids' else
                        value
                    )
                    for fname, value in row.items()
                })
        file.content = buffer.getvalue()

    # report countries whose templates imply contradictory signs for a tag
    conflicts = {}
    for country, country_tax_signs in tag_signs.items():
        if errors := [country for country, sign in country_tax_signs.items() if sign == 'error']:
            conflicts[country] = errors

    if conflicts:
        _logger.warning("\n\n\nInconsistent tag signs found:")
        for country in sorted(conflicts):
            _logger.warning("%s: %s", country, conflicts[country])

    # test_tag_signs(tag_signs)

    unknowns = defaultdict(list)  # tags referenced by reports but absent from templates
    for i, file in enumerate(tax_report_files):
        file_manager.print_progress(nb_template_files + i, nb_template_files + nb_report_files, file.path)
        tree = etree.parse(str(file.path))
        touch = False  # whether any formula was negated in this file
        for report_node in tree.xpath("//record[@model='account.report']"):
            country_node = report_node.find("field[@name='country_id']")
            if country_node is None:
                continue
            country_code = country_node.attrib['ref']
            country_tax_signs = tag_signs[country_code]
            for expression_node in report_node.findall(".//record[@model='account.report.expression']"):
                engine_node = expression_node.find("field[@name='engine']")
                if engine_node.text == 'tax_tags':
                    formula_node = expression_node.find("field[@name='formula']")
                    tag = formula_node.text
                    # manual overrides take precedence over detected signs
                    if manual_sign := manual.get(country_code, {}).get(tag):
                        if manual_sign == -1:
                            touch = True
                            formula_node.text = '-' + formula_node.text
                    elif tag not in country_tax_signs:
                        unknowns[country_code].append(tag)
                    elif country_tax_signs[tag] == -1:
                        touch = True
                        formula_node.text = '-' + formula_node.text
        if touch:
            # keep the original file bytes; splice in only the changed formula lines
            file.content = ''.join(
                diff[2:]
                for diff in difflib.ndiff(
                    file.content.splitlines(keepends=True),
                    etree.tostring(tree, encoding="utf-8").decode().splitlines(keepends=True),
                )
                # avoid any diff generated by lxml and only keep diff for the lines added
                if (
                    diff.startswith((' ', '-'))
                    or re.match(r"""\+\s*<field name=["']formula["']""", diff)
                ) and not re.match(r"""-\s*<field name=["']formula["']""", diff)
            )

    if unknowns:
        _logger.warning("\n\n\nUnknown tag signs found:")
        for country in sorted(unknowns):
            _logger.warning("%s: %s", country, unknowns[country])
|
||||
Loading…
Add table
Add a link
Reference in a new issue