19.0 vanilla

This commit is contained in:
Ernad Husremovic 2025-10-03 18:07:25 +02:00
parent 0a7ae8db93
commit 991d2234ca
416 changed files with 646602 additions and 300844 deletions

View file

@ -1,23 +1,14 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# ruff: noqa: F401
from . import appdirs
from . import arabic_reshaper
from . import cloc
from . import constants
from . import pdf
from . import pycompat
from . import template_inheritance
from . import win32
from . import urls
from .parse_version import parse_version
from .barcode import check_barcode_encoding
from .cache import ormcache, ormcache_context
from .config import config
from .date_utils import *
from .float_utils import *
from .func import *
from .float_utils import float_compare, float_is_zero, float_repr, float_round, float_split, float_split_str
from .func import classproperty, conditional, lazy, lazy_classproperty, reset_cached_properties
from .i18n import format_list, py_to_js_locale
from .image import image_process
from .json import json_default
from .mail import *
from .misc import *
@ -26,7 +17,4 @@ from .sql import *
from .translate import _, html_translate, xml_translate, LazyTranslate
from .xml_utils import cleanup_xml_node, load_xsd_files_from_url, validate_xml_from_attachment
from .convert import convert_csv_import, convert_file, convert_sql_import, convert_xml_import
from . import osutil
from .js_transpiler import transpile_javascript, is_odoo_module, URL_RE, ODOO_MODULE_RE
from .sourcemap_generator import SourceMapGenerator
from .set_expression import SetDefinitions

2
odoo-bringout-oca-ocb-base/odoo/tools/appdirs.py Normal file → Executable file
View file

@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor

View file

@ -0,0 +1,2 @@
from .python_extractor import extract_python
from .javascript_extractor import extract_javascript

View file

@ -0,0 +1,285 @@
from __future__ import annotations
import io
from textwrap import dedent
from typing import TYPE_CHECKING
from babel.messages.jslexer import Token, line_re, tokenize, unquote_string
if TYPE_CHECKING:
    from collections.abc import Collection, Generator, Mapping
    from typing import Protocol, TypeAlias, TypedDict
    from _typeshed import SupportsRead, SupportsReadline

    """
    Types used by the extractor
    """

    # Tuple specifying which of the translation function's arguments contains localizable strings.
    # e.g. (1, 2)
    # -> Indicates the first and second argument are translatable terms, like in `ngettext`
    # e.g. ((1, 'c'), 2)
    # -> Indicates the first argument is a context key and the second is the translatable term, like in `pgettext`
    # e.g. None
    # -> Indicates there is only one argument translatable, like in `gettext`
    _SimpleKeyword: TypeAlias = tuple[int | tuple[int, int] | tuple[int, str], ...] | None
    # A `_SimpleKeyword` or a `dict` mapping the expected number of function arguments against the `_SimpleKeyword`
    _Keyword: TypeAlias = dict[int | None, _SimpleKeyword] | _SimpleKeyword
    # The result of extracting terms, a 4-tuple containing:
    # (lineno: int, messages: str | tuple[str, ...], comments: list[str], context: str | None)
    # - `lineno`: The line number of the extracted term(s)
    # - `funcname`: The translation function name
    # - `messages`: The extracted term(s). A single one or multiple in case of e.g. `ngettext`
    # - `comments`: The extracted translator comments for the term(s)
    _ExtractionResult: TypeAlias = tuple[int, str, str | tuple[str, ...], list[str]]

    # The file object to pass to the extraction function:
    # a readable bytes stream that also supports random access (seek/tell).
    class _FileObj(SupportsRead[bytes], SupportsReadline[bytes], Protocol):
        def seek(self, offset: int, whence: int = ..., /) -> int: ...
        def tell(self) -> int: ...

    # The possible options to pass to the extraction function
    class _JSOptions(TypedDict, total=False):
        encoding: str  # source encoding; defaults to 'utf-8'
        jsx: bool  # tokenize JSX syntax; defaults to True
        template_string: bool  # tokenize template strings; defaults to True
        parse_template_string: bool  # recurse into `${...}` expressions; defaults to True
def parse_template_string(
    template_string: str,
    keywords: Mapping[str, _Keyword],
    comment_tags: Collection[str],
    options: _JSOptions,
    lineno: int = 0,
    keyword: str = "",
) -> Generator[_ExtractionResult, None, None]:
    """Parse a JS template string and extract translatable terms.

    Each embedded ``${...}`` expression is re-tokenized through
    :func:`extract_javascript` so that nested translation calls are found.
    When *keyword* is given (the template string is the argument of a
    translation function), the template's contents up to the first
    ``${`` are yielded directly as a term.

    :param template_string: The template string, including its backticks
    :param keywords: The translation keywords mapping
    :param comment_tags: The keywords to extract translator comments
    :param options: Extractor options for parsing the Javascript file
    :param lineno: The line number at which the template string starts
    :param keyword: The enclosing translation function name, if any
    :yield: Tuples in the form `(lineno, funcname, message, comments)`
    """
    prev_character = None
    level = 0  # nesting depth of `${...}` expressions
    inside_str = False  # the quote character we are inside of, or False
    expression_contents = ''
    for character in template_string[1:-1]:  # strip the enclosing backticks
        if not inside_str and character in ('"', "'", '`'):
            inside_str = character
        elif inside_str == character and prev_character != '\\':
            # BUGFIX: this previously compared against r'\\' (a two-character
            # string); a single character can never equal it, so escaped
            # quotes (e.g. \") incorrectly closed the nested string.
            inside_str = False
        if level or keyword:
            expression_contents += character
        if not inside_str:
            if character == '{' and prev_character == '$':
                if keyword:
                    # The term for `keyword` ends at the first interpolation.
                    break
                level += 1
            elif level and character == '}':
                level -= 1
                if level == 0 and expression_contents:
                    # Full `${...}` expression collected: drop the trailing '}'
                    # and extract recursively from its source.
                    expression_contents = expression_contents[0:-1]
                    fake_file_obj = io.BytesIO(expression_contents.encode())
                    yield from extract_javascript(fake_file_obj, keywords, comment_tags, options, lineno)
                    lineno += len(line_re.findall(expression_contents))
                    expression_contents = ''
        prev_character = character
    if keyword:
        yield (lineno, keyword, expression_contents, [])
def extract_javascript(
    fileobj: _FileObj,
    keywords: Mapping[str, _Keyword],
    comment_tags: Collection[str],
    options: _JSOptions,
    lineno_offset: int = 0,
) -> Generator[_ExtractionResult, None, None]:
    """
    Extract all translatable terms from a Javascript source file.

    This function is modified from the official Babel extractor to support arbitrarily nested function calls.

    :param fileobj: The Javascript source file
    :param keywords: The translation keywords mapping
    :param comment_tags: The keywords to extract translator comments
    :param options: Extractor options for parsing the Javascript file
    :param lineno_offset: Offset added to all token line numbers (used when
        re-extracting a fragment embedded at a known line, see
        ``parse_template_string``)
    :yield: Tuples in the following form: `(lineno, funcname, message, comments)`
    """
    encoding = options.get('encoding', 'utf-8')
    # Only ask the lexer for dotted names when some keyword contains a dot.
    dotted = any('.' in kw for kw in keywords)
    # Keep track of the last token we saw.
    last_token = None
    # Keep the stack of all function calls and its related contextual variables, so we can handle nested gettext calls.
    function_stack = []
    # Keep track of whether we're in a class or function definition.
    in_def = False
    # Keep track of whether we're in a block of translator comments.
    in_translator_comments = False
    # Keep track of the last encountered translator comments.
    translator_comments = []
    # Keep track of the (split) strings encountered.
    message_buffer = []
    for token in tokenize(
        fileobj.read().decode(encoding),
        jsx=options.get('jsx', True),
        dotted=dotted,
        template_string=options.get('template_string', True),
    ):
        # Rebuild the token with the caller-provided line offset applied.
        token: Token = Token(token.type, token.value, token.lineno + lineno_offset)
        if token.type == 'name' and token.value in ('class', 'function'):
            # We're entering a class or function definition.
            in_def = True
            continue
        elif in_def and token.type == 'operator' and token.value in ('(', '{'):
            # We're in a class or function definition and should not do anything.
            in_def = False
            continue
        elif (
            last_token
            and last_token.type == 'name'
            and token.type == 'template_string'
        ):
            # Turn keyword`foo` expressions into keyword("foo") function calls.
            string_value = unquote_string(token.value)
            cur_translator_comments = translator_comments
            if function_stack and function_stack[-1]['function_lineno'] == last_token.lineno:
                # If our current function call is on the same line as the previous one,
                # copy their translator comments, since they also apply to us.
                cur_translator_comments = function_stack[-1]['translator_comments']
            # We add all information needed later for the current function call.
            function_stack.append({
                'function_lineno': last_token.lineno,
                'function_name': last_token.value,
                'message_lineno': token.lineno,
                'messages': [string_value],
                'translator_comments': cur_translator_comments,
            })
            translator_comments = []
            message_buffer.clear()
            # We act as if we are closing the function call now
            last_token = token
            token = Token('operator', ')', token.lineno)
        if (
            options.get('parse_template_string', True)
            and (not last_token or last_token.type != 'name' or last_token.value not in keywords)
            and token.type == 'template_string'
        ):
            # A plain (untagged) template string: recurse into its `${...}`
            # expressions; if it is the argument of a translation call, pass
            # that keyword along so its constant prefix is yielded as a term.
            keyword = ""
            if function_stack and function_stack[-1]['function_name'] in keywords:
                keyword = function_stack[-1]['function_name']
            yield from parse_template_string(
                token.value,
                keywords,
                comment_tags,
                options,
                token.lineno,
                keyword,
            )
        elif token.type == 'operator' and token.value == '(':
            if last_token and last_token.type == 'name':
                # We're entering a function call.
                cur_translator_comments = translator_comments
                if function_stack and function_stack[-1]['function_lineno'] == last_token.lineno:
                    # If our current function call is on the same line as the previous one,
                    # copy their translator comments, since they also apply to us.
                    cur_translator_comments = function_stack[-1]['translator_comments']
                # We add all information needed later for the current function call.
                function_stack.append({
                    'function_lineno': token.lineno,
                    'function_name': last_token.value,
                    'message_lineno': None,
                    'messages': [],
                    'translator_comments': cur_translator_comments,
                })
                translator_comments = []
                message_buffer.clear()
        elif token.type == 'linecomment':
            # Strip the comment character from the line.
            value = token.value[2:].strip()
            if in_translator_comments and translator_comments[-1][0] == token.lineno - 1:
                # We're already in a translator comment. Continue appending.
                translator_comments.append((token.lineno, value))
                continue
            for comment_tag in comment_tags:
                if value.startswith(comment_tag):
                    # The comment starts with one of the translator comment keywords, so let's start capturing it.
                    in_translator_comments = True
                    translator_comments.append((token.lineno, value))
                    break
        elif token.type == 'multilinecomment':
            # Only one multi-line comment may precede a translation.
            translator_comments = []
            value = token.value[2:-2].strip()
            for comment_tag in comment_tags:
                if value.startswith(comment_tag):
                    lines = value.splitlines()
                    if lines:
                        lines[0] = lines[0].strip()
                        lines[1:] = dedent('\n'.join(lines[1:])).splitlines()
                        for offset, line in enumerate(lines):
                            translator_comments.append((token.lineno + offset, line))
                    break
        elif function_stack and function_stack[-1]['function_name'] in keywords:
            # We're inside a translation function call.
            if token.type == 'operator' and token.value == ')':
                # The call has ended, so we yield the translatable term(s).
                messages = function_stack[-1]['messages']
                lineno = function_stack[-1]['message_lineno'] or function_stack[-1]['function_lineno']
                cur_translator_comments = function_stack[-1]['translator_comments']
                if message_buffer:
                    messages.append(''.join(message_buffer))
                    message_buffer.clear()
                else:
                    messages.append(None)
                # Single term -> bare string; several terms (e.g. ngettext) -> tuple.
                messages = tuple(messages) if len(messages) > 1 else messages[0]
                if cur_translator_comments and cur_translator_comments[-1][0] < lineno - 1:
                    # The translator comments are not immediately preceding the current term, so we skip them.
                    cur_translator_comments = []
                yield (
                    lineno,
                    function_stack[-1]['function_name'],
                    messages,
                    [comment[1] for comment in cur_translator_comments],
                )
                function_stack.pop()
            elif token.type in ('string', 'template_string'):
                # We've encountered a string inside a translation function call.
                if last_token.type == 'name':
                    # NOTE(review): a string directly following a name token —
                    # the buffered parts are discarded; presumably the argument
                    # is not a plain literal. Confirm intent against callers.
                    message_buffer.clear()
                else:
                    string_value = unquote_string(token.value)
                    if not function_stack[-1]['message_lineno']:
                        function_stack[-1]['message_lineno'] = token.lineno
                    if string_value is not None:
                        message_buffer.append(string_value)
            elif token.type == 'operator' and token.value == ',':
                # We're at the end of a function call argument.
                if message_buffer:
                    function_stack[-1]['messages'].append(''.join(message_buffer))
                    message_buffer.clear()
                else:
                    function_stack[-1]['messages'].append(None)
        elif function_stack and token.type == 'operator' and token.value == ')':
            # End of a non-translation function call: just unwind the stack.
            function_stack.pop()
        if in_translator_comments and translator_comments[-1][0] < token.lineno:
            # We have a newline between the comment lines, so they don't belong together anymore.
            in_translator_comments = False
        last_token = token

View file

@ -0,0 +1,224 @@
from __future__ import annotations
import ast
import tokenize
from tokenize import COMMENT, NAME, OP, STRING, generate_tokens
from typing import TYPE_CHECKING
from babel.util import parse_encoding, parse_future_flags
if TYPE_CHECKING:
    from collections.abc import Collection, Generator, Mapping
    from typing import IO, TypeAlias, TypedDict

    """
    Types used by the extractor
    """

    # Tuple specifying which of the translation function's arguments contains localizable strings.
    # e.g. (1, 2)
    # -> Indicates the first and second argument are translatable terms, like in `ngettext`
    # e.g. ((1, 'c'), 2)
    # -> Indicates the first argument is a context key and the second is the translatable term, like in `pgettext`
    # e.g. None
    # -> Indicates there is only one argument translatable, like in `gettext`
    _SimpleKeyword: TypeAlias = tuple[int | tuple[int, int] | tuple[int, str], ...] | None
    # A `_SimpleKeyword` or a `dict` mapping the expected number of function arguments against the `_SimpleKeyword`
    _Keyword: TypeAlias = dict[int | None, _SimpleKeyword] | _SimpleKeyword
    # The result of extracting terms, a 4-tuple containing:
    # (lineno: int, messages: str | tuple[str, ...], comments: list[str], context: str | None)
    # - `lineno`: The line number of the extracted term(s)
    # - `funcname`: The translation function name
    # - `messages`: The extracted term(s). A single one or multiple in case of e.g. `ngettext`
    # - `comments`: The extracted translator comments for the term(s)
    _ExtractionResult: TypeAlias = tuple[int, str, str | tuple[str, ...], list[str]]

    # The possible options to pass to the extraction function
    class _PyOptions(TypedDict, total=False):
        encoding: str  # fallback encoding when the file declares no PEP 263 cookie
# Token kinds introduced by PEP 701 in Python 3.12; resolve to None when the
# running interpreter's tokenize module predates them.
FSTRING_START = getattr(tokenize, 'FSTRING_START', None)
FSTRING_MIDDLE = getattr(tokenize, 'FSTRING_MIDDLE', None)
FSTRING_END = getattr(tokenize, 'FSTRING_END', None)
def _parse_python_string(value: str, encoding: str, future_flags: int) -> str | None:
# Unwrap quotes in a safe manner, maintaining the string's encoding
code = compile(
f'# coding={encoding!s}\n{value}',
'<string>',
'eval',
ast.PyCF_ONLY_AST | future_flags,
)
if isinstance(code, ast.Expression):
body = code.body
if isinstance(body, ast.Constant):
return body.value
if isinstance(body, ast.JoinedStr): # f-string
if all(isinstance(node, ast.Constant) for node in body.values):
return ''.join(node.value for node in body.values)
return None
def extract_python(
    fileobj: IO[bytes],
    keywords: Mapping[str, _Keyword],
    comment_tags: Collection[str],
    options: _PyOptions,
) -> Generator[_ExtractionResult, None, None]:
    """
    Extract all translatable terms from a Python source file.

    This function is modified from the official Babel extractor to support arbitrarily nested function calls.

    :param fileobj: The Python source file
    :param keywords: The translation keywords (function names) mapping
    :param comment_tags: The keywords to extract translator comments
    :param options: Extractor options for parsing the Python file
    :yield: Tuples in the following form: `(lineno, funcname, message, comments)`
    """
    # A PEP 263 encoding cookie in the file takes precedence over the option.
    encoding = parse_encoding(fileobj) or options.get('encoding', 'utf-8')
    future_flags = parse_future_flags(fileobj, encoding)

    def next_line():
        # Feed the tokenizer one decoded source line at a time.
        return fileobj.readline().decode(encoding)

    tokens = generate_tokens(next_line)
    # Keep the stack of all function calls and its related contextual variables, so we can handle nested gettext calls.
    function_stack = []
    # Keep the last encountered function/variable name for when we encounter an opening parenthesis.
    last_name = None
    # Keep track of whether we're in a class or function definition.
    in_def = False
    # Keep track of whether we're in a block of translator comments.
    in_translator_comments = False
    # Keep track of the last encountered translator comments.
    translator_comments = []
    # Keep track of the (split) strings encountered.
    message_buffer = []
    # Current prefix of a Python 3.12 (PEP 701) f-string, or None if we're not currently parsing one.
    current_fstring_start = None
    for token, value, (lineno, _), _, _ in tokens:
        if token == NAME and value in ('def', 'class'):
            # We're entering a class or function definition.
            in_def = True
            continue
        if in_def and token == OP and value in ('(', ':'):
            # We're in a class or function definition and should not do anything.
            in_def = False
            continue
        if token == OP and value == '(' and last_name:
            # We're entering a function call.
            cur_translator_comments = translator_comments
            if function_stack and function_stack[-1]['function_lineno'] == lineno:
                # If our current function call is on the same line as the previous one,
                # copy their translator comments, since they also apply to us.
                cur_translator_comments = function_stack[-1]['translator_comments']
            # We add all information needed later for the current function call.
            function_stack.append({
                'function_lineno': lineno,
                'function_name': last_name,
                'message_lineno': None,
                'messages': [],
                'translator_comments': cur_translator_comments,
            })
            translator_comments = []
            message_buffer.clear()
        elif token == COMMENT:
            # Strip the comment character from the line.
            value = value[1:].strip()
            if in_translator_comments and translator_comments[-1][0] == lineno - 1:
                # We're already in a translator comment. Continue appending.
                translator_comments.append((lineno, value))
                continue
            for comment_tag in comment_tags:
                if value.startswith(comment_tag):
                    # The comment starts with one of the translator comment keywords, so let's start capturing it.
                    in_translator_comments = True
                    translator_comments.append((lineno, value))
                    break
        elif function_stack and function_stack[-1]['function_name'] in keywords:
            # We're inside a translation function call.
            if token == OP and value == ')':
                # The call has ended, so we yield the translatable term(s).
                messages = function_stack[-1]['messages']
                lineno = function_stack[-1]['message_lineno'] or function_stack[-1]['function_lineno']
                cur_translator_comments = function_stack[-1]['translator_comments']
                if message_buffer:
                    messages.append(''.join(message_buffer))
                    message_buffer.clear()
                else:
                    messages.append(None)
                # Single term -> bare string; several terms (e.g. ngettext) -> tuple.
                messages = tuple(messages) if len(messages) > 1 else messages[0]
                if cur_translator_comments and cur_translator_comments[-1][0] < lineno - 1:
                    # The translator comments are not immediately preceding the current term, so we skip them.
                    cur_translator_comments = []
                yield (
                    lineno,
                    function_stack[-1]['function_name'],
                    messages,
                    [comment[1] for comment in cur_translator_comments],
                )
                function_stack.pop()
            elif token == STRING:
                # We've encountered a string inside a translation function call.
                string_value = _parse_python_string(value, encoding, future_flags)
                if not function_stack[-1]['message_lineno']:
                    function_stack[-1]['message_lineno'] = lineno
                if string_value is not None:
                    message_buffer.append(string_value)
            # Python 3.12+, see https://peps.python.org/pep-0701/#new-tokens
            elif token == FSTRING_START:
                current_fstring_start = value
            elif token == FSTRING_MIDDLE:
                if current_fstring_start is not None:
                    # Accumulate the constant middle part of the f-string.
                    current_fstring_start += value
            elif token == FSTRING_END:
                if current_fstring_start is not None:
                    # All parts seen so far were constant: reassemble the full
                    # literal and parse it as a whole.
                    fstring = current_fstring_start + value
                    string_value = _parse_python_string(fstring, encoding, future_flags)
                    if string_value is not None:
                        message_buffer.append(string_value)
            elif token == OP and value == ',':
                # End of a function call argument
                if message_buffer:
                    function_stack[-1]['messages'].append(''.join(message_buffer))
                    message_buffer.clear()
                else:
                    function_stack[-1]['messages'].append(None)
        elif function_stack and token == OP and value == ')':
            # This is the end of an non-translation function call. Just pop it from the stack.
            function_stack.pop()
        if in_translator_comments and translator_comments[-1][0] < lineno:
            # We have a newline between the comment lines, so they don't belong together anymore.
            in_translator_comments = False
        if token == NAME:
            last_name = value
            if function_stack and not function_stack[-1]['message_lineno']:
                # A non-literal first argument still pins the term's line number.
                function_stack[-1]['message_lineno'] = lineno
        if current_fstring_start is not None and token not in {FSTRING_START, FSTRING_MIDDLE}:
            # In Python 3.12, tokens other than FSTRING_* mean the f-string is dynamic,
            # so we don't want to extract it.
            # And if it's FSTRING_END, we've already handled it above.
            # Let's forget that we're in an f-string.
            current_fstring_start = None

View file

@ -1,21 +1,59 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import functools
import re
from threading import RLock
__all__ = ['check_barcode_encoding', 'createBarcodeDrawing', 'get_barcode_font']
_barcode_init_lock = RLock()
__all__ = ['check_barcode_encoding']
# A lock occurs when the user wants to print a report having multiple barcode while the server is
# started in threaded-mode. The reason is that reportlab has to build a cache of the T1 fonts
# before rendering a barcode (done in a C extension) and this part is not thread safe.
# This cached functions allows to lazily initialize the T1 fonts cache need for rendering of
# barcodes in a thread-safe way.
@functools.lru_cache(1)
def _init_barcode():
    """Lazily initialize reportlab's barcode rendering once, thread-safely.

    Returns ``(barcode_module, font_name)``. The first barcode render makes
    reportlab build its T1 font cache in a C extension, which is not thread
    safe; doing it exactly once here, under a lock, avoids races/deadlocks
    when the server runs in threaded mode.

    :raises ImportError: when reportlab is not installed
    """
    with _barcode_init_lock:
        try:
            from reportlab.graphics import barcode  # noqa: PLC0415
            from reportlab.pdfbase.pdfmetrics import TypeFace, getFont  # noqa: PLC0415
            font_name = 'Courier'
            available = TypeFace(font_name).findT1File()
            if not available:
                # Courier's T1 file is missing: fall back to a substitute
                # and patch its metrics (presumably to match Courier's
                # ascent/descent — TODO confirm).
                substitution_font = 'NimbusMonoPS-Regular'
                fnt = getFont(substitution_font)
                if fnt:
                    font_name = substitution_font
                    fnt.ascent = 629
                    fnt.descent = -157
            # Warm up the T1 font cache by rendering a throwaway barcode.
            barcode.createBarcodeDrawing('Code128', value='foo', format='png', width=100, height=100, humanReadable=1, fontName=font_name).asString('png')
        except ImportError:
            # reportlab missing entirely: let the caller see the real error.
            raise
        except Exception:  # noqa: BLE001
            # Warm-up is best-effort; on any other failure keep Courier.
            font_name = 'Courier'
        return barcode, font_name
def get_barcode_check_digit(numeric_barcode):
def createBarcodeDrawing(codeName: str, **options):
    """Build a reportlab barcode drawing of type *codeName*.

    Triggers the lazy, thread-safe reportlab initialization on first use,
    then delegates to ``reportlab.graphics.barcode.createBarcodeDrawing``.
    """
    barcode_module, _unused_font = _init_barcode()
    return barcode_module.createBarcodeDrawing(codeName, **options)
def get_barcode_font():
    """Get the barcode font for rendering (initializes reportlab lazily)."""
    return _init_barcode()[1]
def get_barcode_check_digit(numeric_barcode: str) -> int:
""" Computes and returns the barcode check digit. The used algorithm
follows the GTIN specifications and can be used by all compatible
barcode nomenclature, like as EAN-8, EAN-12 (UPC-A) or EAN-13.
https://www.gs1.org/sites/default/files/docs/barcodes/GS1_General_Specifications.pdf
https://www.gs1.org/services/how-calculate-check-digit-manually
:param numeric_barcode: the barcode to verify/recompute the check digit
:type numeric_barcode: str
:return: the number corresponding to the right check digit
:rtype: int
"""
# Multiply value of each position by
# N1 N2 N3 N4 N5 N6 N7 N8 N9 N10 N11 N12 N13 N14 N15 N16 N17 N18
@ -34,10 +72,9 @@ def get_barcode_check_digit(numeric_barcode):
return (10 - total % 10) % 10
def check_barcode_encoding(barcode, encoding):
def check_barcode_encoding(barcode: str, encoding: str) -> bool:
""" Checks if the given barcode is correctly encoded.
:return: True if the barcode string is encoded with the provided encoding.
:rtype: bool
"""
encoding = encoding.lower()
if encoding == "any":

View file

@ -1,39 +1,64 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# decorator makes wrappers that have the same API as their wrapped function
from collections import Counter, defaultdict
from decorator import decorator
from __future__ import annotations
from collections import defaultdict
from collections.abc import Mapping, Collection
from inspect import signature, Parameter
import functools
import logging
import signal
import sys
import threading
import time
import typing
import warnings
if typing.TYPE_CHECKING:
from .lru import LRU
from collections.abc import Callable, Iterable
from odoo.models import BaseModel
C = typing.TypeVar('C', bound=Callable)
unsafe_eval = eval
_logger = logging.getLogger(__name__)
_logger_lock = threading.RLock()
_logger_state: typing.Literal['wait', 'abort', 'run'] = 'wait'
class ormcache_counter:
    """ Statistic counters for cache entries.

    Tracks global hit/miss/error counts and generation time for one cached
    method, plus per-transaction counters (``tx_*``) which only count the
    first lookup of a given key within a transaction.
    """
    __slots__ = ['cache_name', 'err', 'gen_time', 'hit', 'miss', 'tx_err', 'tx_hit', 'tx_miss']

    def __init__(self):
        self.hit: int = 0        # successful cache lookups
        self.miss: int = 0       # lookups that had to generate the value
        self.err: int = 0        # lookup errors (e.g. unhashable key)
        self.gen_time: float = 0.0  # total time spent generating missed values
        self.cache_name: str = ''   # registry cache this counter belongs to
        # transaction-scoped counters (first lookup of a key per transaction)
        self.tx_hit: int = 0
        self.tx_miss: int = 0
        self.tx_err: int = 0

    @property
    def ratio(self) -> float:
        # hit percentage; `or 1` avoids division by zero with no calls
        return 100.0 * self.hit / (self.hit + self.miss or 1)

    @property
    def tx_ratio(self) -> float:
        return 100.0 * self.tx_hit / (self.tx_hit + self.tx_miss or 1)

    @property
    def tx_calls(self) -> int:
        return self.tx_hit + self.tx_miss


_COUNTERS: defaultdict[tuple[str, Callable], ormcache_counter] = defaultdict(ormcache_counter)
"""statistic counters dictionary, maps (dbname, method) to counter"""
class ormcache:
""" LRU cache decorator for model methods.
The parameters are strings that represent expressions referring to the
signature of the decorated method, and are used to compute a cache key::
@ -53,72 +78,87 @@ class ormcache(object):
because the underlying cursor will eventually be closed and raise a
`psycopg2.InterfaceError`.
"""
def __init__(self, *args, **kwargs):
self.args = args
self.skiparg = kwargs.get('skiparg')
self.cache_name = kwargs.get('cache', 'default')
key: Callable[..., tuple]
def __call__(self, method):
def __init__(self, *args: str, cache: str = 'default', skiparg: int | None = None, **kwargs):
self.args = args
self.skiparg = skiparg
self.cache_name = cache
if skiparg is not None:
warnings.warn("Deprecated since 19.0, ormcache(skiparg) will be removed", DeprecationWarning)
def __call__(self, method: C) -> C:
assert not hasattr(self, 'method'), "ormcache is already bound to a method"
self.method = method
self.determine_key()
lookup = decorator(self.lookup, method)
lookup.__cache__ = self
assert self.key is not None, "ormcache.key not initialized"
@functools.wraps(method)
def lookup(*args, **kwargs):
return self.lookup(*args, **kwargs)
lookup.__cache__ = self # type: ignore
return lookup
def add_value(self, *args, cache_value=None, **kwargs):
model = args[0]
d, key0, counter = self.lru(model)
counter.cache_name = self.cache_name
key = key0 + self.key(*args, **kwargs)
def add_value(self, *args, cache_value=None, **kwargs) -> None:
model: BaseModel = args[0]
d: LRU = model.pool._Registry__caches[self.cache_name] # type: ignore
key = self.key(*args, **kwargs)
d[key] = cache_value
def determine_key(self):
def determine_key(self) -> None:
""" Determine the function that computes a cache key from arguments. """
if self.skiparg is None:
# build a string that represents function code and evaluate it
args = ', '.join(
# remove annotations because lambdas can't be type-annotated,
# and defaults because they are redundant (defaults are present
# in the wrapper function itself)
str(params.replace(annotation=Parameter.empty, default=Parameter.empty))
for params in signature(self.method).parameters.values()
)
if self.args:
code = "lambda %s: (%s,)" % (args, ", ".join(self.args))
else:
code = "lambda %s: ()" % (args,)
self.key = unsafe_eval(code)
else:
assert self.method is not None
if self.skiparg is not None:
# backward-compatible function that uses self.skiparg
self.key = lambda *args, **kwargs: args[self.skiparg:]
self.key = lambda *args, **kwargs: (args[0]._name, self.method, *args[self.skiparg:])
return
# build a string that represents function code and evaluate it
args = ', '.join(
# remove annotations because lambdas can't be type-annotated,
str(params.replace(annotation=Parameter.empty))
for params in signature(self.method).parameters.values()
)
values = ['self._name', 'method', *self.args]
code = f"lambda {args}: ({''.join(a for arg in values for a in (arg, ','))})"
self.key = unsafe_eval(code, {'method': self.method})
def lru(self, model):
counter = STAT[(model.pool.db_name, model._name, self.method)]
return model.pool._Registry__caches[self.cache_name], (model._name, self.method), counter
def lookup(self, *args, **kwargs):
model: BaseModel = args[0]
d: LRU = model.pool._Registry__caches[self.cache_name] # type: ignore
key = self.key(*args, **kwargs)
counter = _COUNTERS[model.pool.db_name, self.method]
tx_lookups = model.env.cr.cache.setdefault('_ormcache_lookups', set())
# tx: is it the first call in the transation for that key
tx_first_lookup = key not in tx_lookups
if tx_first_lookup:
counter.cache_name = self.cache_name
tx_lookups.add(key)
def lookup(self, method, *args, **kwargs):
d, key0, counter = self.lru(args[0])
key = key0 + self.key(*args, **kwargs)
try:
r = d[key]
counter.hit += 1
counter.tx_hit += tx_first_lookup
return r
except KeyError:
counter.miss += 1
counter.cache_name = self.cache_name
start = time.time()
value = d[key] = self.method(*args, **kwargs)
counter.gen_time += time.time() - start
return value
counter.tx_miss += tx_first_lookup
miss = True
except TypeError:
_logger.warning("cache lookup error on %r", key, exc_info=True)
counter.err += 1
counter.tx_err += tx_first_lookup
miss = False
if miss:
start = time.monotonic()
value = self.method(*args, **kwargs)
counter.gen_time += time.monotonic() - start
d[key] = value
return value
else:
return self.method(*args, **kwargs)
def clear(self, model, *args):
""" Clear the registry cache """
warnings.warn('Deprecated method ormcache.clear(model, *args), use registry.clear_cache() instead')
model.pool.clear_all_caches()
class ormcache_context(ormcache):
""" This LRU cache decorator is a variant of :class:`ormcache`, with an
@ -126,60 +166,219 @@ class ormcache_context(ormcache):
keys are looked up in the ``context`` parameter and combined to the cache
key made by :class:`ormcache`.
"""
def __init__(self, *args, **kwargs):
super(ormcache_context, self).__init__(*args, **kwargs)
self.keys = kwargs['keys']
def __init__(self, *args: str, keys, skiparg=None, **kwargs):
assert skiparg is None, "ormcache_context() no longer supports skiparg"
warnings.warn("Since 19.0, use ormcache directly, context values are available as `self.env.context.get`", DeprecationWarning)
super().__init__(*args, **kwargs)
def determine_key(self):
""" Determine the function that computes a cache key from arguments. """
assert self.skiparg is None, "ormcache_context() no longer supports skiparg"
# build a string that represents function code and evaluate it
def determine_key(self) -> None:
assert self.method is not None
sign = signature(self.method)
args = ', '.join(
str(params.replace(annotation=Parameter.empty, default=Parameter.empty))
for params in sign.parameters.values()
)
cont_expr = "(context or {})" if 'context' in sign.parameters else "self._context"
cont_expr = "(context or {})" if 'context' in sign.parameters else "self.env.context"
keys_expr = "tuple(%s.get(k) for k in %r)" % (cont_expr, self.keys)
if self.args:
code = "lambda %s: (%s, %s)" % (args, ", ".join(self.args), keys_expr)
else:
code = "lambda %s: (%s,)" % (args, keys_expr)
self.key = unsafe_eval(code)
self.args += (keys_expr,)
super().determine_key()
def log_ormcache_stats(sig=None, frame=None): # noqa: ARG001 (arguments are there for signals)
""" Log statistics of ormcache usage by database, model, and method. """
from odoo.modules.registry import Registry
cache_entries = {}
current_db = None
cache_stats = ['Caches stats:']
for (dbname, model, method), stat in sorted(STAT.items(), key=lambda k: (k[0][0] or '~', k[0][1], k[0][2].__name__)):
dbname_display = dbname or "<no_db>"
if current_db != dbname_display:
current_db = dbname_display
cache_stats.append(f"Database {dbname_display}")
if dbname: # mainly for MockPool
if (dbname, stat.cache_name) not in cache_entries:
cache = Registry.registries.d[dbname]._Registry__caches[stat.cache_name]
cache_entries[dbname, stat.cache_name] = Counter(k[:2] for k in cache.d)
nb_entries = cache_entries[dbname, stat.cache_name][model, method]
else:
nb_entries = 0
cache_name = stat.cache_name.rjust(25)
cache_stats.append(
f"{cache_name}, {nb_entries:6d} entries, {stat.hit:6d} hit, {stat.miss:6d} miss, {stat.err:6d} err, {stat.gen_time:10.3f}s time, {stat.ratio:6.1f}% ratio for {model}.{method.__name__}"
)
_logger.info('\n'.join(cache_stats))
def log_ormcache_stats(sig=None, frame=None): # noqa: ARG001 (arguments are there for signals)
# collect and log data in a separate thread to avoid blocking the main thread
# and avoid using logging module directly in the signal handler
# https://docs.python.org/3/library/logging.html#thread-safety
global _logger_state # noqa: PLW0603
with _logger_lock:
if _logger_state != 'wait':
# send the signal again to stop the logging thread
_logger_state = 'abort'
return
_logger_state = 'run'
def check_continue_logging():
if _logger_state == 'run':
return True
_logger.info('Stopping logging ORM cache stats')
return False
class StatsLine:
def __init__(self, method, counter: ormcache_counter):
self.sz_entries_sum: int = 0
self.sz_entries_max: int = 0
self.nb_entries: int = 0
self.counter = counter
self.method = method
def _log_ormcache_stats():
""" Log statistics of ormcache usage by database, model, and method. """
from odoo.modules.registry import Registry # noqa: PLC0415
try:
# {dbname: {method: StatsLine}}
cache_stats: defaultdict[str, dict[Callable, StatsLine]] = defaultdict(dict)
# {dbname: (cache_name, entries, count, total_size)}
cache_usage: defaultdict[str, list[tuple[str, int, int, int]]] = defaultdict(list)
# browse the values in cache
registries = Registry.registries.snapshot
class_slots = {}
for i, (dbname, registry) in enumerate(registries.items(), start=1):
if not check_continue_logging():
return
_logger.info("Processing database %s (%d/%d)", dbname, i, len(registries))
db_cache_stats = cache_stats[dbname]
db_cache_usage = cache_usage[dbname]
for cache_name, cache in registry._Registry__caches.items():
cache_total_size = 0
for cache_key, cache_value in cache.snapshot.items():
method = cache_key[1]
stats = db_cache_stats.get(method)
if stats is None:
stats = db_cache_stats[method] = StatsLine(method, _COUNTERS[dbname, method])
stats.nb_entries += 1
if not show_size:
continue
size = get_cache_size((cache_key, cache_value), cache_info=method.__qualname__, class_slots=class_slots)
cache_total_size += size
stats.sz_entries_sum += size
stats.sz_entries_max = max(stats.sz_entries_max, size)
db_cache_usage.append((cache_name, len(cache), cache.count, cache_total_size))
# add counters that have no values in cache
for (dbname, method), counter in _COUNTERS.copy().items(): # copy to avoid concurrent modification
if not check_continue_logging():
return
db_cache_stats = cache_stats[dbname]
stats = db_cache_stats.get(method)
if stats is None:
db_cache_stats[method] = StatsLine(method, counter)
# Output the stats
log_msgs = ['Caches stats:']
size_column_info = (
f"{'Memory %':>10},"
f"{'Memory SUM':>12},"
f"{'Memory MAX':>12},"
) if show_size else ''
column_info = (
f"{'Cache Name':>25},"
f"{'Entry':>7},"
f"{size_column_info}"
f"{'Hit':>6},"
f"{'Miss':>6},"
f"{'Err':>6},"
f"{'Gen Time [s]':>13},"
f"{'Hit Ratio':>10},"
f"{'TX Hit Ratio':>13},"
f"{'TX Call':>8},"
" Method"
)
for dbname, db_cache_stats in sorted(cache_stats.items(), key=lambda k: k[0] or '~'):
if not check_continue_logging():
return
log_msgs.append(f'Database {dbname or "<no_db>"}:')
log_msgs.extend(
f" * {cache_name}: {entries}/{count}{' (' if cache_total_size else ''}{cache_total_size}{' bytes)' if cache_total_size else ''}"
for cache_name, entries, count, cache_total_size in db_cache_usage
)
log_msgs.append('Details:')
# sort by -sz_entries_sum and method_name
db_cache_stat = sorted(db_cache_stats.items(), key=lambda k: (-k[1].sz_entries_sum, k[0].__name__))
sz_entries_all = sum(stat.sz_entries_sum for _, stat in db_cache_stat)
log_msgs.append(column_info)
for method, stat in db_cache_stat:
size_data = (
f'{stat.sz_entries_sum / (sz_entries_all or 1) * 100:9.1f}%,'
f'{stat.sz_entries_sum:12d},'
f'{stat.sz_entries_max:12d},'
) if show_size else ''
log_msgs.append(
f'{stat.counter.cache_name:>25},'
f'{stat.nb_entries:7d},'
f'{size_data}'
f'{stat.counter.hit:6d},'
f'{stat.counter.miss:6d},'
f'{stat.counter.err:6d},'
f'{stat.counter.gen_time:13.3f},'
f'{stat.counter.ratio:9.1f}%,'
f'{stat.counter.tx_ratio:12.1f}%,'
f'{stat.counter.tx_calls:8d},'
f' {method.__qualname__}'
)
_logger.info('\n'.join(log_msgs))
except Exception: # noqa: BLE001
_logger.exception()
finally:
global _logger_state # noqa: PLW0603
with _logger_lock:
_logger_state = 'wait'
show_size = False
if sig == signal.SIGUSR1:
threading.Thread(target=_log_ormcache_stats,
name="odoo.signal.log_ormcache_stats").start()
elif sig == signal.SIGUSR2:
show_size = True
threading.Thread(target=_log_ormcache_stats,
name="odoo.signal.log_ormcache_stats_with_size").start()
def get_cache_key_counter(bound_method, *args, **kwargs):
def get_cache_key_counter(bound_method: Callable, *args, **kwargs) -> tuple[LRU, tuple, ormcache_counter]:
""" Return the cache, key and stat counter for the given call. """
model = bound_method.__self__
ormcache = bound_method.__cache__
cache, key0, counter = ormcache.lru(model)
key = key0 + ormcache.key(model, *args, **kwargs)
model: BaseModel = bound_method.__self__ # type: ignore
ormcache_instance: ormcache = bound_method.__cache__ # type: ignore
cache: LRU = model.pool._Registry__caches[ormcache_instance.cache_name] # type: ignore
key = ormcache_instance.key(model, *args, **kwargs)
counter = _COUNTERS[model.pool.db_name, ormcache_instance.method]
return cache, key, counter
# For backward compatibility
cache = ormcache
def get_cache_size(
obj,
*,
cache_info: str = '',
seen_ids: set[int] | None = None,
class_slots: dict[type, Iterable[str]] | None = None
) -> int:
""" A non-thread-safe recursive object size estimator """
from odoo.models import BaseModel # noqa: PLC0415
from odoo.api import Environment # noqa: PLC0415
if seen_ids is None:
# count internal constants as 0 bytes
seen_ids = set(map(id, (None, False, True)))
if class_slots is None:
class_slots = {} # {class_id: combined_slots}
total_size = 0
objects = [obj]
while objects:
cur_obj = objects.pop()
if id(cur_obj) in seen_ids:
continue
if cache_info and isinstance(cur_obj, (BaseModel, Environment)):
_logger.error('%s is cached by %s', cur_obj, cache_info)
continue
seen_ids.add(id(cur_obj))
total_size += sys.getsizeof(cur_obj)
if hasattr(cur_obj, '__slots__'):
cur_obj_cls = type(cur_obj)
attributes = class_slots.get(id(cur_obj_cls))
if attributes is None:
class_slots[id(cur_obj_cls)] = attributes = tuple({
f'_{cls.__name__}{attr}' if attr.startswith('__') else attr
for cls in cur_obj_cls.mro()
for attr in getattr(cls, '__slots__', ())
})
objects.extend(getattr(cur_obj, attr, None) for attr in attributes)
if hasattr(cur_obj, '__dict__'):
objects.append(object.__dict__)
if isinstance(cur_obj, Mapping):
objects.extend(cur_obj.values())
objects.extend(cur_obj.keys())
elif isinstance(cur_obj, Collection) and not isinstance(cur_obj, (str, bytes, bytearray)):
objects.extend(cur_obj)
return total_size

View file

@ -6,8 +6,9 @@ import os
import re
import shutil
import odoo
from odoo.tools.config import config
import odoo.modules
from odoo import api
from .config import config
VERSION = 1
DEFAULT_EXCLUDE = [
@ -137,7 +138,7 @@ class Cloc(object):
module_name = os.path.basename(path)
self.book(module_name)
for root, dirs, files in os.walk(path):
for root, _dirs, files in os.walk(path):
for file_name in files:
file_path = os.path.join(root, file_name)
@ -160,8 +161,10 @@ class Cloc(object):
def count_modules(self, env):
# Exclude standard addons paths
exclude_heuristic = [odoo.modules.get_module_path(m, display_warning=False) for m in STANDARD_MODULES]
exclude_path = set([os.path.dirname(os.path.realpath(m)) for m in exclude_heuristic if m])
exclude_path = {
m.addons_path for name in STANDARD_MODULES
if (m := odoo.modules.Manifest.for_addon(name, display_warning=False))
}
domain = [('state', '=', 'installed')]
# if base_import_module is present
@ -170,11 +173,9 @@ class Cloc(object):
module_list = env['ir.module.module'].search(domain).mapped('name')
for module_name in module_list:
module_path = os.path.realpath(odoo.modules.get_module_path(module_name))
if module_path:
if any(module_path.startswith(i) for i in exclude_path):
continue
self.count_path(module_path)
manifest = odoo.modules.Manifest.for_addon(module_name)
if manifest and manifest.addons_path not in exclude_path:
self.count_path(manifest.path)
def count_customization(self, env):
imported_module_sa = ""
@ -287,10 +288,10 @@ class Cloc(object):
self.count_customization(env)
def count_database(self, database):
registry = odoo.modules.registry.Registry(config['db_name'])
registry = odoo.modules.registry.Registry(database)
with registry.cursor() as cr:
uid = odoo.SUPERUSER_ID
env = odoo.api.Environment(cr, uid, {})
uid = api.SUPERUSER_ID
env = api.Environment(cr, uid, {})
self.count_env(env)
#------------------------------------------------------

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
SCRIPT_EXTENSIONS = ('js',)
@ -8,3 +7,9 @@ ASSET_EXTENSIONS = SCRIPT_EXTENSIONS + STYLE_EXTENSIONS + TEMPLATE_EXTENSIONS
SUPPORTED_DEBUGGER = {'pdb', 'ipdb', 'wdb', 'pudb'}
EXTERNAL_ASSET = object()
PREFETCH_MAX = 1000
"""Maximum number of prefetched records"""
GC_UNLINK_LIMIT = 100_000
"""Maximuum number of records to clean in a single transaction."""

View file

@ -13,7 +13,9 @@ import os.path
import pprint
import re
import subprocess
import warnings
from datetime import datetime, timedelta
from typing import Literal, Optional
from dateutil.relativedelta import relativedelta
from lxml import etree, builder
@ -22,37 +24,35 @@ try:
except ImportError:
jingtrang = None
import odoo
from .config import config
from .misc import file_open, file_path, SKIPPED_ELEMENT_TYPES
from odoo.exceptions import ValidationError
from .safe_eval import safe_eval as s_eval, pytz, time
from .safe_eval import safe_eval, pytz, time
_logger = logging.getLogger(__name__)
def safe_eval(expr, ctx={}):
return s_eval(expr, ctx, nocopy=True)
ConvertMode = Literal['init', 'update']
IdRef = dict[str, int | Literal[False]]
class ParseError(Exception):
...
def _get_idref(self, env, model_str, idref):
idref2 = dict(idref,
Command=odoo.fields.Command,
def _get_eval_context(self, env, model_str):
from odoo import fields, release # noqa: PLC0415
context = dict(Command=fields.Command,
time=time,
DateTime=datetime,
datetime=datetime,
timedelta=timedelta,
relativedelta=relativedelta,
version=odoo.release.major_version,
version=release.major_version,
ref=self.id_get,
pytz=pytz)
if model_str:
idref2['obj'] = env[model_str].browse
return idref2
context['obj'] = env[model_str].browse
return context
def _fix_multiple_roots(node):
"""
@ -79,10 +79,8 @@ def _eval_xml(self, node, env):
if f_search := node.get('search'):
f_use = node.get("use",'id')
f_name = node.get("name")
idref2 = {}
if f_search:
idref2 = _get_idref(self, env, f_model, self.idref)
q = safe_eval(f_search, idref2)
context = _get_eval_context(self, env, f_model)
q = safe_eval(f_search, context)
ids = env[f_model].search(q).ids
if f_use != 'id':
ids = [x[f_use] for x in env[f_model].browse(ids).read([f_use])]
@ -96,9 +94,9 @@ def _eval_xml(self, node, env):
f_val = f_val[0]
return f_val
if a_eval := node.get('eval'):
idref2 = _get_idref(self, env, f_model, self.idref)
context = _get_eval_context(self, env, f_model)
try:
return safe_eval(a_eval, idref2)
return safe_eval(a_eval, context)
except Exception:
logging.getLogger('odoo.tools.convert.init').error(
'Could not eval(%s) for %s in %s', a_eval, node.get('name'), env.context)
@ -111,13 +109,14 @@ def _eval_xml(self, node, env):
if found in done:
continue
done.add(found)
id = m.groups()[0]
if not id in self.idref:
self.idref[id] = self.id_get(id)
rec_id = m[1]
xid = self.make_xml_id(rec_id)
if (record_id := self.idref.get(xid)) is None:
record_id = self.idref[xid] = self.id_get(xid)
# So funny story: in Python 3, bytes(n: int) returns a
# bytestring of n nuls. In Python 2 it obviously returns the
# stringified number, which is what we're expecting here
s = s.replace(found, str(self.idref[id]))
s = s.replace(found, str(record_id))
s = s.replace('%%', '%') # Quite weird but it's for (somewhat) backward compatibility sake
return s
@ -167,6 +166,7 @@ def _eval_xml(self, node, env):
raise ValueError(f"Unknown type {t!r}")
elif node.tag == "function":
from odoo.models import BaseModel # noqa: PLC0415
model_str = node.get('model')
model = env[model_str]
method_name = node.get('name')
@ -175,17 +175,29 @@ def _eval_xml(self, node, env):
kwargs = {}
if a_eval := node.get('eval'):
idref2 = _get_idref(self, env, model_str, self.idref)
args = list(safe_eval(a_eval, idref2))
context = _get_eval_context(self, env, model_str)
args = list(safe_eval(a_eval, context))
for child in node:
if child.tag == 'value' and child.get('name'):
kwargs[child.get('name')] = _eval_xml(self, child, env)
else:
args.append(_eval_xml(self, child, env))
# merge current context with context in kwargs
kwargs['context'] = {**env.context, **kwargs.get('context', {})}
if 'context' in kwargs:
model = model.with_context(**kwargs.pop('context'))
method = getattr(model, method_name)
is_model_method = getattr(method, '_api_model', False)
if is_model_method:
pass # already bound to an empty recordset
else:
record_ids, *args = args
model = model.browse(record_ids)
method = getattr(model, method_name)
# invoke method
return odoo.api.call_kw(model, method_name, args, kwargs)
result = method(*args, **kwargs)
if isinstance(result, BaseModel):
result = result.ids
return result
elif node.tag == "test":
return node.text
@ -238,9 +250,9 @@ form: module.record_id""" % (xml_id,)
records = self.env[d_model]
if d_search := rec.get("search"):
idref = _get_idref(self, self.env, d_model, {})
context = _get_eval_context(self, self.env, d_model)
try:
records = records.search(safe_eval(d_search, idref))
records = records.search(safe_eval(d_search, context))
except ValueError:
_logger.warning('Skipping deletion for failed search `%r`', d_search, exc_info=True)
@ -299,17 +311,17 @@ form: module.record_id""" % (xml_id,)
if not values.get('name'):
values['name'] = rec_id or '?'
from odoo.fields import Command # noqa: PLC0415
groups = []
for group in rec.get('groups', '').split(','):
if group.startswith('-'):
group_id = self.id_get(group[1:])
groups.append(odoo.Command.unlink(group_id))
groups.append(Command.unlink(group_id))
elif group:
group_id = self.id_get(group)
groups.append(odoo.Command.link(group_id))
groups.append(Command.link(group_id))
if groups:
values['groups_id'] = groups
values['group_ids'] = groups
data = {
@ -330,6 +342,7 @@ form: module.record_id""" % (xml_id,)
if self.xml_filename and rec_id:
model = model.with_context(
install_mode=True,
install_module=self.module,
install_filename=self.xml_filename,
install_xmlid=rec_id,
@ -356,7 +369,7 @@ form: module.record_id""" % (xml_id,)
# if the resource already exists, don't update it but store
# its database id (can be useful)
self.idref[rec_id] = record.id
self.idref[xid] = record.id
return None
elif not nodeattr2bool(rec, 'forcecreate', True):
# if it doesn't exist and we shouldn't create it, skip it
@ -373,11 +386,14 @@ form: module.record_id""" % (xml_id,)
return None
raise Exception("Cannot update missing record %r" % xid)
from odoo.fields import Command # noqa: PLC0415
res = {}
sub_records = []
for field in rec.iterchildren('field'):
#TODO: most of this code is duplicated above (in _eval_xml)...
f_name = field.get("name")
if '@' in f_name:
continue # used for translations
f_model = field.get("model")
if not f_model and f_name in model._fields:
f_model = model._fields[f_name].comodel_name
@ -385,8 +401,8 @@ form: module.record_id""" % (xml_id,)
f_val = False
if f_search := field.get("search"):
idref2 = _get_idref(self, env, f_model, self.idref)
q = safe_eval(f_search, idref2)
context = _get_eval_context(self, env, f_model)
q = safe_eval(f_search, context)
assert f_model, 'Define an attribute model="..." in your .XML file!'
# browse the objects searched
s = env[f_model].search(q)
@ -394,7 +410,7 @@ form: module.record_id""" % (xml_id,)
_fields = env[rec_model]._fields
# if the current field is many2many
if (f_name in _fields) and _fields[f_name].type == 'many2many':
f_val = [odoo.Command.set([x[f_use] for x in s])]
f_val = [Command.set([x[f_use] for x in s])]
elif len(s):
# otherwise (we are probably in a many2one field),
# take the first element of the search
@ -442,8 +458,8 @@ form: module.record_id""" % (xml_id,)
if foreign_record_to_create:
model = model.with_context(foreign_record_to_create=foreign_record_to_create)
record = model._load_records([data], self.mode == 'update')
if rec_id:
self.idref[rec_id] = record.id
if xid:
self.idref[xid] = record.id
if config.get('import_partial'):
env.cr.commit()
for child_rec, inverse_name in sub_records:
@ -503,7 +519,7 @@ form: module.record_id""" % (xml_id,)
groups = el.attrib.pop('groups', None)
if groups:
grp_lst = [("ref('%s')" % x) for x in groups.split(',')]
record.append(Field(name="groups_id", eval="[Command.set(["+', '.join(grp_lst)+"])]"))
record.append(Field(name="group_ids", eval="[Command.set(["+', '.join(grp_lst)+"])]"))
if el.get('primary') == 'True':
# Pseudo clone mode, we'll set the t-name to the full canonical xmlid
el.append(
@ -521,14 +537,13 @@ form: module.record_id""" % (xml_id,)
return self._tag_record(record)
def id_get(self, id_str, raise_if_not_found=True):
id_str = self.make_xml_id(id_str)
if id_str in self.idref:
return self.idref[id_str]
res = self.model_id_get(id_str, raise_if_not_found)
return res and res[1]
return self.model_id_get(id_str, raise_if_not_found)[1]
def model_id_get(self, id_str, raise_if_not_found=True):
if '.' not in id_str:
id_str = '%s.%s' % (self.module, id_str)
id_str = self.make_xml_id(id_str)
return self.env['ir.model.data']._xmlid_to_res_model_res_id(id_str, raise_if_not_found=raise_if_not_found)
def _tag_root(self, el):
@ -578,11 +593,11 @@ form: module.record_id""" % (xml_id,)
value = self._sequences[-1] = value + 10
return value
def __init__(self, env, module, idref, mode, noupdate=False, xml_filename=None):
def __init__(self, env, module, idref: Optional[IdRef], mode: ConvertMode, noupdate: bool = False, xml_filename: str = ''):
self.mode = mode
self.module = module
self.envs = [env(context=dict(env.context, lang=None))]
self.idref = {} if idref is None else idref
self.idref: IdRef = {} if idref is None else idref
self._noupdate = [noupdate]
self._sequences = []
self.xml_filename = xml_filename
@ -601,12 +616,28 @@ form: module.record_id""" % (xml_id,)
self._tag_root(de)
DATA_ROOTS = ['odoo', 'data', 'openerp']
def convert_file(env, module, filename, idref, mode='update', noupdate=False, kind=None, pathname=None):
def convert_file(
env,
module,
filename,
idref: Optional[IdRef],
mode: ConvertMode = 'update',
noupdate=False,
kind=None,
pathname=None,
):
if kind is not None:
warnings.warn(
"The `kind` argument is deprecated in Odoo 19.",
DeprecationWarning,
stacklevel=2,
)
if pathname is None:
pathname = os.path.join(module, filename)
ext = os.path.splitext(filename)[1].lower()
with file_open(pathname, 'rb') as fp:
with file_open(pathname, 'rb', env=env) as fp:
if ext == '.csv':
convert_csv_import(env, module, pathname, fp.read(), idref, mode, noupdate)
elif ext == '.sql':
@ -618,11 +649,20 @@ def convert_file(env, module, filename, idref, mode='update', noupdate=False, ki
else:
raise ValueError("Can't load unknown file type %s.", filename)
def convert_sql_import(env, fp):
env.cr.execute(fp.read()) # pylint: disable=sql-injection
def convert_csv_import(env, module, fname, csvcontent, idref=None, mode='init',
noupdate=False):
def convert_csv_import(
env,
module,
fname,
csvcontent,
idref: Optional[IdRef] = None,
mode: ConvertMode = 'init',
noupdate=False,
):
'''Import csv file :
quote: "
delimiter: ,
@ -637,15 +677,25 @@ def convert_csv_import(env, module, fname, csvcontent, idref=None, mode='init',
_logger.error("Import specification does not contain 'id' and we are in init mode, Cannot continue.")
return
translate_indexes = {i for i, field in enumerate(fields) if '@' in field}
def remove_translations(row):
return [cell for i, cell in enumerate(row) if i not in translate_indexes]
fields = remove_translations(fields)
if not fields:
return
# clean the data from translations (treated during translation import), then
# filter out empty lines (any([]) == False) and lines containing only empty cells
datas = [
line for line in reader
if any(line)
data_line for line in reader
if any(data_line := remove_translations(line))
]
context = {
'mode': mode,
'module': module,
'install_mode': True,
'install_module': module,
'install_filename': fname,
'noupdate': noupdate,
@ -661,9 +711,18 @@ def convert_csv_import(env, module, fname, csvcontent, idref=None, mode='init',
message=warning_msg,
))
def convert_xml_import(env, module, xmlfile, idref=None, mode='init', noupdate=False, report=None):
def convert_xml_import(
env,
module,
xmlfile,
idref: Optional[IdRef] = None,
mode: ConvertMode = 'init',
noupdate=False,
report=None,
):
doc = etree.parse(xmlfile)
schema = os.path.join(config['root_path'], 'import_xml.rng')
schema = os.path.join(config.root_path, 'import_xml.rng')
relaxng = etree.RelaxNG(etree.parse(schema))
try:
relaxng.assert_(doc)

View file

@ -1,60 +1,228 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
import calendar
import math
from datetime import date, datetime, time
from typing import Tuple, TypeVar, Literal, Iterator, Type
import re
import typing
from datetime import date, datetime, time, timedelta, tzinfo
import babel
import pytz
from dateutil.relativedelta import relativedelta, weekdays
from .func import lazy
from .float_utils import float_round
D = TypeVar('D', date, datetime)
if typing.TYPE_CHECKING:
import babel
from collections.abc import Callable, Iterable, Iterator
from odoo.orm.types import Environment
D = typing.TypeVar('D', date, datetime)
utc = pytz.utc
TRUNCATE_TODAY = relativedelta(microsecond=0, second=0, minute=0, hour=0)
TRUNCATE_UNIT = {
'day': TRUNCATE_TODAY,
'month': TRUNCATE_TODAY,
'year': TRUNCATE_TODAY,
'week': TRUNCATE_TODAY,
'hour': relativedelta(microsecond=0, second=0, minute=0),
'minute': relativedelta(microsecond=0, second=0),
'second': relativedelta(microsecond=0),
}
WEEKDAY_NUMBER = dict(zip(
('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'),
range(7),
strict=True,
))
_SHORT_DATE_UNIT = {
'd': 'days',
'm': 'months',
'y': 'years',
'w': 'weeks',
'H': 'hours',
'M': 'minutes',
'S': 'seconds',
}
__all__ = [
'date_range',
'float_to_time',
'get_fiscal_year',
'get_month',
'get_quarter',
'get_quarter_number',
'get_timedelta',
'localized',
'parse_date',
'parse_iso_date',
'sum_intervals',
'time_to_float',
'to_timezone',
]
def date_type(value: D) -> Type[D]:
''' Return either the datetime.datetime class or datetime.date type whether `value` is a datetime or a date.
:param value: A datetime.datetime or datetime.date object.
:return: datetime.datetime or datetime.date
'''
return datetime if isinstance(value, datetime) else date
def float_to_time(hours: float) -> time:
""" Convert a number of hours into a time object. """
if hours == 24.0:
return time.max
fractional, integral = math.modf(hours)
return time(int(integral), int(float_round(60 * fractional, precision_digits=0)), 0)
def get_month(date: D) -> Tuple[D, D]:
''' Compute the month dates range on which the 'date' parameter belongs to.
'''
def time_to_float(duration: time | timedelta) -> float:
""" Convert a time object to a number of hours. """
if isinstance(duration, timedelta):
return duration.total_seconds() / 3600
if duration == time.max:
return 24.0
seconds = duration.microsecond / 1_000_000 + duration.second + duration.minute * 60
return seconds / 3600 + duration.hour
def localized(dt: datetime) -> datetime:
""" When missing, add tzinfo to a datetime. """
return dt if dt.tzinfo else dt.replace(tzinfo=utc)
def to_timezone(tz: tzinfo | None) -> Callable[[datetime], datetime]:
""" Get a function converting a datetime to another localized datetime. """
if tz is None:
return lambda dt: dt.astimezone(utc).replace(tzinfo=None)
return lambda dt: dt.astimezone(tz)
def parse_iso_date(value: str) -> date | datetime:
""" Parse a ISO encoded string to a date or datetime.
:raises ValueError: when the format is invalid or has a timezone
"""
# Looks like ISO format
if len(value) <= 10:
return date.fromisoformat(value)
now = datetime.fromisoformat(value)
if now.tzinfo is not None:
raise ValueError(f"expecting only datetimes with no timezone: {value!r}")
return now
def parse_date(value: str, env: Environment) -> date | datetime:
r""" Parse a technical date string into a date or datetime.
This supports ISO formatted dates and dates relative to now.
`parse_iso_date` is used if the input starts with r'\d+-'.
Otherwise, the date is computed by starting from now at user's timezone.
We can also start 'today' (resulting in a date type). Then we apply offsets:
- we can add 'd', 'w', 'm', 'y', 'H', 'M', 'S':
days, weeks, months, years, hours, minutes, seconds
- "+3d" to add 3 days
- "-1m" to subtract one month
- we can set a part of the date which will reset to midnight or only lower
date parts
- "=1d" sets first day of month at midnight
- "=6m" sets June and resets to midnight
- "=3H" sets time to 3:00:00
- weekdays are handled similarly
- "=tuesday" sets to Tuesday of the current week at midnight
- "+monday" goes to next Monday (no change if we are on Monday)
- "=week_start" sets to the first day of the current week, according to the locale
The DSL for relative dates is as follows:
```
relative_date := ('today' | 'now')? offset*
offset := date_rel | time_rel | weekday
date_rel := (regex) [=+-]\d+[dwmy]
time_rel := (regex) [=+-]\d+[HMS]
weekday := [=+-] ('monday' | ... | 'sunday' | 'week_start')
```
An equivalent function is JavaScript is `parseSmartDateInput`.
:param value: The string to parse
:param env: The environment to get the current date (in user's tz)
:param naive: Whether to cast the result to a naive datetime.
"""
if re.match(r'\d+-', value):
return parse_iso_date(value)
terms = value.split()
if not terms:
raise ValueError("Empty date value")
# Find the starting point
from odoo.orm.fields_temporal import Date, Datetime # noqa: PLC0415
dt: datetime | date = Datetime.now()
term = terms.pop(0) if terms[0] in ('today', 'now') else 'now'
if term == 'today':
dt = Date.context_today(env['base'], dt)
else:
dt = Datetime.context_timestamp(env['base'], dt)
for term in terms:
operator = term[0]
if operator not in ('+', '-', '=') or len(term) < 3:
raise ValueError(f"Invalid term {term!r} in expression date: {value!r}")
# Weekday
dayname = term[1:]
if dayname in WEEKDAY_NUMBER or dayname == "week_start":
week_start = int(env["res.lang"]._get_data(code=env.user.lang).week_start) - 1
weekday = week_start if dayname == "week_start" else WEEKDAY_NUMBER[dayname]
weekday_offset = ((weekday - week_start) % 7) - ((dt.weekday() - week_start) % 7)
if operator in ('+', '-'):
if operator == '+' and weekday_offset < 0:
weekday_offset += 7
elif operator == '-' and weekday_offset > 0:
weekday_offset -= 7
elif isinstance(dt, datetime):
dt += TRUNCATE_TODAY
dt += timedelta(weekday_offset)
continue
# Operations on dates
try:
unit = _SHORT_DATE_UNIT[term[-1]]
if operator in ('+', '-'):
number = int(term[:-1]) # positive or negative
else:
number = int(term[1:-1])
unit = unit.removesuffix('s')
if isinstance(dt, datetime):
dt += TRUNCATE_UNIT[unit]
# note: '=Nw' is not supported
dt += relativedelta(**{unit: number})
except (ValueError, TypeError, KeyError):
raise ValueError(f"Invalid term {term!r} in expression date: {value!r}")
# always return a naive date
if isinstance(dt, datetime) and dt.tzinfo is not None:
dt = dt.astimezone(pytz.utc).replace(tzinfo=None)
return dt
def get_month(date: D) -> tuple[D, D]:
""" Compute the month date range from a date (set first and last day of month).
"""
return date.replace(day=1), date.replace(day=calendar.monthrange(date.year, date.month)[1])
def get_quarter_number(date: date) -> int:
''' Get the number of the quarter on which the 'date' parameter belongs to.
'''
return math.ceil(date.month / 3)
""" Get the quarter from a date (1-4)."""
return (date.month - 1) // 3 + 1
def get_quarter(date: D) -> Tuple[D, D]:
''' Compute the quarter dates range on which the 'date' parameter belongs to.
'''
quarter_number = get_quarter_number(date)
month_from = ((quarter_number - 1) * 3) + 1
def get_quarter(date: D) -> tuple[D, D]:
""" Compute the quarter date range from a date (set first and last day of quarter).
"""
month_from = (date.month - 1) // 3 * 3 + 1
date_from = date.replace(month=month_from, day=1)
date_to = date_from + relativedelta(months=2)
date_to = date_from.replace(month=month_from + 2)
date_to = date_to.replace(day=calendar.monthrange(date_to.year, date_to.month)[1])
return date_from, date_to
def get_fiscal_year(date: D, day: int = 31, month: int = 12) -> Tuple[D, D]:
''' Compute the fiscal year dates range on which the 'date' parameter belongs to.
def get_fiscal_year(date: D, day: int = 31, month: int = 12) -> tuple[D, D]:
""" Compute the fiscal year date range from a date (first and last day of fiscal year).
A fiscal year is the period used by governments for accounting purposes and vary between countries.
By default, calling this method with only one parameter gives the calendar year because the ending date of the
fiscal year is set to the YYYY-12-31.
@ -63,7 +231,7 @@ def get_fiscal_year(date: D, day: int = 31, month: int = 12) -> Tuple[D, D]:
:param day: The day of month the fiscal year ends.
:param month: The month of year the fiscal year ends.
:return: The start and end dates of the fiscal year.
'''
"""
def fix_day(year, month, day):
max_day = calendar.monthrange(year, month)[1]
@ -86,7 +254,7 @@ def get_fiscal_year(date: D, day: int = 31, month: int = 12) -> Tuple[D, D]:
return date_from, date_to
def get_timedelta(qty: int, granularity: Literal['hour', 'day', 'week', 'month', 'year']):
def get_timedelta(qty: int, granularity: typing.Literal['hour', 'day', 'week', 'month', 'year']):
""" Helper to get a `relativedelta` object for the given quantity and interval unit.
"""
switch = {
@ -99,7 +267,7 @@ def get_timedelta(qty: int, granularity: Literal['hour', 'day', 'week', 'month',
return switch[granularity]
Granularity = Literal['year', 'quarter', 'month', 'week', 'day', 'hour']
Granularity = typing.Literal['year', 'quarter', 'month', 'week', 'day', 'hour']
def start_of(value: D, granularity: Granularity) -> D:
@ -163,7 +331,7 @@ def end_of(value: D, granularity: Granularity) -> D:
elif granularity == 'week':
# `calendar.weekday` uses ISO8601 for start of week reference, this means that
# by default MONDAY is the first day of the week and SUNDAY is the last.
result = value + relativedelta(days=6-calendar.weekday(value.year, value.month, value.day))
result = value + relativedelta(days=6 - calendar.weekday(value.year, value.month, value.day))
elif granularity == "day":
result = value
elif granularity == "hour" and is_datetime:
@ -208,11 +376,12 @@ def date_range(start: D, end: D, step: relativedelta = relativedelta(months=1))
"""Date range generator with a step interval.
:param start: beginning date of the range.
:param end: ending date of the range.
:param step: interval of the range.
:param end: ending date of the range (inclusive).
:param step: interval of the range (positive).
:return: a range of datetime from start to end.
"""
post_process = lambda dt: dt # noqa: E731
if isinstance(start, datetime) and isinstance(end, datetime):
are_naive = start.tzinfo is None and end.tzinfo is None
are_utc = start.tzinfo == pytz.utc and end.tzinfo == pytz.utc
@ -226,32 +395,37 @@ def date_range(start: D, end: D, step: relativedelta = relativedelta(months=1))
if not are_naive and not are_utc and not are_others:
raise ValueError("Timezones of start argument and end argument mismatch")
dt = start.replace(tzinfo=None)
end_dt = end.replace(tzinfo=None)
post_process = start.tzinfo.localize if start.tzinfo else lambda dt: dt
if not are_naive:
post_process = start.tzinfo.localize
start = start.replace(tzinfo=None)
end = end.replace(tzinfo=None)
elif isinstance(start, date) and isinstance(end, date):
# FIXME: not correctly typed, and will break if the step is a fractional
# day: `relativedelta` will return a datetime, which can't be
# compared with a `date`
dt, end_dt = start, end
post_process = lambda dt: dt
if not isinstance(start + step, date):
raise ValueError("the step interval must add only entire days") # noqa: TRY004
else:
raise ValueError("start/end should be both date or both datetime type")
raise ValueError("start/end should be both date or both datetime type") # noqa: TRY004
if start > end:
raise ValueError("start > end, start date must be before end")
if start == start + step:
raise ValueError("Looks like step is null")
if start >= start + step:
raise ValueError("Looks like step is null or negative")
while dt <= end_dt:
yield post_process(dt)
dt = dt + step
while start <= end:
yield post_process(start)
start += step
def weeknumber(locale: babel.Locale, date: date) -> Tuple[int, int]:
def sum_intervals(intervals: Iterable[tuple[datetime, datetime]]) -> float:
    """ Sum the durations of the given intervals, expressed in hours.

    Each interval is a tuple whose first two items are the start and stop
    datetimes; any extra trailing items (e.g. associated records) are ignored.

    :param intervals: iterable of ``(start, stop, ...)`` tuples
    :return: total duration in (fractional) hours
    """
    # Note: the original annotation ``tuple[datetime, datetime, ...]`` is not
    # valid typing syntax (Ellipsis is only allowed as ``tuple[X, ...]``).
    return sum(
        (stop - start).total_seconds() / 3600
        for start, stop, *_extra in intervals
    )
def weeknumber(locale: babel.Locale, date: date) -> tuple[int, int]:
"""Computes the year and weeknumber of `date`. The week number is 1-indexed
(so the first week is week number 1).
@ -289,3 +463,31 @@ def weeknumber(locale: babel.Locale, date: date) -> Tuple[int, int]:
doy = (date - fdow).days
return date.year, (doy // 7 + 1)
def weekstart(locale: babel.Locale, date: date) -> date:
    """
    Return the first weekday of the week containing `date`.

    If `date` is already that weekday, it is returned unchanged.
    Otherwise, it is shifted back to the most recent such weekday.
    Which weekday starts the week depends on ``locale.first_week_day``.

    Examples: week starts Sunday
    - weekstart of Sat 30 Aug -> Sun 24 Aug
    - weekstart of Sat 23 Aug -> Sun 17 Aug

    :param locale: babel locale providing the first day of the week
    :param date: the date (or datetime) to shift
    :return: the first day of the week containing `date`
    """
    # `weekdays[...](-1)` builds a "most recent such weekday" marker for
    # relativedelta (presumably dateutil's weekday constants — defined at
    # module level, confirm against the imports).
    return date + relativedelta(weekday=weekdays[locale.first_week_day](-1))
def weekend(locale: babel.Locale, date: date) -> date:
    """
    Return the last weekday of the week containing `date`.

    If `date` is already that weekday, it is returned unchanged.
    Otherwise, it is shifted forward to the next such weekday.
    Which weekday ends the week depends on ``locale.first_week_day``.

    Examples: week starts Sunday (so week ends Saturday)
    - weekend of Sun 24 Aug -> Sat 30 Aug
    - weekend of Sat 30 Aug -> Sat 30 Aug

    :param locale: babel locale providing the first day of the week
    :param date: the date (or datetime) to shift
    :return: the last day of the week containing `date`
    """
    # the week ends 6 days after it starts
    return weekstart(locale, date) + relativedelta(days=6)

View file

@ -1,8 +1,12 @@
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from typing import Literal, overload
import builtins
import math
RoundingMethod = Literal['UP', 'DOWN', 'HALF-UP', 'HALF-DOWN', 'HALF-EVEN']
__all__ = [
"float_compare",
"float_is_zero",
@ -13,7 +17,7 @@ __all__ = [
]
def round(f):
def round(f: float) -> float:
# P3's builtin round differs from P2 in the following manner:
# * it rounds half to even rather than up (away from 0)
# * round(-0.) loses the sign (it returns -0 rather than 0)
@ -30,7 +34,10 @@ def round(f):
return math.copysign(roundf, f)
def _float_check_precision(precision_digits=None, precision_rounding=None):
def _float_check_precision(
precision_digits: int | None = None,
precision_rounding: float | None = None,
) -> float:
if precision_rounding is not None and precision_digits is None:
assert precision_rounding > 0,\
f"precision_rounding must be positive, got {precision_rounding}"
@ -45,7 +52,28 @@ def _float_check_precision(precision_digits=None, precision_rounding=None):
return precision_rounding
def float_round(value, precision_digits=None, precision_rounding=None, rounding_method='HALF-UP'):
@overload
def float_round(
value: float,
precision_digits: int,
rounding_method: RoundingMethod = ...,
) -> float: ...
@overload
def float_round(
value: float,
precision_rounding: float,
rounding_method: RoundingMethod = ...,
) -> float: ...
def float_round(
value: float,
precision_digits: int | None = None,
precision_rounding: float | None = None,
rounding_method: RoundingMethod = 'HALF-UP',
) -> float:
"""Return ``value`` rounded to ``precision_digits`` decimal digits,
minimizing IEEE-754 floating point representation errors, and applying
the tie-breaking rule selected with ``rounding_method``, by default
@ -53,15 +81,15 @@ def float_round(value, precision_digits=None, precision_rounding=None, rounding_
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
:param float value: the value to round
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
:param value: the value to round
:param precision_digits: number of fractional digits to round to.
:param precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param rounding_method: the rounding method used:
- 'HALF-UP' will round to the closest number with ties going away from zero.
- 'HALF-DOWN' will round to the closest number with ties going towards zero.
- 'HALF_EVEN' will round to the closest number with ties going to the closest
- 'HALF-EVEN' will round to the closest number with ties going to the closest
even number.
- 'UP' will always round away from 0.
- 'DOWN' will always round towards 0.
@ -124,7 +152,25 @@ def float_round(value, precision_digits=None, precision_rounding=None, rounding_
return denormalize(result)
def float_is_zero(value, precision_digits=None, precision_rounding=None):
@overload
def float_is_zero(
value: float,
precision_digits: int,
) -> bool: ...
@overload
def float_is_zero(
value: float,
precision_rounding: float,
) -> bool: ...
def float_is_zero(
value: float,
precision_digits: int | None = None,
precision_rounding: float | None = None,
) -> bool:
"""Returns true if ``value`` is small enough to be treated as
zero at the given precision (smaller than the corresponding *epsilon*).
The precision (``10**-precision_digits`` or ``precision_rounding``)
@ -138,11 +184,11 @@ def float_is_zero(value, precision_digits=None, precision_rounding=None):
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
:param precision_digits: number of fractional digits to round to.
:param precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value: value to compare with the precision's zero
:param value: value to compare with the precision's zero
:return: True if ``value`` is considered zero
"""
epsilon = _float_check_precision(precision_digits=precision_digits,
@ -150,7 +196,28 @@ def float_is_zero(value, precision_digits=None, precision_rounding=None):
return value == 0.0 or abs(float_round(value, precision_rounding=epsilon)) < epsilon
def float_compare(value1, value2, precision_digits=None, precision_rounding=None):
@overload
def float_compare(
value1: float,
value2: float,
precision_digits: int,
) -> Literal[-1, 0, 1]: ...
@overload
def float_compare(
value1: float,
value2: float,
precision_rounding: float,
) -> Literal[-1, 0, 1]: ...
def float_compare(
value1: float,
value2: float,
precision_digits: int | None = None,
precision_rounding: float | None = None,
) -> Literal[-1, 0, 1]:
"""Compare ``value1`` and ``value2`` after rounding them according to the
given precision. A value is considered lower/greater than another value
if their rounded value is different. This is not the same as having a
@ -169,10 +236,10 @@ def float_compare(value1, value2, precision_digits=None, precision_rounding=None
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param float value1: first value to compare
:param float value2: second value to compare
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
:param value1: first value to compare
:param value2: second value to compare
:param precision_digits: number of fractional digits to round to.
:param precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:return: (resp.) -1, 0 or 1, if ``value1`` is (resp.) lower than,
@ -192,15 +259,16 @@ def float_compare(value1, value2, precision_digits=None, precision_rounding=None
return -1 if delta < 0.0 else 1
def float_repr(value, precision_digits):
def float_repr(value: float, precision_digits: int) -> str:
"""Returns a string representation of a float with the
given number of fractional digits. This should not be
used to perform a rounding operation (this is done via
:func:`~.float_round`), but only to produce a suitable
string representation for a float.
:param float value:
:param int precision_digits: number of fractional digits to include in the output
:param value: the value to represent
:param precision_digits: number of fractional digits to include in the output
:return: the string representation of the value
"""
# Can't use str() here because it seems to have an intrinsic
# rounding to 12 significant digits, which causes a loss of
@ -210,7 +278,7 @@ def float_repr(value, precision_digits):
return "%.*f" % (precision_digits, value)
def float_split_str(value, precision_digits):
def float_split_str(value: float, precision_digits: int) -> tuple[str, str]:
"""Splits the given float 'value' in its unitary and decimal parts,
returning each of them as a string, rounding the value using
the provided ``precision_digits`` argument.
@ -227,22 +295,19 @@ def float_split_str(value, precision_digits):
1.1 with precision 3 => ('1', '100')
1.12 with precision 0 => ('1', '')
:param float value: value to split.
:param int precision_digits: number of fractional digits to round to.
:param value: value to split.
:param precision_digits: number of fractional digits to round to.
:return: returns the tuple(<unitary part>, <decimal part>) of the given value
:rtype: tuple(str, str)
"""
value = float_round(value, precision_digits=precision_digits)
value_repr = float_repr(value, precision_digits)
return tuple(value_repr.split('.')) if precision_digits else (value_repr, '')
def float_split(value, precision_digits):
def float_split(value: float, precision_digits: int) -> tuple[int, int]:
""" same as float_split_str() except that it returns the unitary and decimal
parts as integers instead of strings. In case ``precision_digits`` is zero,
0 is always returned as decimal part.
:rtype: tuple(int, int)
"""
units, cents = float_split_str(value, precision_digits)
if not cents:
@ -250,7 +315,11 @@ def float_split(value, precision_digits):
return int(units), int(cents)
def json_float_round(value, precision_digits, rounding_method='HALF-UP'):
def json_float_round(
value: float,
precision_digits: int,
rounding_method: RoundingMethod = 'HALF-UP',
) -> float:
"""Not suitable for float calculations! Similar to float_repr except that it
returns a float suitable for json dump
@ -259,7 +328,7 @@ def json_float_round(value, precision_digits, rounding_method='HALF-UP'):
Unfortunately `json.dumps` does not allow any form of custom float representation,
nor any custom types, everything is serialized from the basic JSON types.
:param int precision_digits: number of fractional digits to round to.
:param precision_digits: number of fractional digits to round to.
:param rounding_method: the rounding method used: 'HALF-UP', 'UP' or 'DOWN',
the first one rounding up to the closest number with the rule that
number>=0.5 is rounded up to 1, the second always rounding up and the
@ -290,12 +359,11 @@ _INVERTDICT = {
}
def float_invert(value):
def float_invert(value: float) -> float:
"""Inverts a floating point number with increased accuracy.
:param float value: value to invert.
:param bool store: whether store the result in memory for future calls.
:return: rounded float.
:param value: value to invert.
:return: inverted float.
"""
result = _INVERTDICT.get(value)
if result is None:

View file

@ -1,9 +1,11 @@
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from __future__ import annotations
import typing
from inspect import Parameter, getsourcefile, signature
from decorator import decorator
import functools
import typing
import warnings
from collections.abc import Callable # noqa: TC003
from inspect import Parameter, getsourcefile, signature
__all__ = [
'classproperty',
@ -11,53 +13,41 @@ __all__ = [
'lazy',
'lazy_classproperty',
'lazy_property',
'synchronized',
'reset_cached_properties',
]
T = typing.TypeVar("T")
if typing.TYPE_CHECKING:
from collections.abc import Callable
P = typing.ParamSpec("P")
class lazy_property(typing.Generic[T]):
""" Decorator for a lazy property of an object, i.e., an object attribute
that is determined by the result of a method call evaluated once. To
reevaluate the property, simply delete the attribute on the object, and
get it again.
"""
def __init__(self, fget: Callable[[typing.Any], T]):
assert not fget.__name__.startswith('__'),\
"lazy_property does not support mangled names"
self.fget = fget
def reset_cached_properties(obj) -> None:
    """ Reset all cached properties on the instance `obj`.

    Any attribute stored in the instance dict whose class-level counterpart is
    a :class:`functools.cached_property` is dropped, so it will be recomputed
    on next access.
    """
    klass = type(obj)
    instance_dict = vars(obj)
    # collect first, then delete, to avoid mutating the dict while scanning it
    cached_names = [
        name for name in instance_dict
        if isinstance(getattr(klass, name, None), functools.cached_property)
    ]
    for name in cached_names:
        del instance_dict[name]
@typing.overload
def __get__(self, obj: None, cls: typing.Any, /) -> typing.Any: ...
@typing.overload
def __get__(self, obj: object, cls: typing.Any, /) -> T: ...
def __get__(self, obj, cls, /):
if obj is None:
return self
value = self.fget(obj)
setattr(obj, self.fget.__name__, value)
return value
@property
def __doc__(self):
return self.fget.__doc__
class lazy_property(functools.cached_property):
def __init__(self, func):
super().__init__(func)
warnings.warn(
"lazy_property is deprecated since Odoo 19, use `functools.cached_property`",
category=DeprecationWarning,
stacklevel=2,
)
@staticmethod
def reset_all(obj) -> None:
""" Reset all lazy properties on the instance `obj`. """
cls = type(obj)
obj_dict = vars(obj)
for name in list(obj_dict):
if isinstance(getattr(cls, name, None), lazy_property):
obj_dict.pop(name)
def reset_all(instance):
warnings.warn(
"lazy_property is deprecated since Odoo 19, use `reset_cache_properties` directly",
category=DeprecationWarning,
)
reset_cached_properties(instance)
def conditional(condition, decorator):
def conditional(condition: typing.Any, decorator: Callable[[T], T]) -> Callable[[T], T]:
""" Decorator for a conditionally applied decorator.
Example::
@ -90,12 +80,14 @@ def filter_kwargs(func: Callable, kwargs: dict[str, typing.Any]) -> dict[str, ty
return {key: kwargs[key] for key in kwargs if key not in leftovers}
def synchronized(lock_attr: str = '_lock'):
@decorator
def locked(func, inst, *args, **kwargs):
with getattr(inst, lock_attr):
return func(inst, *args, **kwargs)
return locked
def synchronized(lock_attr: str = '_lock') -> Callable[[Callable[P, T]], Callable[P, T]]:
    """ Decorator factory that serializes calls to a method.

    The returned decorator wraps a method so that its body runs while holding
    the lock found at ``getattr(instance, lock_attr)``.

    :param lock_attr: name of the instance attribute holding the lock
    """
    def decorate(method, /):
        @functools.wraps(method)
        def guarded(self, *args, **kwargs):
            lock = getattr(self, lock_attr)
            with lock:
                return method(self, *args, **kwargs)
        return guarded
    return decorate
locked = synchronized()
@ -108,7 +100,7 @@ def frame_codeinfo(fframe, back=0):
try:
if not fframe:
return "<unknown>", ''
for i in range(back):
for _i in range(back):
fframe = fframe.f_back
try:
fname = getsourcefile(fframe)

View file

@ -0,0 +1,101 @@
""" Garbage collector tools
## Reference
https://github.com/python/cpython/blob/main/InternalDocs/garbage_collector.md
## TLDR cpython
Objects have reference counts, but we need garbage collection for cyclic
references. All allocated objects are split into collections (aka generations).
There is also one permanent generation that is never collected (see
``gc.freeze``).
The GC is triggered by the number of created objects. For the first collection,
at every allocation and deallocation, a counter is respectively increased and
decreased. Once it reaches a threshold, that collection is automatically
collected. Other thresolds indicate that every X collections, the next
collection is collected.
Default thresolds are 700, 10, 10.
"""
import contextlib
import gc
import logging
from time import thread_time_ns as _gc_time
_logger = logging.getLogger('gc')
_gc_start: int = 0
_gc_init_stats = gc.get_stats()
_gc_timings = [0, 0, 0]
def _to_ms(ns):
return round(ns / 1_000_000, 2)
def _timing_gc_callback(event, info):
    """Called before and after each run of the gc, see gc_set_timing.

    :param event: ``'start'`` when a collection begins, anything else
        (the gc module sends ``'stop'``) when it ends
    :param info: dict provided by the gc module; the ``'generation'`` key
        identifies which generation is being collected
    """
    global _gc_start  # noqa: PLW0603
    gen = info['generation']
    if event == 'start':
        # remember when this collection started (thread CPU time, ns)
        _gc_start = _gc_time()
        if gen == 2 and _logger.isEnabledFor(logging.DEBUG):
            _logger.debug("info %s, starting collection of gen2", gc_info())
    else:
        # accumulate the elapsed time on the collected generation
        timing = _gc_time() - _gc_start
        _gc_timings[gen] += timing
        _gc_start = 0
        if gen > 0:
            # only log the (rarer) gen1/gen2 collections
            _logger.debug("collected %s in %.2fms", info, _to_ms(timing))
def gc_set_timing(*, enable: bool):
    """Enable or disable timing callback.

    This collects information about how much time is spent by the GC.
    It logs GC times (at debug level) for collections bigger than 0.
    The overhead is under a microsecond.
    """
    installed = _timing_gc_callback in gc.callbacks
    if installed and not enable:
        gc.callbacks.remove(_timing_gc_callback)
    elif not installed and enable:
        global _gc_init_stats, _gc_timings  # noqa: PLW0603
        # reset the baselines so gc_info() reports stats relative to activation
        _gc_init_stats = gc.get_stats()
        _gc_timings = [0, 0, 0]
        gc.callbacks.append(_timing_gc_callback)
def gc_info():
    """Return a dict with stats about the garbage collector.

    The result contains:
    - ``cumulative_time``: total measured collection time in ms
    - ``time``: per-generation ``{avg_time, time, pct}`` entries when the
      timing callback is installed (see :func:`gc_set_timing`), else ``()``;
      ``avg_time`` is in ns, ``time`` in ms and ``pct`` a 0..1 ratio
    - ``count``: raw ``gc.get_stats()``
    - ``thresholds``: current ``gc.get_count()`` and ``gc.get_threshold()``
    """
    stats = gc.get_stats()
    times = []
    # fall back to 1 ns to avoid dividing by zero when nothing was timed yet
    cumulative_time = sum(_gc_timings) or 1
    for info, info_init, time in zip(stats, _gc_init_stats, _gc_timings):
        # collections of this generation since timing was (re)enabled
        count = info['collections'] - info_init['collections']
        times.append({
            'avg_time': time // count if count > 0 else 0,
            'time': _to_ms(time),
            'pct': round(time / cumulative_time, 3)
        })
    return {
        'cumulative_time': _to_ms(cumulative_time),
        'time': times if _timing_gc_callback in gc.callbacks else (),
        'count': stats,
        'thresholds': (gc.get_count(), gc.get_threshold()),
    }
@contextlib.contextmanager
def disabling_gc():
    """Disable automatic garbage collection inside the context manager.

    Yields True when the collector was enabled (and is thus disabled here),
    False when it was already disabled (in which case nothing is changed).

    The collector is re-enabled even if the body raises; without the
    try/finally, an exception in the ``with`` body would leave the GC
    disabled for the rest of the process.
    """
    if not gc.isenabled():
        yield False
        return
    gc.disable()
    _logger.debug('disabled, counts %s', gc.get_count())
    try:
        yield True
    finally:
        counts = gc.get_count()
        gc.enable()
        _logger.debug('enabled, counts %s', counts)

View file

@ -1,13 +1,14 @@
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Literal, Optional, Sequence
from typing import TYPE_CHECKING, Literal
from babel import lists
from odoo.tools.misc import babel_locale_parse, get_lang
if TYPE_CHECKING:
from collections.abc import Iterable
import odoo.api
XPG_LOCALE_RE = re.compile(
@ -23,9 +24,9 @@ XPG_LOCALE_RE = re.compile(
def format_list(
env: odoo.api.Environment,
lst: Sequence[str],
lst: Iterable,
style: Literal["standard", "standard-short", "or", "or-short", "unit", "unit-short", "unit-narrow"] = "standard",
lang_code: Optional[str] = None,
lang_code: str | None = None,
) -> str:
"""
Format the items in `lst` as a list in a locale-dependent manner with the chosen style.
@ -56,7 +57,7 @@ def format_list(
See https://www.unicode.org/reports/tr35/tr35-49/tr35-general.html#ListPatterns for more details.
:param env: the current environment.
:param lst: the sequence of items to format into a list.
:param lst: the iterable of items to format into a list.
:param style: the style to format the list with.
:param lang_code: the locale (i.e. en_US).
:return: the formatted list.
@ -65,7 +66,7 @@ def format_list(
# Some styles could be unavailable for the chosen locale
if style not in locale.list_patterns:
style = "standard"
return lists.format_list(lst, style, locale)
return lists.format_list([str(el) for el in lst], style, locale)
def py_to_js_locale(locale: str) -> str:

View file

@ -0,0 +1,158 @@
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from __future__ import annotations
import itertools
import typing
import warnings
if typing.TYPE_CHECKING:
from collections.abc import Iterable, Iterator
from collections.abc import Set as AbstractSet
T = typing.TypeVar('T')
def _boundaries(intervals: Intervals[T] | Iterable[tuple[T, T, AbstractSet]], opening: str, closing: str) -> Iterator[tuple[T, str, AbstractSet]]:
    """ Yield the opening and closing boundaries of every non-empty interval.

    Each ``(start, stop, records)`` interval produces ``(start, opening,
    records)`` then ``(stop, closing, records)``; empty or inverted intervals
    are skipped.
    """
    for start, stop, records in intervals:
        if start >= stop:
            # nothing to emit for an empty (or inverted) interval
            continue
        yield (start, opening, records)
        yield (stop, closing, records)
class Intervals(typing.Generic[T]):
    """ Collection of ordered disjoint intervals with some associated records.

    Each interval is a triple ``(start, stop, records)``, where ``records``
    is a recordset.

    By default, adjacent intervals are merged (1, 3, a) and (3, 5, b) become
    (1, 5, a | b). This behaviour can be prevented by setting
    `keep_distinct=True`.
    """
    def __init__(self, intervals: Iterable[tuple[T, T, AbstractSet]] | None = None, *, keep_distinct: bool = False):
        # ordered, disjoint (start, stop, records) triples
        self._items: list[tuple[T, T, AbstractSet]] = []
        self._keep_distinct = keep_distinct
        if intervals:
            # normalize the representation of intervals
            append = self._items.append
            starts: list[T] = []  # stack of currently-open interval starts
            items: AbstractSet | None = None  # union of records of the open intervals
            if self._keep_distinct:
                # sort on the boundary value only, so that a 'stop' at some
                # value does not get merged with a 'start' at the same value
                boundaries = sorted(_boundaries(sorted(intervals), 'start', 'stop'), key=lambda i: i[0])
            else:
                # full-tuple sort: 'start' < 'stop' lexicographically, so at
                # equal values an opening boundary comes first, which merges
                # adjacent intervals
                boundaries = sorted(_boundaries(intervals, 'start', 'stop'))
            for value, flag, value_items in boundaries:
                if flag == 'start':
                    starts.append(value)
                    if items is None:
                        items = value_items
                    else:
                        items = items.union(value_items)
                else:
                    start = starts.pop()
                    if not starts:
                        # the last open interval was closed: emit the merged one
                        append((start, value, items))
                        items = None

    def __bool__(self):
        return bool(self._items)

    def __len__(self):
        return len(self._items)

    def __iter__(self):
        return iter(self._items)

    def __reversed__(self):
        return reversed(self._items)

    def __or__(self, other):
        """ Return the union of two sets of intervals. """
        return Intervals(itertools.chain(self._items, other._items), keep_distinct=self._keep_distinct)

    def __and__(self, other):
        """ Return the intersection of two sets of intervals. """
        return self._merge(other, False)

    def __sub__(self, other):
        """ Return the difference of two sets of intervals. """
        return self._merge(other, True)

    def _merge(self, other: Intervals | Iterable[tuple[T, T, AbstractSet]], difference: bool) -> Intervals:
        """ Return the difference or intersection of two sets of intervals.

        Sweep over the boundaries of both operands in order: `self` opens and
        closes intervals ('start'/'stop'), while each boundary of `other`
        toggles whether emission is currently enabled ('switch').
        """
        result = Intervals(keep_distinct=self._keep_distinct)
        append = result._items.append
        # using 'self' and 'other' below forces normalization
        bounds1 = _boundaries(self, 'start', 'stop')
        bounds2 = _boundaries(other, 'switch', 'switch')
        start = None  # set by start/stop
        recs1 = None  # set by start
        enabled = difference  # changed by switch
        if self._keep_distinct:
            bounds = sorted(itertools.chain(bounds1, bounds2), key=lambda i: i[0])
        else:
            bounds = sorted(itertools.chain(bounds1, bounds2))
        for value, flag, recs in bounds:
            if flag == 'start':
                start = value
                recs1 = recs
            elif flag == 'stop':
                # close the current interval; emit it only when enabled
                if enabled and start < value:
                    append((start, value, recs1))
                start = None
            else:
                # boundary of `other`: emit/restart the pending part, then toggle
                if not enabled and start is not None:
                    start = value
                if enabled and start is not None and start < value:
                    append((start, value, recs1))
                enabled = not enabled
        return result

    def remove(self, interval):
        """ Remove an interval from the set. """
        warnings.warn("Deprecated since 19.0, do not mutate intervals", DeprecationWarning)
        self._items.remove(interval)

    def items(self):
        """ Return the intervals. """
        warnings.warn("Deprecated since 19.0, just iterate over Intervals", DeprecationWarning)
        return self._items
def intervals_overlap(interval_a: tuple[T, T], interval_b: tuple[T, T]) -> bool:
    """Check whether intervals intersect.

    :param interval_a: a ``(start, stop)`` pair
    :param interval_b: a ``(start, stop)`` pair
    :return: True if two non-zero intervals overlap
    """
    a_start, a_stop = interval_a
    b_start, b_stop = interval_b
    # the intervals are disjoint iff one of them ends before the other begins
    disjoint = a_stop <= b_start or b_stop <= a_start
    return not disjoint
def invert_intervals(intervals: Iterable[tuple[T, T]], first_start: T, last_stop: T) -> list[tuple[T, T]]:
    """Return the intervals between the intervals that were passed in.

    The expected use case is to turn "available intervals" into "unavailable intervals".

    :examples:
        ([(1, 2), (4, 5)], 0, 10) -> [(0, 1), (2, 4), (5, 10)]

    :param intervals: the intervals to invert
    :param first_start: start of whole interval
    :param last_stop: stop of whole interval
    :return: ordered list of the gaps between ``first_start`` and ``last_stop``
    """
    items = []
    prev_stop = first_start
    for start, stop in sorted(intervals):
        # Compare boundaries directly: the previous truthiness guards
        # (`if prev_stop and ...`) silently dropped valid falsy boundaries,
        # e.g. first_start=0 never produced the leading (0, 1) gap from the
        # docstring example above.
        if prev_stop < start and start <= last_stop:
            items.append((prev_stop, start))
        prev_stop = max(prev_stop, stop)
    if prev_stop < last_stop:
        items.append((prev_stop, last_stop))
    # abuse Intervals to merge contiguous intervals
    return [(start, stop) for start, stop, _ in Intervals([(start, stop, set()) for start, stop in items])]

View file

@ -12,12 +12,10 @@ the original source need to be supported by the browsers.
"""
import re
import logging
from functools import partial
from odoo.tools.misc import OrderedSet
_logger = logging.getLogger(__name__)
def transpile_javascript(url, content):
"""
@ -50,6 +48,7 @@ def transpile_javascript(url, content):
convert_default_export,
partial(wrap_with_qunit_module, url),
partial(wrap_with_odoo_define, module_path, dependencies),
partial(convert_t, url)
]
for s in steps:
content = s(content)
@ -229,6 +228,45 @@ def convert_export_class_default(content):
repl = r"""\g<space>const \g<identifier> = __exports[Symbol.for("default")] = \g<type> \g<identifier>"""
return EXPORT_CLASS_DEFAULT_RE.sub(repl, content)
# Matches a `const { ..., _t, ... } = require("@web/core/l10n/translation");`
# destructuring line; group 1 captures the `_t` name.
GETTEXT_RE = re.compile(r"""
^
\s*const\s*{
(?:\s*\w*\s*,)*
\s*(_t)\s*
(?:,\s*\w*\s*)*,?\s*
}\s*=\s*require\("@web/core/l10n/translation"\);$
""", re.MULTILINE | re.VERBOSE)

# Same shape, but detects an import of `appTranslateFn` from the translation module.
T_FN_RE = re.compile(r"""
^
\s*const\s*{
(?:\s*\w*\s*,)*
\s*(appTranslateFn)\s*
(?:,\s*\w*\s*)*,?\s*
}\s*=\s*require\("@web/core/l10n/translation"\);$
""", re.MULTILINE | re.VERBOSE)


def convert_t(url, content):
    """Rebind `_t` so translations are looked up with the module's name.

    Rewrites the `_t` import from the translation module into an
    `appTranslateFn` import (unless already imported) and appends a local
    `const _t = ...` that forwards to `appTranslateFn` with the module name.
    Test files (``*.test.js``) are left untouched.

    :param url: url of the transpiled file, used to extract the module name
        (via the module-level ``URL_RE`` — defined elsewhere in this file)
    :param content: the javascript source to transform
    :return: the transformed javascript source
    """
    if url.endswith(".test.js"):
        return content
    module_name = URL_RE.match(url)["module"]
    has_import_of_appTranslateFn = bool(T_FN_RE.search(content))

    def rename_gettext(match_):
        if has_import_of_appTranslateFn:
            # `appTranslateFn` is already imported: neutralize the `_t` name
            renamed_import = match_.group(0).replace("_t", "__not_defined__")
        else:
            renamed_import = match_.group(0).replace("_t", "appTranslateFn")
        renamed_import += f"""const _t = (str, ...args) => appTranslateFn(str, "{module_name}", ...args);"""
        return renamed_import

    return GETTEXT_RE.sub(rename_gettext, content)
EXPORT_VAR_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line

View file

@ -70,4 +70,6 @@ def json_default(obj):
return dict(obj)
if isinstance(obj, bytes):
return obj.decode()
if isinstance(obj, fields.Domain):
return list(obj)
return str(obj)

View file

@ -1,9 +1,8 @@
import collections
import threading
import typing
from collections.abc import Iterable, Iterator, MutableMapping
from .func import locked
from .misc import SENTINEL
__all__ = ['LRU']
@ -13,50 +12,112 @@ V = typing.TypeVar('V')
class LRU(MutableMapping[K, V], typing.Generic[K, V]):
"""
Implementation of a length-limited O(1) LRU map.
Implementation of a length-limited LRU map.
Original Copyright 2003 Josiah Carlson, later rebuilt on OrderedDict and added typing.
The mapping is thread-safe, and internally uses a lock to avoid concurrency
issues. However, access operations like ``lru[key]`` are fast and
lock-free.
"""
__slots__ = ('_count', '_lock', '_ordering', '_values')
def __init__(self, count: int, pairs: Iterable[tuple[K, V]] = ()):
assert count > 0, "LRU needs a positive count"
self._count = count
self._lock = threading.RLock()
self.count = max(count, 1)
self.d: collections.OrderedDict[K, V] = collections.OrderedDict()
self._values: dict[K, V] = {}
#
# The dict self._values contains the LRU items, while self._ordering
# only keeps track of their order, the most recently used ones being
# last. For performance reasons, we only use the lock when modifying
# the LRU, while reading it is lock-free (and thus faster).
#
# This strategy may result in inconsistencies between self._values and
# self._ordering. Indeed, concurrently accessed keys may be missing
# from self._ordering, but will eventually be added. This could result
# in keys being added back in self._ordering after their actual removal
# from the LRU. This results in the following invariant:
#
# self._values <= self._ordering | "keys being accessed"
#
self._ordering: dict[K, None] = {}
# Initialize
for key, value in pairs:
self[key] = value
@locked
def __contains__(self, obj: K) -> bool:
return obj in self.d
@property
def count(self) -> int:
return self._count
@locked
def __getitem__(self, obj: K) -> V:
a = self.d[obj]
self.d.move_to_end(obj, last=False)
return a
def __contains__(self, key: object) -> bool:
return key in self._values
@locked
def __setitem__(self, obj: K, val: V):
self.d[obj] = val
self.d.move_to_end(obj, last=False)
while len(self.d) > self.count:
self.d.popitem(last=True)
def __getitem__(self, key: K) -> V:
val = self._values[key]
# move key at the last position in self._ordering
self._ordering[key] = self._ordering.pop(key, None)
return val
@locked
def __delitem__(self, obj: K):
del self.d[obj]
def __setitem__(self, key: K, val: V):
values = self._values
ordering = self._ordering
with self._lock:
values[key] = val
ordering[key] = ordering.pop(key, None)
while True:
# if we have too many keys in ordering, filter them out
if len(ordering) > len(values):
# (copy to avoid concurrent changes on ordering)
for k in ordering.copy():
if k not in values:
ordering.pop(k, None)
# check if we have too many keys
if len(values) <= self._count:
break
# if so, pop the least recently used
try:
# have a default in case of concurrent accesses
key = next(iter(ordering), key)
except RuntimeError:
# ordering modified during iteration, retry
continue
values.pop(key, None)
ordering.pop(key, None)
def __delitem__(self, key: K):
self.pop(key)
@locked
def __len__(self) -> int:
return len(self.d)
return len(self._values)
@locked
def __iter__(self) -> Iterator[K]:
return iter(self.d)
return iter(self.snapshot)
@locked
def pop(self, key: K) -> V:
return self.d.pop(key)
@property
def snapshot(self) -> dict[K, V]:
""" Return a copy of the LRU (ordered according to LRU first). """
with self._lock:
values = self._values
# build result in expected order (copy self._ordering to avoid concurrent changes)
result = {
key: val
for key in self._ordering.copy()
if (val := values.get(key, SENTINEL)) is not SENTINEL
}
if len(result) < len(values):
# keys in value were missing from self._ordering, add them
result.update(values)
return result
def pop(self, key: K, /, default=SENTINEL) -> V:
with self._lock:
self._ordering.pop(key, None)
if default is SENTINEL:
return self._values.pop(key)
return self._values.pop(key, default)
@locked
def clear(self):
self.d.clear()
with self._lock:
self._ordering.clear()
self._values.clear()

View file

@ -12,12 +12,22 @@ import time
import email.utils
from email.utils import getaddresses as orig_getaddresses
from urllib.parse import urlparse
from typing import Literal
import html as htmllib
import idna
import markupsafe
from lxml import etree, html
from lxml.html import clean, defs
from lxml.html import (
XHTML_NAMESPACE,
_contains_block_level_tag,
_looks_like_full_html_bytes,
_looks_like_full_html_unicode,
clean,
defs,
document_fromstring,
html_parser,
)
from werkzeug import urls
from odoo.tools import misc
@ -66,7 +76,11 @@ safe_attrs = defs.safe_attrs | frozenset(
'data-publish', 'data-id', 'data-res_id', 'data-interval', 'data-member_id', 'data-scroll-background-ratio', 'data-view-id',
'data-class', 'data-mimetype', 'data-original-src', 'data-original-id', 'data-gl-filter', 'data-quality', 'data-resize-width',
'data-shape', 'data-shape-colors', 'data-file-name', 'data-original-mimetype',
'data-attachment-id', 'data-format-mimetype',
'data-ai-field', 'data-ai-record-id',
'data-heading-link-id',
'data-mimetype-before-conversion',
'data-language-id', 'data-syntax-highlighting-value'
])
SANITIZE_TAGS = {
# allow new semantic HTML5 tags
@ -262,6 +276,76 @@ def tag_quote(el):
el.set('data-o-mail-quote', '1')
def fromstring(html_, base_url=None, parser=None, **kw):
"""
This function mimics lxml.html.fromstring. It not only returns the parsed
element/document but also a flag indicating whether the input is for a
a single body element or not.
This tries to minimally parse the chunk of text, without knowing if it
is a fragment or a document.
base_url will set the document's base_url attribute (and the tree's docinfo.URL)
"""
if parser is None:
parser = html_parser
if isinstance(html_, bytes):
is_full_html = _looks_like_full_html_bytes(html_)
else:
is_full_html = _looks_like_full_html_unicode(html_)
doc = document_fromstring(html_, parser=parser, base_url=base_url, **kw)
if is_full_html:
return doc, False
# otherwise, lets parse it out...
bodies = doc.findall('body')
if not bodies:
bodies = doc.findall('{%s}body' % XHTML_NAMESPACE)
if bodies:
body = bodies[0]
if len(bodies) > 1:
# Somehow there are multiple bodies, which is bad, but just
# smash them into one body
for other_body in bodies[1:]:
if other_body.text:
if len(body):
body[-1].tail = (body[-1].tail or '') + other_body.text
else:
body.text = (body.text or '') + other_body.text
body.extend(other_body)
# We'll ignore tail
# I guess we are ignoring attributes too
other_body.drop_tree()
else:
body = None
heads = doc.findall('head')
if not heads:
heads = doc.findall('{%s}head' % XHTML_NAMESPACE)
if heads:
# Well, we have some sort of structure, so lets keep it all
head = heads[0]
if len(heads) > 1:
for other_head in heads[1:]:
head.extend(other_head)
# We don't care about text or tail in a head
other_head.drop_tree()
return doc, False
if body is None:
return doc, False
if (len(body) == 1 and (not body.text or not body.text.strip())
and (not body[-1].tail or not body[-1].tail.strip())):
# The body has just one element, so it was probably a single
# element passed in
return body[0], True
# Now we have a body which represents a bunch of tags which have the
# content that was passed in. We will create a fake container, which
# is the body tag, except <body> implies too much structure.
if _contains_block_level_tag(body):
body.tag = 'div'
else:
body.tag = 'span'
return body, False
def html_normalize(src, filter_callback=None, output_method="html"):
""" Normalize `src` for storage as an html field value.
@ -282,7 +366,7 @@ def html_normalize(src, filter_callback=None, output_method="html"):
return src
# html: remove encoding attribute inside tags
src = re.sub(r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)', "", src, flags=re.IGNORECASE | re.DOTALL)
src = re.sub(r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)', "", src)
src = src.replace('--!>', '-->')
src = re.sub(r'(<!-->|<!--->)', '<!-- -->', src)
@ -291,7 +375,7 @@ def html_normalize(src, filter_callback=None, output_method="html"):
src = re.sub(r'</?o:.*?>', '', src)
try:
doc = html.fromstring(src)
doc, single_body_element = fromstring(src)
except etree.ParserError as e:
# HTML comment only string, whitespace only..
if 'empty' in str(e):
@ -299,18 +383,23 @@ def html_normalize(src, filter_callback=None, output_method="html"):
raise
# perform quote detection before cleaning and class removal
if doc is not None:
for el in doc.iter(tag=etree.Element):
tag_quote(el)
for el in doc.iter(tag=etree.Element):
tag_quote(el)
if filter_callback:
doc = filter_callback(doc)
src = html.tostring(doc, encoding='unicode', method=output_method)
# this is ugly, but lxml/etree tostring want to put everything in a
# 'div' that breaks the editor -> remove that
if src.startswith('<div>') and src.endswith('</div>'):
if not single_body_element and src.startswith('<div>') and src.endswith('</div>'):
# the <div></div> may come from 2 places
# 1. the src is parsed as multiple body elements
# <div></div> wraps all elements.
# 2. the src is parsed as not only body elements
# <html></html> wraps all elements.
# then the Cleaner as the filter_callback which has 'html' in its
# 'remove_tags' will write <html></html> to <div></div> since it
# cannot directly drop the parent-most tag
src = src[5:-6]
# html considerations so real html content match database value
@ -393,19 +482,20 @@ def validate_url(url):
return url
def is_html_empty(html_content):
def is_html_empty(html_content: str | markupsafe.Markup | Literal[False] | None) -> bool:
"""Check if a html content is empty. If there are only formatting tags with style
attributes or a void content return True. Famous use case if a
'<p style="..."><br></p>' added by some web editor.
:param str html_content: html content, coming from example from an HTML field
:returns: bool, True if no content found or if containing only void formatting tags
:param html_content: html content, coming from example from an HTML field
:returns: True if no content found or if containing only void formatting tags
"""
if not html_content:
return True
icon_re = r'<\s*(i|span)\b(\s+[A-Za-z_-][A-Za-z0-9-_]*(\s*=\s*[\'"][^"\']*[\'"])?)*\s*\bclass\s*=\s*["\'][^"\']*\b(fa|fab|fad|far|oi)\b'
tag_re = r'<\s*\/?(?:p|div|section|span|br|b|i|font)\b(?:(\s+[A-Za-z_-][A-Za-z0-9-_]*(\s*=\s*[\'"][^"\']*[\'"]))*)(?:\s*>|\s*\/\s*>)'
return not bool(re.sub(tag_re, '', html_content).strip()) and not re.search(icon_re, html_content)
text_content = htmllib.unescape(re.sub(tag_re, '', html_content))
return not bool(text_content.strip()) and not re.search(icon_re, html_content)
def html_keep_url(text):
@ -432,16 +522,21 @@ def html_to_inner_content(html):
processed = re.sub(HTML_NEWLINES_REGEX, ' ', html)
processed = re.sub(HTML_TAGS_REGEX, '', processed)
processed = re.sub(r' {2,}|\t', ' ', processed)
processed = processed.replace("\xa0", " ")
processed = htmllib.unescape(processed)
processed = processed.strip()
return processed
return processed.strip()
def create_link(url, label):
return f'<a href="{url}" target="_blank" rel="noreferrer noopener">{label}</a>'
def html2plaintext(html, body_id=None, encoding='utf-8', include_references=True):
def html2plaintext(
html: str | markupsafe.Markup | Literal[False] | None,
body_id: str | None = None,
encoding: str = 'utf-8',
include_references: bool = True
) -> str:
""" From an HTML text, convert the HTML to plain text.
If @param body_id is provided then this is the tag where the
body (not necessarily <body>) starts.
@ -518,19 +613,19 @@ def html2plaintext(html, body_id=None, encoding='utf-8', include_references=True
return html.strip()
def plaintext2html(text, container_tag=None):
def plaintext2html(text: str, container_tag: str | None = None, with_paragraph: bool = True) -> markupsafe.Markup:
r"""Convert plaintext into html. Content of the text is escaped to manage
html entities, using :func:`~odoo.tools.misc.html_escape`.
- all ``\n``, ``\r`` are replaced by ``<br/>``
- enclose content into ``<p>``
- convert url into clickable link
- 2 or more consecutive ``<br/>`` are considered as paragraph breaks
:param str text: plaintext to convert
:param str container_tag: container of the html; by default the content is
:param text: plaintext to convert
:param container_tag: container of the html; by default the content is
embedded into a ``<div>``
:rtype: markupsafe.Markup
:param with_paragraph: whether or not considering 2 or more consecutive ``<br/>``
as paragraph breaks and enclosing content in ``<p>``
"""
assert isinstance(text, str)
text = misc.html_escape(text)
@ -542,13 +637,15 @@ def plaintext2html(text, container_tag=None):
text = html_keep_url(text)
# 3-4: form paragraphs
idx = 0
final = '<p>'
br_tags = re.compile(r'(([<]\s*[bB][rR]\s*/?[>]\s*){2,})')
for item in re.finditer(br_tags, text):
final += text[idx:item.start()] + '</p><p>'
idx = item.end()
final += text[idx:] + '</p>'
final = text
if with_paragraph:
idx = 0
final = '<p>'
br_tags = re.compile(r'(([<]\s*[bB][rR]\s*/?[>]\s*){2,})')
for item in re.finditer(br_tags, text):
final += text[idx:item.start()] + '</p><p>'
idx = item.end()
final += text[idx:] + '</p>'
# 5. container
if container_tag: # FIXME: validate that container_tag is just a simple tag?
@ -682,19 +779,23 @@ def email_split_tuples(text):
return list(map(_parse_based_on_spaces, valid_pairs))
def email_split(text):
""" Return a list of the email addresses found in ``text`` """
if not text:
return []
return [email for (name, email) in email_split_tuples(text)]
def email_split_and_format(text):
""" Return a list of email addresses found in ``text``, formatted using
formataddr. """
if not text:
return []
return [formataddr((name, email)) for (name, email) in email_split_tuples(text)]
def email_split_and_normalize(text):
""" Same as 'email_split' but normalized email """
return [(name, _normalize_email(email)) for (name, email) in email_split_tuples(text)]
def email_split_and_format_normalize(text):
""" Same as 'email_split_and_format' but normalizing email. """
return [
@ -737,7 +838,6 @@ def email_normalize(text, strict=True):
emails = email_split(text)
if not emails or (strict and len(emails) != 1):
return False
return _normalize_email(emails[0])
def email_normalize_all(text):
@ -749,8 +849,6 @@ def email_normalize_all(text):
:return list: list of normalized emails found in text
"""
if not text:
return []
emails = email_split(text)
return list(filter(None, [_normalize_email(email) for email in emails]))
@ -782,6 +880,7 @@ def _normalize_email(email):
pass
else:
local_part = local_part.lower()
return local_part + at + domain.lower()
def email_anonymize(normalized_email, *, redact_domain=False):

View file

@ -136,7 +136,7 @@ _mime_mappings = (
_Entry('image/png', [b'\x89PNG\r\n\x1A\n'], []),
_Entry('image/gif', [b'GIF87a', b'GIF89a'], []),
_Entry('image/bmp', [b'BM'], []),
_Entry('application/xml', [b'<'], [
_Entry('text/xml', [b'<'], [
_check_svg,
]),
_Entry('image/x-icon', [b'\x00\x00\x01\x00'], []),
@ -186,30 +186,14 @@ def _odoo_guess_mimetype(bin_data, default='application/octet-stream'):
try:
import magic
except ImportError:
magic = None
if magic:
# There are 2 python libs named 'magic' with incompatible api.
# magic from pypi https://pypi.python.org/pypi/python-magic/
if hasattr(magic, 'from_buffer'):
_guesser = functools.partial(magic.from_buffer, mime=True)
# magic from file(1) https://packages.debian.org/squeeze/python-magic
elif hasattr(magic, 'open'):
ms = magic.open(magic.MAGIC_MIME_TYPE)
ms.load()
_guesser = ms.buffer
def guess_mimetype(bin_data, default=None):
mimetype = _guesser(bin_data[:1024])
# upgrade incorrect mimetype to official one, fixed upstream
# https://github.com/file/file/commit/1a08bb5c235700ba623ffa6f3c95938fe295b262
if mimetype == 'image/svg':
return 'image/svg+xml'
# application/CDFV2 and application/x-ole-storage are two files
# formats that Microsoft Office was using before 2006. Use our
# own guesser to further discriminate the mimetype.
if mimetype in _olecf_mimetypes:
if isinstance(bin_data, bytearray):
bin_data = bytes(bin_data[:1024])
mimetype = magic.from_buffer(bin_data[:1024], mime=True)
if mimetype in ('application/CDFV2', 'application/x-ole-storage'):
# Those are the generic file format that Microsoft Office
# was using before 2006, use our own check to further
# discriminate the mimetype.
try:
if msoffice_mimetype := _check_olecf(bin_data):
return msoffice_mimetype
@ -219,8 +203,25 @@ if magic:
mimetype,
exc_info=True,
)
if mimetype == 'application/zip':
# magic doesn't properly detect some Microsoft Office
# documents created after 2025, use our own check to further
# discriminate the mimetype.
# /!\ Only work when bin_data holds the whole zipfile. /!\
try:
if msoffice_mimetype := _check_ooxml(bin_data):
return msoffice_mimetype
except zipfile.BadZipFile:
pass
except Exception: # noqa: BLE001
_logger_guess_mimetype.warning(
"Sub-checker '_check_ooxml' of type '%s' failed",
mimetype,
exc_info=True,
)
return mimetype
else:
except ImportError:
guess_mimetype = _odoo_guess_mimetype

View file

@ -39,8 +39,6 @@ import markupsafe
import pytz
from lxml import etree, objectify
import odoo
import odoo.addons
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are moved to loglevels until we refactor tools.
from odoo.loglevels import exception_to_unicode, get_encodings, ustr # noqa: F401
@ -65,6 +63,7 @@ __all__ = [
'NON_BREAKING_SPACE',
'SKIPPED_ELEMENT_TYPES',
'DotDict',
'LastOrderedSet',
'OrderedSet',
'Reverse',
'babel_locale_parse',
@ -108,6 +107,7 @@ __all__ = [
'topological_sort',
'unique',
'ustr',
'real_time',
]
_logger = logging.getLogger(__name__)
@ -124,6 +124,9 @@ objectify.set_default_parser(default_parser)
NON_BREAKING_SPACE = u'\N{NO-BREAK SPACE}'
# ensure we have a non patched time for query times when using freezegun
real_time = time.time.__call__ # type: ignore
class Sentinel(enum.Enum):
"""Class for typing parameters with a sentinel as a default"""
@ -142,9 +145,10 @@ def find_in_path(name):
path.append(config['bin_path'])
return which(name, path=os.pathsep.join(path))
#----------------------------------------------------------
# ----------------------------------------------------------
# Postgres subprocesses
#----------------------------------------------------------
# ----------------------------------------------------------
def find_pg_tool(name):
path = None
@ -152,9 +156,10 @@ def find_pg_tool(name):
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
except OSError:
raise Exception('Command `%s` not found.' % name)
def exec_pg_environ():
"""
Force the database PostgreSQL environment variables to the database
@ -165,17 +170,21 @@ def exec_pg_environ():
postgres user password in the PGPASSWORD environment variable or in a
special .pgpass file.
See also http://www.postgresql.org/docs/8.4/static/libpq-envars.html
See also https://www.postgresql.org/docs/current/libpq-envars.html
"""
env = os.environ.copy()
if odoo.tools.config['db_host']:
env['PGHOST'] = odoo.tools.config['db_host']
if odoo.tools.config['db_port']:
env['PGPORT'] = str(odoo.tools.config['db_port'])
if odoo.tools.config['db_user']:
env['PGUSER'] = odoo.tools.config['db_user']
if odoo.tools.config['db_password']:
env['PGPASSWORD'] = odoo.tools.config['db_password']
if config['db_host']:
env['PGHOST'] = config['db_host']
if config['db_port']:
env['PGPORT'] = str(config['db_port'])
if config['db_user']:
env['PGUSER'] = config['db_user']
if config['db_password']:
env['PGPASSWORD'] = config['db_password']
if config['db_app_name']:
env['PGAPPNAME'] = config['db_app_name'].replace('{pid}', f'env{os.getpid()}')[:63]
if config['db_sslmode']:
env['PGSSLMODE'] = config['db_sslmode']
return env
@ -184,7 +193,7 @@ def exec_pg_environ():
# ----------------------------------------------------------
def file_path(file_path: str, filter_ext: tuple[str, ...] = ('',), env: Environment | None = None) -> str:
def file_path(file_path: str, filter_ext: tuple[str, ...] = ('',), env: Environment | None = None, *, check_exists: bool = True) -> str:
"""Verify that a file exists under a known `addons_path` directory and return its full path.
Examples::
@ -197,13 +206,12 @@ def file_path(file_path: str, filter_ext: tuple[str, ...] = ('',), env: Environm
:param list[str] filter_ext: optional list of supported extensions (lowercase, with leading dot)
:param env: optional environment, required for a file path within a temporary directory
created using `file_open_temporary_directory()`
:param check_exists: check that the file exists (default: True)
:return: the absolute path to the file
:raise FileNotFoundError: if the file is not found under the known `addons_path` directories
:raise ValueError: if the file doesn't have one of the supported extensions (`filter_ext`)
"""
root_path = os.path.abspath(config['root_path'])
temporary_paths = env.transaction._Transaction__file_open_tmp_paths if env else ()
addons_paths = [*odoo.addons.__path__, root_path, *temporary_paths]
import odoo.addons # noqa: PLC0415
is_abs = os.path.isabs(file_path)
normalized_path = os.path.normpath(os.path.normcase(file_path))
@ -212,15 +220,31 @@ def file_path(file_path: str, filter_ext: tuple[str, ...] = ('',), env: Environm
# ignore leading 'addons/' if present, it's the final component of root_path, but
# may sometimes be included in relative paths
if normalized_path.startswith('addons' + os.sep):
normalized_path = normalized_path[7:]
normalized_path = normalized_path.removeprefix('addons' + os.sep)
# if path is relative and represents a loaded module, accept only the
# __path__ for that module; otherwise, search in all accepted paths
file_path_split = normalized_path.split(os.path.sep)
if not is_abs and (module := sys.modules.get(f'odoo.addons.{file_path_split[0]}')):
addons_paths = list(map(os.path.dirname, module.__path__))
else:
root_path = os.path.abspath(config.root_path)
temporary_paths = env.transaction._Transaction__file_open_tmp_paths if env else ()
addons_paths = [*odoo.addons.__path__, root_path, *temporary_paths]
for addons_dir in addons_paths:
# final path sep required to avoid partial match
parent_path = os.path.normpath(os.path.normcase(addons_dir)) + os.sep
fpath = (normalized_path if is_abs else
os.path.normpath(os.path.normcase(os.path.join(parent_path, normalized_path))))
if fpath.startswith(parent_path) and os.path.exists(fpath):
if is_abs:
fpath = normalized_path
else:
fpath = os.path.normpath(os.path.join(parent_path, normalized_path))
if fpath.startswith(parent_path) and (
# we check existence when asked or we have multiple paths to check
# (there is one possibility for absolute paths)
(not check_exists and (is_abs or len(addons_paths) == 1))
or os.path.exists(fpath)
):
return fpath
raise FileNotFoundError("File not found: " + file_path)
@ -245,18 +269,20 @@ def file_open(name: str, mode: str = "r", filter_ext: tuple[str, ...] = (), env:
:raise FileNotFoundError: if the file is not found under the known `addons_path` directories
:raise ValueError: if the file doesn't have one of the supported extensions (`filter_ext`)
"""
path = file_path(name, filter_ext=filter_ext, env=env)
if os.path.isfile(path):
if 'b' not in mode:
# Force encoding for text mode, as system locale could affect default encoding,
# even with the latest Python 3 versions.
# Note: This is not covered by a unit test, due to the platform dependency.
# For testing purposes you should be able to force a non-UTF8 encoding with:
# `sudo locale-gen fr_FR; LC_ALL=fr_FR.iso8859-1 python3 ...'
# See also PEP-540, although we can't rely on that at the moment.
return open(path, mode, encoding="utf-8")
return open(path, mode)
raise FileNotFoundError("Not a file: " + name)
path = file_path(name, filter_ext=filter_ext, env=env, check_exists=False)
encoding = None
if 'b' not in mode:
# Force encoding for text mode, as system locale could affect default encoding,
# even with the latest Python 3 versions.
# Note: This is not covered by a unit test, due to the platform dependency.
# For testing purposes you should be able to force a non-UTF8 encoding with:
# `sudo locale-gen fr_FR; LC_ALL=fr_FR.iso8859-1 python3 ...'
# See also PEP-540, although we can't rely on that at the moment.
encoding = "utf-8"
if any(m in mode for m in ('w', 'x', 'a')) and not os.path.isfile(path):
# Don't let create new files
raise FileNotFoundError(f"Not a file: {path}")
return open(path, mode, encoding=encoding)
@contextmanager
@ -417,48 +443,6 @@ def merge_sequences(*iterables: Iterable[T]) -> list[T]:
return topological_sort(deps)
try:
import xlwt
# add some sanitization to respect the excel sheet name restrictions
# as the sheet name is often translatable, can not control the input
class PatchedWorkbook(xlwt.Workbook):
def add_sheet(self, name, cell_overwrite_ok=False):
# invalid Excel character: []:*?/\
name = re.sub(r'[\[\]:*?/\\]', '', name)
# maximum size is 31 characters
name = name[:31]
return super(PatchedWorkbook, self).add_sheet(name, cell_overwrite_ok=cell_overwrite_ok)
xlwt.Workbook = PatchedWorkbook
except ImportError:
xlwt = None
try:
import xlsxwriter
# add some sanitization to respect the excel sheet name restrictions
# as the sheet name is often translatable, can not control the input
class PatchedXlsxWorkbook(xlsxwriter.Workbook):
# TODO when xlsxwriter bump to 0.9.8, add worksheet_class=None parameter instead of kw
def add_worksheet(self, name=None, **kw):
if name:
# invalid Excel character: []:*?/\
name = re.sub(r'[\[\]:*?/\\]', '', name)
# maximum size is 31 characters
name = name[:31]
return super(PatchedXlsxWorkbook, self).add_worksheet(name, **kw)
xlsxwriter.Workbook = PatchedXlsxWorkbook
except ImportError:
xlsxwriter = None
def get_iso_codes(lang: str) -> str:
if lang.find('_') != -1:
lang_items = lang.split('_')
@ -609,10 +593,12 @@ POSIX_TO_LDML = {
'B': 'MMMM',
#'c': '',
'd': 'dd',
'-d': 'd',
'H': 'HH',
'I': 'hh',
'j': 'DDD',
'm': 'MM',
'-m': 'M',
'M': 'mm',
'p': 'a',
'S': 'ss',
@ -637,6 +623,7 @@ def posix_to_ldml(fmt: str, locale: babel.Locale) -> str:
"""
buf = []
pc = False
minus = False
quoted = []
for c in fmt:
@ -657,7 +644,13 @@ def posix_to_ldml(fmt: str, locale: babel.Locale) -> str:
buf.append(locale.date_formats['short'].pattern)
elif c == 'X': # time format, seems to include seconds. short does not
buf.append(locale.time_formats['medium'].pattern)
elif c == '-':
minus = True
continue
else: # look up format char in static mapping
if minus:
c = '-' + c
minus = False
buf.append(POSIX_TO_LDML[c])
pc = False
elif c == '%':
@ -906,7 +899,7 @@ def dumpstacks(sig=None, frame=None, thread_idents=None, log_level=logging.INFO)
perf_t0 = thread_info.get('perf_t0')
remaining_time = None
if query_time is not None and perf_t0:
remaining_time = '%.3f' % (time.time() - perf_t0 - query_time)
remaining_time = '%.3f' % (real_time() - perf_t0 - query_time)
query_time = '%.3f' % query_time
# qc:query_count qt:query_time pt:python_time (aka remaining time)
code.append("\n# Thread: %s (db:%s) (uid:%s) (url:%s) (qc:%s qt:%s pt:%s)" %
@ -920,6 +913,7 @@ def dumpstacks(sig=None, frame=None, thread_idents=None, log_level=logging.INFO)
for line in extract_stack(stack):
code.append(line)
import odoo # eventd
if odoo.evented:
# code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
import gc
@ -1055,7 +1049,7 @@ class OrderedSet(MutableSet[T], typing.Generic[T]):
""" A set collection that remembers the elements first insertion order. """
__slots__ = ['_map']
def __init__(self, elems=()):
def __init__(self, elems: Iterable[T] = ()):
self._map: dict[T, None] = dict.fromkeys(elems)
def __contains__(self, elem):
@ -1327,32 +1321,29 @@ def formatLang(
value: float | typing.Literal[''],
digits: int = 2,
grouping: bool = True,
monetary: bool | Sentinel = SENTINEL,
dp: str | None = None,
currency_obj=None,
currency_obj: typing.Any | None = None,
rounding_method: typing.Literal['HALF-UP', 'HALF-DOWN', 'HALF-EVEN', "UP", "DOWN"] = 'HALF-EVEN',
rounding_unit: typing.Literal['decimals', 'units', 'thousands', 'lakhs', 'millions'] = 'decimals',
) -> str:
"""
This function will format a number `value` to the appropriate format of the language used.
:param Object env: The environment.
:param float value: The value to be formatted.
:param int digits: The number of decimals digits.
:param bool grouping: Usage of language grouping or not.
:param bool monetary: Usage of thousands separator or not.
.. deprecated:: 13.0
:param str dp: Name of the decimals precision to be used. This will override ``digits``
:param env: The environment.
:param value: The value to be formatted.
:param digits: The number of decimals digits.
:param grouping: Usage of language grouping or not.
:param dp: Name of the decimals precision to be used. This will override ``digits``
and ``currency_obj`` precision.
:param Object currency_obj: Currency to be used. This will override ``digits`` precision.
:param str rounding_method: The rounding method to be used:
:param currency_obj: Currency to be used. This will override ``digits`` precision.
:param rounding_method: The rounding method to be used:
**'HALF-UP'** will round to the closest number with ties going away from zero,
**'HALF-DOWN'** will round to the closest number with ties going towards zero,
**'HALF_EVEN'** will round to the closest number with ties going to the closest
even number,
**'UP'** will always round away from 0,
**'DOWN'** will always round towards 0.
:param str rounding_unit: The rounding unit to be used:
:param rounding_unit: The rounding unit to be used:
**decimals** will round to decimals with ``digits`` or ``dp`` precision,
**units** will round to units without any decimals,
**thousands** will round to thousands without any decimals,
@ -1360,10 +1351,7 @@ def formatLang(
**millions** will round to millions without any decimals.
:returns: The value formatted.
:rtype: str
"""
if monetary is not SENTINEL:
warnings.warn("monetary argument deprecated since 13.0", DeprecationWarning, 2)
# We don't want to return 0
if value == '':
return ''
@ -1418,18 +1406,19 @@ def format_date(
"""
if not value:
return ''
from odoo.fields import Datetime # noqa: PLC0415
if isinstance(value, str):
if len(value) < DATE_LENGTH:
return ''
if len(value) > DATE_LENGTH:
# a datetime, convert to correct timezone
value = odoo.fields.Datetime.from_string(value)
value = odoo.fields.Datetime.context_timestamp(env['res.lang'], value)
value = Datetime.from_string(value)
value = Datetime.context_timestamp(env['res.lang'], value)
else:
value = odoo.fields.Datetime.from_string(value)
value = Datetime.from_string(value)
elif isinstance(value, datetime.datetime) and not value.tzinfo:
# a datetime, convert to correct timezone
value = odoo.fields.Datetime.context_timestamp(env['res.lang'], value)
value = Datetime.context_timestamp(env['res.lang'], value)
lang = get_lang(env, lang_code)
locale = babel_locale_parse(lang.code)
@ -1479,7 +1468,8 @@ def format_datetime(
if not value:
return ''
if isinstance(value, str):
timestamp = odoo.fields.Datetime.from_string(value)
from odoo.fields import Datetime # noqa: PLC0415
timestamp = Datetime.from_string(value)
else:
timestamp = value
@ -1494,7 +1484,7 @@ def format_datetime(
lang = get_lang(env, lang_code)
locale = babel_locale_parse(lang.code or lang_code) # lang can be inactive, so `lang`is empty
if not dt_format:
if not dt_format or dt_format == 'medium':
date_format = posix_to_ldml(lang.date_format, locale=locale)
time_format = posix_to_ldml(lang.time_format, locale=locale)
dt_format = '%s %s' % (date_format, time_format)
@ -1533,7 +1523,8 @@ def format_time(
localized_time = value
else:
if isinstance(value, str):
value = odoo.fields.Datetime.from_string(value)
from odoo.fields import Datetime # noqa: PLC0415
value = Datetime.from_string(value)
assert isinstance(value, datetime.datetime)
tz_name = tz or env.user.tz or 'UTC'
utc_datetime = pytz.utc.localize(value, is_dst=False)
@ -1545,7 +1536,7 @@ def format_time(
lang = get_lang(env, lang_code)
locale = babel_locale_parse(lang.code)
if not time_format:
if not time_format or time_format == 'medium':
time_format = posix_to_ldml(lang.time_format, locale=locale)
return babel.dates.format_time(localized_time, format=time_format, locale=locale)
@ -1613,13 +1604,16 @@ def format_decimalized_amount(amount: float, currency=None) -> str:
return "%s %s" % (formated_amount, currency.symbol or '')
def format_amount(env: Environment, amount: float, currency, lang_code: str | None = None) -> str:
def format_amount(env: Environment, amount: float, currency, lang_code: str | None = None, trailing_zeroes: bool = True) -> str:
fmt = "%.{0}f".format(currency.decimal_places)
lang = env['res.lang'].browse(get_lang(env, lang_code).id)
formatted_amount = lang.format(fmt, currency.round(amount), grouping=True)\
.replace(r' ', u'\N{NO-BREAK SPACE}').replace(r'-', u'-\N{ZERO WIDTH NO-BREAK SPACE}')
if not trailing_zeroes:
formatted_amount = re.sub(fr'{re.escape(lang.decimal_point)}?0+$', '', formatted_amount)
pre = post = u''
if currency.position == 'before':
pre = u'{symbol}\N{NO-BREAK SPACE}'.format(symbol=currency.symbol or '')
@ -1667,25 +1661,22 @@ class ReadonlyDict(Mapping[K, T], typing.Generic[K, T]):
data.update({'baz', 'xyz'}) # raises exception
dict.update(data, {'baz': 'xyz'}) # raises exception
"""
__slots__ = ('_data__',)
def __init__(self, data):
self.__data = dict(data)
self._data__ = dict(data)
def __contains__(self, key: K):
return key in self.__data
return key in self._data__
def __getitem__(self, key: K) -> T:
try:
return self.__data[key]
except KeyError:
if hasattr(type(self), "__missing__"):
return self.__missing__(key)
raise
return self._data__[key]
def __len__(self):
return len(self.__data)
return len(self._data__)
def __iter__(self):
return iter(self.__data)
return iter(self._data__)
class DotDict(dict):
@ -1734,6 +1725,7 @@ def get_diff(data_from, data_to, custom_style=False, dark_color_scheme=False):
table.diff { width: 100%%; }
table.diff th.diff_header { width: 50%%; }
table.diff td.diff_header { white-space: nowrap; }
table.diff td.diff_header + td { width: 50%%; }
table.diff td { word-break: break-all; vertical-align: top; }
table.diff .diff_chg, table.diff .diff_sub, table.diff .diff_add {
display: inline-block;
@ -1831,9 +1823,9 @@ def verify_hash_signed(env, scope, payload):
return None
def limited_field_access_token(record, field_name, timestamp=None):
"""Generate a token granting access to the given record and field_name from
the binary routes (/web/content or /web/image).
def limited_field_access_token(record, field_name, timestamp=None, *, scope):
"""Generate a token granting access to the given record and field_name in
the given scope.
The validitiy of the token is determined by the timestamp parameter.
When it is not specified, a timestamp is automatically generated with a
@ -1847,6 +1839,9 @@ def limited_field_access_token(record, field_name, timestamp=None):
:type record: class:`odoo.models.Model`
:param field_name: the field name of record to generate the token for
:type field_name: str
:param scope: scope of the authentication, to have different signature for the same
record/field in different usage
:type scope: str
:param timestamp: expiration timestamp of the token, or None to generate one
:type timestamp: int, optional
:return: the token, which includes the timestamp in hex format
@ -1860,11 +1855,11 @@ def limited_field_access_token(record, field_name, timestamp=None):
adler32_max = 4294967295
jitter = two_weeks * zlib.adler32(unique_str.encode()) // adler32_max
timestamp = hex(start_of_period + 2 * two_weeks + jitter)
token = hmac(record.env(su=True), "binary", (record._name, record.id, field_name, timestamp))
token = hmac(record.env(su=True), scope, (record._name, record.id, field_name, timestamp))
return f"{token}o{timestamp}"
def verify_limited_field_access_token(record, field_name, access_token):
def verify_limited_field_access_token(record, field_name, access_token, *, scope):
"""Verify the given access_token grants access to field_name of record.
In particular, the token must have the right format, must be valid for the
given record, and must not have expired.
@ -1875,13 +1870,15 @@ def verify_limited_field_access_token(record, field_name, access_token):
:type field_name: str
:param access_token: the access token to verify
:type access_token: str
:param scope: scope of the authentication, to have different signature for the same
record/field in different usage
:return: whether the token is valid for the record/field_name combination at
the current date and time
:rtype: bool
"""
*_, timestamp = access_token.rsplit("o", 1)
return consteq(
access_token, limited_field_access_token(record, field_name, timestamp)
access_token, limited_field_access_token(record, field_name, timestamp, scope=scope)
) and datetime.datetime.now() < datetime.datetime.fromtimestamp(int(timestamp, 16))
@ -1933,13 +1930,10 @@ def format_frame(frame) -> str:
def named_to_positional_printf(string: str, args: Mapping) -> tuple[str, tuple]:
""" Convert a named printf-style format string with its arguments to an
equivalent positional format string with its arguments. This implementation
does not support escaped ``%`` characters (``"%%"``).
equivalent positional format string with its arguments.
"""
if '%%' in string:
raise ValueError(f"Unsupported escaped '%' in format string {string!r}")
pargs = _PrintfArgs(args)
return string % pargs, tuple(pargs.values)
return string.replace('%%', '%%%%') % pargs, tuple(pargs.values)
class _PrintfArgs:

View file

@ -60,7 +60,7 @@ def zip_dir(path, stream, include_dir=True, fnct_sort=None): # TODO add ign
len_prefix += 1
with zipfile.ZipFile(stream, 'w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zipf:
for dirpath, dirnames, filenames in os.walk(path):
for dirpath, _dirnames, filenames in os.walk(path):
filenames = sorted(filenames, key=fnct_sort)
for fname in filenames:
bname, ext = os.path.splitext(fname)

View file

@ -1,4 +1,5 @@
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import importlib
import io
import re
@ -10,20 +11,11 @@ from logging import getLogger
from zlib import compress, decompress, decompressobj
from PIL import Image, PdfImagePlugin
from reportlab.lib import colors
from reportlab.lib.units import cm
from reportlab.lib.utils import ImageReader
from reportlab.pdfgen import canvas
from odoo import modules
from odoo.tools.arabic_reshaper import reshape
from odoo.tools.parse_version import parse_version
from odoo.tools.misc import file_open
try:
import fontTools
from fontTools.ttLib import TTFont
except ImportError:
TTFont = None
from odoo.tools.misc import file_open, SENTINEL
# ----------------------------------------------------------
# PyPDF2 hack
@ -227,20 +219,43 @@ def to_pdf_stream(attachment) -> io.BytesIO:
_logger.warning("mimetype (%s) not recognized for %s", attachment.mimetype, attachment)
def add_banner(pdf_stream, text=None, logo=False, thickness=2 * cm):
def extract_page(attachment, num_page=0) -> io.BytesIO | None:
    """Extract a single page from an attachment's PDF.

    :param attachment: the ``ir.attachment`` record holding the PDF content
    :param num_page: zero-based index of the page to extract
    :return: a stream containing a one-page PDF, or None when the attachment
        could not be converted to a PDF stream by :func:`to_pdf_stream`
    """
    pdf_stream = to_pdf_stream(attachment)
    if not pdf_stream:
        return None
    pdf = PdfFileReader(pdf_stream)
    # NOTE(review): getPage raises if num_page is out of range — presumably
    # callers only pass valid indices; confirm against call sites.
    page = pdf.getPage(num_page)
    pdf_writer = PdfFileWriter()
    pdf_writer.addPage(page)
    stream = io.BytesIO()
    pdf_writer.write(stream)
    return stream
def add_banner(pdf_stream, text=None, logo=False, thickness=SENTINEL):
""" Add a banner on a PDF in the upper right corner, with Odoo's logo (optionally).
:param pdf_stream (BytesIO): The PDF stream where the banner will be applied.
:param text (str): The text to be displayed.
:param logo (bool): Whether to display Odoo's logo in the banner.
:param thickness (float): The thickness of the banner in pixels.
:param thickness (float): The thickness of the banner in pixels (default: 2cm).
:return (BytesIO): The modified PDF stream.
"""
from reportlab.lib import colors # noqa: PLC0415
from reportlab.lib.utils import ImageReader # noqa: PLC0415
from reportlab.pdfgen import canvas # noqa: PLC0415
if thickness is SENTINEL:
from reportlab.lib.units import cm # noqa: PLC0415
thickness = 2 * cm
old_pdf = PdfFileReader(pdf_stream, strict=False, overwriteWarnings=False)
packet = io.BytesIO()
can = canvas.Canvas(packet)
odoo_logo = Image.open(file_open('base/static/img/main_partner-image.png', mode='rb'))
with file_open('base/static/img/main_partner-image.png', mode='rb') as f:
odoo_logo_file = io.BytesIO(f.read())
odoo_logo = Image.open(odoo_logo_file)
odoo_color = colors.Color(113 / 255, 75 / 255, 103 / 255, 0.8)
for p in range(old_pdf.getNumPages()):
@ -521,7 +536,11 @@ class OdooPdfFileWriter(PdfFileWriter):
# PDF/A needs the glyphs width array embedded in the pdf to be consistent with the ones from the font file.
# But it seems like it is not the case when exporting from wkhtmltopdf.
if TTFont:
try:
import fontTools.ttLib # noqa: PLC0415
except ImportError:
_logger.warning('The fonttools package is not installed. Generated PDF may not be PDF/A compliant.')
else:
fonts = {}
# First browse through all the pages of the pdf file, to get a reference to all the fonts used in the PDF.
for page in pages:
@ -535,7 +554,7 @@ class OdooPdfFileWriter(PdfFileWriter):
for font in fonts.values():
font_file = font['/FontDescriptor']['/FontFile2']
stream = io.BytesIO(decompress(font_file._data))
ttfont = TTFont(stream)
ttfont = fontTools.ttLib.TTFont(stream)
font_upm = ttfont['head'].unitsPerEm
if parse_version(fontTools.__version__) < parse_version('4.37.2'):
glyphs = ttfont.getGlyphSet()._hmtx.metrics
@ -548,8 +567,6 @@ class OdooPdfFileWriter(PdfFileWriter):
font[NameObject('/W')] = ArrayObject([NumberObject(1), ArrayObject(glyph_widths)])
stream.close()
else:
_logger.warning('The fonttools package is not installed. Generated PDF may not be PDF/A compliant.')
outlines = self._root_object['/Outlines'].getObject()
outlines[NameObject('/Count')] = NumberObject(1)

View file

@ -0,0 +1,467 @@
import base64
import datetime
import hashlib
import io
from typing import Optional
from asn1crypto import cms, algos, core, x509
import logging
try:
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric.types import PrivateKeyTypes
from cryptography.hazmat.primitives.serialization import Encoding, load_pem_private_key
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.x509 import Certificate, load_pem_x509_certificate
except ImportError:
# cryptography 41.0.7 and above is supported
hashes = None
PrivateKeyTypes = None
Encoding = None
load_pem_private_key = None
padding = None
Certificate = None
load_pem_x509_certificate = None
from odoo import _
from odoo.addons.base.models.res_company import ResCompany
from odoo.addons.base.models.res_users import ResUsers
from odoo.tools.pdf import PdfReader, PdfWriter, ArrayObject, ByteStringObject, DictionaryObject, NameObject, NumberObject, create_string_object, DecodedStreamObject as StreamObject
_logger = logging.getLogger(__name__)
class PdfSigner:
"""Class that defines methods uses in the signing process of pdf documents
The PdfSigner will perform the following operations on a PDF document:
- Modifiying the document by adding a signature field via a form,
- Performing a cryptographic signature of the document.
This implementation follows the Adobe PDF Reference (v1.7) (https://ia601001.us.archive.org/1/items/pdf1.7/pdf_reference_1-7.pdf)
for the structure of the PDF document,
and Digital Signatures in a PDF (https://www.adobe.com/devnet-docs/acrobatetk/tools/DigSig/Acrobat_DigitalSignatures_in_PDF.pdf),
for the structure of the signature in a PDF.
"""
def __init__(self, stream: io.BytesIO, company: Optional[ResCompany] = None, signing_time=None) -> None:
self.signing_time = signing_time
self.company = company
if not 'clone_document_from_reader' in dir(PdfWriter):
_logger.info("PDF signature is supported by Python 3.12 and above")
return
reader = PdfReader(stream)
self.writer = PdfWriter()
self.writer.clone_document_from_reader(reader)
def sign_pdf(self, visible_signature: bool = False, field_name: str = "Odoo Signature", signer: Optional[ResUsers] = None) -> Optional[io.BytesIO]:
"""Signs the pdf document using a PdfWriter object
Returns:
Optional[io.BytesIO]: the resulting output stream after the signature has been performed, or None in case of error
"""
if not self.company or not load_pem_x509_certificate:
return
dummy, sig_field_value = self._setup_form(visible_signature, field_name, signer)
if not self._perform_signature(sig_field_value):
return
out_stream = io.BytesIO()
self.writer.write_stream(out_stream)
return out_stream
def _load_key_and_certificate(self) -> tuple[Optional[PrivateKeyTypes], Optional[Certificate]]:
"""Loads the private key
Returns:
Optional[PrivateKeyTypes]: a private key object, or None if the key couldn't be loaded.
"""
if "signing_certificate_id" not in self.company._fields \
or not self.company.signing_certificate_id.pem_certificate:
return None, None
certificate = self.company.signing_certificate_id
cert_bytes = base64.decodebytes(certificate.pem_certificate)
private_key_bytes = base64.decodebytes(certificate.private_key_id.content)
return load_pem_private_key(private_key_bytes, None), load_pem_x509_certificate(cert_bytes)
def _setup_form(self, visible_signature: bool, field_name: str, signer: Optional[ResUsers] = None) -> tuple[DictionaryObject, DictionaryObject] | None:
"""Creates the /AcroForm and populates it with the appropriate field for the signature
Args:
visible_signature (bool): boolean value that determines if the signature should be visible on the document
field_name (str): the name of the signature field
signer (Optional[ResUsers]): user that will be used in the visuals of the signature field
Returns:
tuple[DictionaryObject, DictionaryObject]: a tuple containing the signature field and the signature content
"""
if "/AcroForm" not in self.writer._root_object:
form = DictionaryObject()
form.update({
NameObject("/SigFlags"): NumberObject(3)
})
form_ref = self.writer._add_object(form)
self.writer._root_object.update({
NameObject("/AcroForm"): form_ref
})
else:
form = self.writer._root_object["/AcroForm"].get_object()
# SigFlags(3) = SignatureExists = true && AppendOnly = true.
# The document contains signed signature and must be modified in incremental mode (see https://github.com/pdf-association/pdf-issues/issues/457)
form.update({
NameObject("/SigFlags"): NumberObject(3)
})
# Assigning the newly created field to a page
page = self.writer.pages[0]
# Setting up the signature field properties
signature_field = DictionaryObject()
# Metadata of the signature field
# /FT = Field Type, here set to /Sig the signature type
# /T = name of the field
# /Type = type of object, in this case annotation (/Annot)
# /Subtype = type of annotation
# /F = annotation flags, represented as a 32 bit unsigned integer. 132 corresponds to the Print and Locked flags
# Print : corresponds to printing the signature when the page is printed
# Locked : preventing the annotation properties to be modfied or the annotation to be deletd by the user
# (see section 8.4.2 of the Adobe PDF Reference (v1.7) https://ia601001.us.archive.org/1/items/pdf1.7/pdf_reference_1-7.pdf),
# /P = page reference, reference to the page where the signature field is located
signature_field.update({
NameObject("/FT"): NameObject("/Sig"),
NameObject("/T"): create_string_object(field_name),
NameObject("/Type"): NameObject("/Annot"),
NameObject("/Subtype"): NameObject("/Widget"),
NameObject("/F"): NumberObject(132),
NameObject("/P"): page.indirect_reference,
})
# Creating the appearance (visible elements of the signature)
if visible_signature:
origin = page.mediabox.upper_right # retrieves the top-right coordinates of the page
rect_size = (200, 20) # dimensions of the box (width, height)
padding = 5
# Box that will contain the signature, defined as [x1, y1, x2, y2]
# where (x1, y1) is the bottom left coordinates of the box,
# and (x2, y2) the top-right coordinates.
rect = [
origin[0] - rect_size[0] - padding,
origin[1] - rect_size[1] - padding,
origin[0] - padding,
origin[1] - padding
]
# Here is defined the StreamObject that contains the information about the visible
# parts of the signature
#
# Dictionary contents:
# /BBox = coordinates of the 'visible' box, relative to the /Rect definition of the signature field
# /Resources = resources needed to properly render the signature,
# /Font = dictionary containing the information about the font used by the signature
# /F1 = font resource, used to define a font that will be usable in the signature
stream = StreamObject()
stream.update({
NameObject("/BBox"): self._create_number_array_object([0, 0, rect_size[0], rect_size[1]]),
NameObject("/Resources"): DictionaryObject({
NameObject("/Font"): DictionaryObject({
NameObject("/F1"): DictionaryObject({
NameObject("/Type"): NameObject("/Font"),
NameObject("/Subtype"): NameObject("/Type1"),
NameObject("/BaseFont"): NameObject("/Helvetica")
})
})
}),
NameObject("/Type"): NameObject("/XObject"),
NameObject("/Subtype"): NameObject("/Form")
})
#
content = "Digitally signed"
content = create_string_object(f'{content} by {signer.name} <{signer.email}>') if signer is not None else create_string_object(content)
# Setting the parameters used to display the text object of the signature
# More details on this subject can be found in the sections 4.3 and 5.3
# of the Adobe PDF Reference (v1.7) https://ia601001.us.archive.org/1/items/pdf1.7/pdf_reference_1-7.pdf
#
# Parameters:
# q = saves the the current graphics state on the graphics state stack
# 0.5 0 0 0.5 0 0 cm = modification of the current transformation matrix. Here used to scale down the text size by 0.5 in x and y
# BT = begin text object
# /F1 = reference to the font resource named F1
# 12 Tf = set the font size to 12
# 0 TL = defines text leading, the space between lines, here set to 0
# 0 10 Td = moves the text to the start of the next line, expressed in text space units. Here (x, y) = (0, 10)
# (text_content) Tj = renders a text string
# ET = end text object
# Q = Restore the graphics state by removing the most recently saved state from the stack and making it the current state
stream._data = f"q 0.5 0 0 0.5 0 0 cm BT /F1 12 Tf 0 TL 0 10 Td ({content}) Tj ET Q".encode()
signature_appearence = DictionaryObject()
signature_appearence.update({
NameObject("/N"): stream
})
signature_field.update({
NameObject("/AP"): signature_appearence,
})
else:
rect = [0,0,0,0]
signature_field.update({
NameObject("/Rect"): self._create_number_array_object(rect)
})
# Setting up the actual signature contents with placeholders for /Contents and /ByteRange
#
# Dictionary contents:
# /Contents = content of the signature field. The content is a byte string of an object that follows
# the Cryptographic Message Syntax (CMS). The object is converted in hexadecimal and stored as bytes.
# The /Contents are pre-filled with placeholder values of an arbitrary size (i.e. 8KB) to ensure that
# the signature will fit in the "<>" bounds of the field
# /ByteRange = an array represented as [offset, length, offset, length, ...] which defines the bytes that
# are used when computing the digest of the document. Similarly to the /Contents, the /ByteRange is set to
# a placeholder as we aren't yet able to compute the range at this point.
# /Type = the type of form field. Here /Sig, the signature field
# /Filter
# /SubFilter
# /M = the timestamp of the signature. Indicates when the document was signed.
signature_field_value = DictionaryObject()
signature_field_value.update({
NameObject("/Contents"): ByteStringObject(b"\0" * 8192),
NameObject("/ByteRange"): self._create_number_array_object([0, 0, 0, 0]),
NameObject("/Type"): NameObject("/Sig"),
NameObject("/Filter"): NameObject("/Adobe.PPKLite"),
NameObject("/SubFilter"): NameObject("/adbe.pkcs7.detached"),
NameObject("/M"): create_string_object(datetime.datetime.now(datetime.timezone.utc).strftime("D:%Y%m%d%H%M%S")),
})
# Here we add the reference to be written in a specific order. This is needed
# by Adobe Acrobat to consider the signature valid.
signature_field_ref = self.writer._add_object(signature_field)
signature_field_value_ref = self.writer._add_object(signature_field_value)
# /V = the actual value of the signature field. Used to store the dictionary of the field
signature_field.update({
NameObject("/V"): signature_field_value_ref
})
# Definition of the fields array linked to the form (/AcroForm)
if "/Fields" not in self.writer._root_object:
fields = ArrayObject()
else:
fields = self.writer._root_object["/Fields"].get_object()
fields.append(signature_field_ref)
form.update({
NameObject("/Fields"): fields
})
# The signature field reference is added to the annotations array
if "/Annots" not in page:
page[NameObject("/Annots")] = ArrayObject()
page[NameObject("/Annots")].append(signature_field_ref)
return signature_field, signature_field_value
def _get_cms_object(self, digest: bytes) -> Optional[cms.ContentInfo]:
"""Creates an object that follows the Cryptographic Message Syntax(CMS)
RFC: https://datatracker.ietf.org/doc/html/rfc5652
Args:
digest (bytes): the digest of the document in bytes
Returns:
cms.ContentInfo: a CMS object containing the information of the signature
"""
private_key, certificate = self._load_key_and_certificate()
if private_key == None or certificate == None:
return None
cert = x509.Certificate.load(
certificate.public_bytes(encoding=Encoding.DER))
encap_content_info = {
'content_type': 'data',
'content': None
}
attrs = cms.CMSAttributes([
cms.CMSAttribute({
'type': 'content_type',
'values': ['data']
}),
cms.CMSAttribute({
'type': 'signing_time',
'values': [cms.Time({'utc_time': core.UTCTime(self.signing_time or datetime.datetime.now(datetime.timezone.utc))})]
}),
cms.CMSAttribute({
'type': 'cms_algorithm_protection',
'values': [
cms.CMSAlgorithmProtection(
{
'mac_algorithm': None,
'digest_algorithm': cms.DigestAlgorithm(
{'algorithm': 'sha256', 'parameters': None}
),
'signature_algorithm': cms.SignedDigestAlgorithm({
'algorithm': 'sha256_rsa',
'parameters': None
})
}
)
]
}),
cms.CMSAttribute({
'type': 'message_digest',
'values': [digest],
}),
])
signed_attrs = private_key.sign(
attrs.dump(),
padding.PKCS1v15(),
hashes.SHA256()
)
signer_info = cms.SignerInfo({
'version': "v1",
'digest_algorithm': algos.DigestAlgorithm({'algorithm': 'sha256'}),
'signature_algorithm': algos.SignedDigestAlgorithm({'algorithm': 'sha256_rsa'}),
'signature': signed_attrs,
'sid': cms.SignerIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': cert.issuer,
'serial_number': cert.serial_number
})
}),
'signed_attrs': attrs})
signed_data = {
'version': 'v1',
'digest_algorithms': [algos.DigestAlgorithm({'algorithm': 'sha256'})],
'encap_content_info': encap_content_info,
'certificates': [cert],
'signer_infos': [signer_info]
}
return cms.ContentInfo({
'content_type': 'signed_data',
'content': cms.SignedData(signed_data)
})
def _perform_signature(self, sig_field_value: DictionaryObject) -> bool:
"""Creates the actual signature content and populate /ByteRange and /Contents properties with meaningful content.
Args:
sig_field_value (DictionaryObject): the value (/V) of the signature field which needs to be modified
"""
pdf_data = self._get_document_data()
# Computation of the location of the last inserted contents for the signature field
signature_field_pos = pdf_data.rfind(b"/FT /Sig")
contents_field_pos = pdf_data.find(b"Contents", signature_field_pos)
# Computing the start and end position of the /Contents <signature> field
# to exclude the content of <> (aka the actual signature) from the byte range
placeholder_start = contents_field_pos + 9
placeholder_end = placeholder_start + len(b"\0" * 8192) * 2 + 2
# Replacing the placeholder byte range with the actual range
# that will be used to compute the document digest
placeholder_byte_range = sig_field_value.get("/ByteRange")
# Here the byte range represents an array [index, length, index, length, ...]
# where 'index' represents the index of a byte, and length the number of bytes to take
# This array indicates the bytes that are used when computing the digest of the document
byte_range = [0, placeholder_start,
placeholder_end, abs(len(pdf_data) - placeholder_end)]
byte_range = self._correct_byte_range(
placeholder_byte_range, byte_range, len(pdf_data))
sig_field_value.update({
NameObject("/ByteRange"): self._create_number_array_object(byte_range)
})
pdf_data = self._get_document_data()
digest = self._compute_digest_from_byte_range(pdf_data, byte_range)
cms_content_info = self._get_cms_object(digest)
if cms_content_info == None:
return False
signature_hex = cms_content_info.dump().hex()
signature_hex = signature_hex.ljust(8192 * 2, "0")
sig_field_value.update({
NameObject("/Contents"): ByteStringObject(bytes.fromhex(signature_hex))
})
return True
def _get_document_data(self):
"""Retrieves the bytes of the document from the writer"""
output_stream = io.BytesIO()
self.writer.write_stream(output_stream)
return output_stream.getvalue()
def _correct_byte_range(self, old_range: list[int], new_range: list[int], base_pdf_len: int) -> list[int]:
"""Corrects the last value of the new byte range
This function corrects the initial byte range (old_range) which was computed for document containing
the placeholder values for the /ByteRange and /Contents fields. This is needed because when updating
/ByteRange, the length of the document will change as the byte range will take more bytes of the
document, resulting in an invalid byte range.
Args:
old_range (list[int]): the previous byte range
new_range (list[int]): the new byte range
base_pdf_len (int): the base length of the pdf, before insertion of the actual byte range
Returns:
list[int]: the corrected byte range
"""
# Computing the difference of length of the strings of the old and new byte ranges.
# Used to determine if a re-computation of the range is needed or not
current_len = len(str(old_range))
corrected_len = len(str(new_range))
diff = corrected_len - current_len
if diff == 0:
return new_range
corrected_range = new_range.copy()
corrected_range[-1] = abs((base_pdf_len + diff) - new_range[-2])
return self._correct_byte_range(new_range, corrected_range, base_pdf_len)
def _compute_digest_from_byte_range(self, data: bytes, byte_range: list[int]) -> bytes:
"""Computes the digest of the data from a byte range. Uses SHA256 algorithm to compute the hash.
The byte range is defined as an array [offset, length, offset, length, ...] which corresponds to the bytes from the document
that will be used in the computation of the hash.
i.e. for document = b'example' and byte_range = [0, 1, 6, 1],
the hash will be computed from b'ee'
Args:
document (bytes): the data in bytes
byte_range (list[int]): the byte range used to compute the digest.
Returns:
bytes: the computed digest
"""
hashed = hashlib.sha256()
for i in range(0, len(byte_range), 2):
hashed.update(data[byte_range[i]:byte_range[i] + byte_range[i+1]])
return hashed.digest()
def _create_number_array_object(self, array: list[int]) -> ArrayObject:
return ArrayObject([NumberObject(item) for item in array])

View file

@ -339,7 +339,7 @@ def populate_model(model: Model, populated: dict[Model, int], factors: dict[Mode
class Many2oneFieldWrapper(Many2one):
def __init__(self, model, field_name, comodel_name):
super().__init__(comodel_name)
self._setup_attrs(model, field_name) # setup most of the default attrs
self._setup_attrs__(model, field_name) # setup most of the default attrs
class Many2manyModelWrapper:

View file

@ -1,27 +1,30 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from contextlib import nullcontext, ExitStack
from datetime import datetime
import gc
import json
import logging
import sys
import time
import threading
import re
import functools
import tracemalloc
from psycopg2 import OperationalError
from odoo import tools
from odoo.tools import SQL
from .gc import disabling_gc
_logger = logging.getLogger(__name__)
# ensure we have a non patched time for profiling times when using freezegun
real_datetime_now = datetime.now
real_time = time.time.__call__
real_cpu_time = time.thread_time.__call__
def _format_frame(frame):
code = frame.f_code
@ -48,7 +51,7 @@ def _get_stack_trace(frame, limit_frame=None):
stack.append(_format_frame(frame))
frame = frame.f_back
if frame is None and limit_frame:
_logger.error("Limit frame was not found")
_logger.runbot("Limit frame was not found")
return list(reversed(stack))
@ -89,6 +92,7 @@ class Collector:
It defines default behaviors for creating an entry in the collector.
"""
name = None # symbolic name of the collector
_store = name
_registry = {} # map collector names to their class
@classmethod
@ -128,7 +132,7 @@ class Collector:
and self.profiler.entry_count() >= self.profiler.entry_count_limit:
self.profiler.end()
self.add(entry=entry, frame=frame)
self.add(entry=entry,frame=frame)
def _get_stack_trace(self, frame=None):
""" Return the stack trace to be included in a given entry. """
@ -183,26 +187,56 @@ class SQLCollector(Collector):
return super().summary() + sql_entries
class PeriodicCollector(Collector):
class _BasePeriodicCollector(Collector):
"""
Record execution frames asynchronously at most every `interval` seconds.
:param interval (float): time to wait in seconds between two samples.
"""
name = 'traces_async'
_min_interval = 0.001 # minimum interval allowed
_max_interval = 5 # maximum interval allowed
_default_interval = 0.001
def __init__(self, interval=0.01): # check duration. dynamic?
def __init__(self, interval=None): # check duration. dynamic?
super().__init__()
self.active = False
self.frame_interval = interval
self.frame_interval = interval or self._default_interval
self.__thread = threading.Thread(target=self.run)
self.last_frame = None
def start(self):
interval = self.profiler.params.get(f'{self.name}_interval')
if interval:
self.frame_interval = min(max(float(interval), self._min_interval), self._max_interval)
init_thread = self.profiler.init_thread
if not hasattr(init_thread, 'profile_hooks'):
init_thread.profile_hooks = []
init_thread.profile_hooks.append(self.progress)
self.__thread.start()
def run(self):
self.active = True
last_time = real_time()
self.last_time = real_time()
while self.active: # maybe add a check on parent_thread state?
duration = real_time() - last_time
self.progress()
time.sleep(self.frame_interval)
self._entries.append({'stack': [], 'start': real_time()}) # add final end frame
def stop(self):
self.active = False
self.__thread.join()
self.profiler.init_thread.profile_hooks.remove(self.progress)
class PeriodicCollector(_BasePeriodicCollector):
name = 'traces_async'
def add(self, entry=None, frame=None):
""" Add an entry (dict) to this collector. """
if self.last_frame:
duration = real_time() - self._last_time
if duration > self.frame_interval * 10 and self.last_frame:
# The profiler has unexpectedly slept for more than 10 frame intervals. This may
# happen when calling a C library without releasing the GIL. In that case, the
@ -210,32 +244,9 @@ class PeriodicCollector(Collector):
# the call itself does not appear in any of those frames: the duration of the call
# is incorrectly attributed to the last frame.
self._entries[-1]['stack'].append(('profiling', 0, '⚠ Profiler freezed for %s s' % duration, ''))
self.last_frame = None # skip duplicate detection for the next frame.
self.progress()
last_time = real_time()
time.sleep(self.frame_interval)
self.last_frame = None # skip duplicate detection for the next frame.
self._last_time = real_time()
self._entries.append({'stack': [], 'start': real_time()}) # add final end frame
def start(self):
interval = self.profiler.params.get('traces_async_interval')
if interval:
self.frame_interval = min(max(float(interval), 0.001), 1)
init_thread = self.profiler.init_thread
if not hasattr(init_thread, 'profile_hooks'):
init_thread.profile_hooks = []
init_thread.profile_hooks.append(self.progress)
self.__thread.start()
def stop(self):
self.active = False
self.__thread.join()
self.profiler.init_thread.profile_hooks.remove(self.progress)
def add(self, entry=None, frame=None):
""" Add an entry (dict) to this collector. """
frame = frame or get_current_frame(self.profiler.init_thread)
if frame == self.last_frame:
# don't save if the frame is exactly the same as the previous one.
@ -245,6 +256,42 @@ class PeriodicCollector(Collector):
super().add(entry=entry, frame=frame)
_lock = threading.Lock()
class MemoryCollector(_BasePeriodicCollector):
    """Periodic collector recording memory-allocation snapshots via
    :mod:`tracemalloc`.

    Each entry stores a timestamp plus a full tracemalloc snapshot; snapshots
    are flattened into JSON-serializable dicts in :meth:`post_process`.
    Entries are stored under the 'others' column (``_store``) of ir_profile
    rather than a dedicated one.
    """
    name = 'memory'
    _store = 'others'
    _min_interval = 0.01  # minimum interval allowed
    _default_interval = 1

    def start(self):
        # The module-level _lock serializes concurrent MemoryCollector runs:
        # tracemalloc is process-global state, so only one collector may
        # start/stop it at a time. Held until stop() releases it.
        _lock.acquire()
        tracemalloc.start()
        super().start()

    def add(self, entry=None, frame=None):
        """ Add an entry (dict) to this collector. """
        # `entry` and `frame` are accepted for interface compatibility with
        # Collector.add but ignored: a snapshot is taken instead of a stack.
        self._entries.append({
            'start': real_time(),
            'memory': tracemalloc.take_snapshot(),
        })

    def stop(self):
        _lock.release()
        tracemalloc.stop()
        super().stop()

    def post_process(self):
        # Replace each raw Snapshot with plain dicts (traceback frames +
        # allocated size) so the entries can be JSON-serialized when saved.
        for i, entry in enumerate(self._entries):
            if entry.get("memory", False):
                entry_statistics = entry["memory"].statistics('traceback')
                modified_entry_statistics = [{'traceback': list(statistic.traceback._frames),
                                              'size': statistic.size} for statistic in entry_statistics]
                self._entries[i] = {"memory_tracebacks": modified_entry_statistics, "start": entry['start']}
class SyncCollector(Collector):
"""
Record complete execution synchronously.
@ -299,57 +346,6 @@ class SyncCollector(Collector):
class QwebTracker():
@classmethod
def wrap_render(cls, method_render):
@functools.wraps(method_render)
def _tracked_method_render(self, template, values=None, **options):
current_thread = threading.current_thread()
execution_context_enabled = getattr(current_thread, 'profiler_params', {}).get('execution_context_qweb')
qweb_hooks = getattr(current_thread, 'qweb_hooks', ())
if execution_context_enabled or qweb_hooks:
# To have the new compilation cached because the generated code will change.
# Therefore 'profile' is a key to the cache.
options['profile'] = True
return method_render(self, template, values, **options)
return _tracked_method_render
@classmethod
def wrap_compile(cls, method_compile):
@functools.wraps(method_compile)
def _tracked_compile(self, template):
if not self.env.context.get('profile'):
return method_compile(self, template)
template_functions, def_name = method_compile(self, template)
render_template = template_functions[def_name]
def profiled_method_compile(self, values):
options = template_functions['options']
ref = options.get('ref')
ref_xml = options.get('ref_xml')
qweb_tracker = QwebTracker(ref, ref_xml, self.env.cr)
self = self.with_context(qweb_tracker=qweb_tracker)
if qweb_tracker.execution_context_enabled:
with ExecutionContext(template=ref):
return render_template(self, values)
return render_template(self, values)
template_functions[def_name] = profiled_method_compile
return (template_functions, def_name)
return _tracked_compile
@classmethod
def wrap_compile_directive(cls, method_compile_directive):
@functools.wraps(method_compile_directive)
def _tracked_compile_directive(self, el, options, directive, level):
if not options.get('profile') or directive in ('inner-content', 'tag-open', 'tag-close'):
return method_compile_directive(self, el, options, directive, level)
enter = f"{' ' * 4 * level}self.env.context['qweb_tracker'].enter_directive({directive!r}, {el.attrib!r}, {options['_qweb_error_path_xml'][0]!r})"
leave = f"{' ' * 4 * level}self.env.context['qweb_tracker'].leave_directive({directive!r}, {el.attrib!r}, {options['_qweb_error_path_xml'][0]!r})"
code_directive = method_compile_directive(self, el, options, directive, level)
return [enter, *code_directive, leave] if code_directive else []
return _tracked_compile_directive
def __init__(self, view_id, arch, cr):
current_thread = threading.current_thread() # don't store current_thread on self
self.execution_context_enabled = getattr(current_thread, 'profiler_params', {}).get('execution_context_qweb')
@ -540,6 +536,8 @@ class Profiler:
"""
self.start_time = 0
self.duration = 0
self.start_cpu_time = 0
self.cpu_duration = 0
self.profile_session = profile_session or make_session()
self.description = description
self.init_frame = None
@ -551,8 +549,9 @@ class Profiler:
self.profile_id = None
self.log = log
self.sub_profilers = []
self.entry_count_limit = int(self.params.get("entry_count_limit", 0)) # the limit could be set using a smarter way
self.entry_count_limit = int(self.params.get("entry_count_limit",0)) # the limit could be set using a smarter way
self.done = False
self.exit_stack = ExitStack()
if db is ...:
# determine database from current thread
@ -599,9 +598,10 @@ class Profiler:
self.description = f"{frame.f_code.co_name} ({code.co_filename}:{frame.f_lineno})"
if self.params:
self.init_thread.profiler_params = self.params
if self.disable_gc and gc.isenabled():
gc.disable()
if self.disable_gc:
self.exit_stack.enter_context(disabling_gc())
self.start_time = real_time()
self.start_cpu_time = real_cpu_time()
for collector in self.collectors:
collector.start()
return self
@ -617,6 +617,7 @@ class Profiler:
for collector in self.collectors:
collector.stop()
self.duration = real_time() - self.start_time
self.cpu_duration = real_cpu_time() - self.start_cpu_time
self._add_file_lines(self.init_stack_trace)
if self.db:
@ -629,12 +630,19 @@ class Profiler:
"create_date": real_datetime_now(),
"init_stack_trace": json.dumps(_format_stack(self.init_stack_trace)),
"duration": self.duration,
"cpu_duration": self.cpu_duration,
"entry_count": self.entry_count(),
"sql_count": sum(len(collector.entries) for collector in self.collectors if collector.name == 'sql')
}
others = {}
for collector in self.collectors:
if collector.entries:
values[collector.name] = json.dumps(collector.entries)
if collector._store == "others":
others[collector.name] = json.dumps(collector.entries)
else:
values[collector.name] = json.dumps(collector.entries)
if others:
values['others'] = json.dumps(others)
query = SQL(
"INSERT INTO ir_profile(%s) VALUES %s RETURNING id",
SQL(",").join(map(SQL.identifier, values)),
@ -646,15 +654,14 @@ class Profiler:
except OperationalError:
_logger.exception("Could not save profile in database")
finally:
if self.disable_gc:
gc.enable()
self.exit_stack.close()
if self.params:
del self.init_thread.profiler_params
if self.log:
_logger.info(self.summary())
def _get_cm_proxy(self):
return _Nested(self)
return Nested(self)
def _add_file_lines(self, stack):
for index, frame in enumerate(stack):
@ -722,20 +729,6 @@ class Profiler:
return result
class _Nested:
__slots__ = ("__profiler",)
def __init__(self, profiler):
self.__profiler = profiler
def __enter__(self):
self.__profiler.__enter__()
return self
def __exit__(self, *args):
return self.__profiler.__exit__(*args)
class Nested:
"""
Utility to nest another context manager inside a profiler.
@ -747,16 +740,16 @@ class Nested:
be ignored, too. This is also why Nested() does not use
contextlib.contextmanager.
"""
def __init__(self, profiler, context_manager):
self.profiler = profiler
self.context_manager = context_manager
def __init__(self, profiler, context_manager=None):
self._profiler__ = profiler
self.context_manager = context_manager or nullcontext()
def __enter__(self):
self.profiler.__enter__()
self._profiler__.__enter__()
return self.context_manager.__enter__()
def __exit__(self, exc_type, exc_value, traceback):
try:
return self.context_manager.__exit__(exc_type, exc_value, traceback)
finally:
self.profiler.__exit__(exc_type, exc_value, traceback)
self._profiler__.__exit__(exc_type, exc_value, traceback)

View file

@ -11,14 +11,14 @@ _writer = codecs.getwriter('utf-8')
def csv_reader(stream, **params):
warnings.warn("Deprecated since Odoo 18.0: can just use `csv.reader` with a text stream or use `TextIOWriter` or `codec.getreader` to transcode.", DeprecationWarning, 2)
warnings.warn("Deprecated since Odoo 18.0: can just use `csv.reader` with a text stream or use `TextIOWriter` or `codec.getreader` to transcode.", DeprecationWarning, stacklevel=2)
assert not isinstance(stream, io.TextIOBase),\
"For cross-compatibility purposes, csv_reader takes a bytes stream"
return csv.reader(_reader(stream), **params)
def csv_writer(stream, **params):
warnings.warn("Deprecated since Odoo 18.0: can just use `csv.writer` with a text stream or use `TextIOWriter` or `codec.getwriter` to transcode.", DeprecationWarning, 2)
warnings.warn("Deprecated since Odoo 18.0: can just use `csv.writer` with a text stream or use `TextIOWriter` or `codec.getwriter` to transcode.", DeprecationWarning, stacklevel=2)
assert not isinstance(stream, io.TextIOBase), \
"For cross-compatibility purposes, csv_writer takes a bytes stream"
return csv.writer(_writer(stream), **params)
@ -32,7 +32,7 @@ def to_text(source: typing.Any) -> str:
* bytes are decoded as UTF-8
* rest is textified
"""
warnings.warn("Deprecated since Odoo 18.0.", DeprecationWarning, 2)
warnings.warn("Deprecated since Odoo 18.0.", DeprecationWarning, stacklevel=2)
if source is None or source is False:
return ''

View file

@ -69,6 +69,7 @@ class Query:
# groupby, having, order, limit, offset
self.groupby: SQL | None = None
self._order_groupby: list[SQL] = []
self.having: SQL | None = None
self._order: SQL | None = None
self.limit: int | None = None
@ -86,7 +87,7 @@ class Query:
""" Add a table with a given alias to the from clause. """
assert alias not in self._tables and alias not in self._joins, f"Alias {alias!r} already in {self}"
self._tables[alias] = table if table is not None else SQL.identifier(alias)
self._ids = None
self._ids = self._ids and None
def add_join(self, kind: str, alias: str, table: str | SQL | None, condition: SQL):
""" Add a join clause with the given alias, table and condition. """
@ -101,12 +102,12 @@ class Query:
assert self._joins[alias] == (sql_kind, table, condition)
else:
self._joins[alias] = (sql_kind, table, condition)
self._ids = None
self._ids = self._ids and None
def add_where(self, where_clause: str | SQL, where_params=()):
""" Add a condition to the where clause. """
self._where_clauses.append(SQL(where_clause, *where_params)) # pylint: disable = sql-injection
self._ids = None
self._ids = self._ids and None
def join(self, lhs_alias: str, lhs_column: str, rhs_table: str | SQL, rhs_column: str, link: str) -> str:
"""

View file

@ -19,17 +19,18 @@ import functools
import logging
import sys
import types
import typing
from opcode import opmap, opname
from types import CodeType
import werkzeug
from psycopg2 import OperationalError
import odoo
import odoo.exceptions
unsafe_eval = eval
__all__ = ['test_expr', 'safe_eval', 'const_eval']
__all__ = ['const_eval', 'safe_eval']
# The time module is usually already provided in the safe_eval environment
# but some code, e.g. datetime.datetime.now() (Windows/Python 2.5.2, bug
@ -239,28 +240,25 @@ def assert_valid_codeobj(allowed_codes, code_obj, expr):
if isinstance(const, CodeType):
assert_valid_codeobj(allowed_codes, const, 'lambda')
def test_expr(expr, allowed_codes, mode="eval", filename=None):
"""test_expr(expression, allowed_codes[, mode[, filename]]) -> code_object
Test that the expression contains only the allowed opcodes.
If the expression is valid and contains only allowed codes,
return the compiled code object.
Otherwise raise a ValueError, a Syntax Error or TypeError accordingly.
:param filename: optional pseudo-filename for the compiled expression,
displayed for example in traceback frames
:type filename: string
def compile_codeobj(expr: str, /, filename: str = '<unknown>', mode: typing.Literal['eval', 'exec'] = 'eval'):
"""
:param str filename: optional pseudo-filename for the compiled expression,
displayed for example in traceback frames
:param str mode: 'eval' if single expression
'exec' if sequence of statements
:return: compiled code object
:rtype: types.CodeType
"""
assert mode in ('eval', 'exec')
try:
if mode == 'eval':
# eval() does not like leading/trailing whitespace
expr = expr.strip()
code_obj = compile(expr, filename or "", mode)
expr = expr.strip() # eval() does not like leading/trailing whitespace
code_obj = compile(expr, filename or '', mode)
except (SyntaxError, TypeError, ValueError):
raise
except Exception as e:
raise ValueError('%r while compiling\n%r' % (e, expr))
assert_valid_codeobj(allowed_codes, code_obj, expr)
return code_obj
@ -282,7 +280,8 @@ def const_eval(expr):
...
ValueError: opcode BINARY_ADD not allowed
"""
c = test_expr(expr, _CONST_OPCODES)
c = compile_codeobj(expr)
assert_valid_codeobj(_CONST_OPCODES, c, expr)
return unsafe_eval(c)
def expr_eval(expr):
@ -303,7 +302,8 @@ def expr_eval(expr):
...
ValueError: opcode LOAD_NAME not allowed
"""
c = test_expr(expr, _EXPR_OPCODES)
c = compile_codeobj(expr)
assert_valid_codeobj(_EXPR_OPCODES, c, expr)
return unsafe_eval(c)
_BUILTINS = {
@ -344,18 +344,35 @@ _BUILTINS = {
'zip': zip,
'Exception': Exception,
}
def safe_eval(expr, globals_dict=None, locals_dict=None, mode="eval", nocopy=False, locals_builtins=False, filename=None):
"""safe_eval(expression[, globals[, locals[, mode[, nocopy]]]]) -> result
System-restricted Python expression evaluation
_BUBBLEUP_EXCEPTIONS = (
odoo.exceptions.UserError,
odoo.exceptions.RedirectWarning,
werkzeug.exceptions.HTTPException,
OperationalError, # let auto-replay of serialized transactions work its magic
ZeroDivisionError,
)
def safe_eval(expr, /, context=None, *, mode="eval", filename=None):
"""System-restricted Python expression evaluation
Evaluates a string that contains an expression that mostly
uses Python constants, arithmetic expressions and the
objects directly provided in context.
This can be used to e.g. evaluate
an OpenERP domain expression from an untrusted source.
a domain expression from an untrusted source.
:param expr: The Python expression (or block, if ``mode='exec'``) to evaluate.
:type expr: string | bytes
:param context: Namespace available to the expression.
This dict will be mutated with any variables created during
evaluation
:type context: dict
:param mode: ``exec`` or ``eval``
:type mode: str
:param filename: optional pseudo-filename for the compiled expression,
displayed for example in traceback frames
:type filename: string
@ -367,51 +384,34 @@ def safe_eval(expr, globals_dict=None, locals_dict=None, mode="eval", nocopy=Fal
if type(expr) is CodeType:
raise TypeError("safe_eval does not allow direct evaluation of code objects.")
# prevent altering the globals/locals from within the sandbox
# by taking a copy.
if not nocopy:
# isinstance() does not work below, we want *exactly* the dict class
if (globals_dict is not None and type(globals_dict) is not dict) \
or (locals_dict is not None and type(locals_dict) is not dict):
_logger.warning(
"Looks like you are trying to pass a dynamic environment, "
"you should probably pass nocopy=True to safe_eval().")
if globals_dict is not None:
globals_dict = dict(globals_dict)
if locals_dict is not None:
locals_dict = dict(locals_dict)
assert context is None or type(context) is dict, "Context must be a dict"
check_values(globals_dict)
check_values(locals_dict)
check_values(context)
if globals_dict is None:
globals_dict = {}
globals_dict = dict(context or {}, __builtins__=dict(_BUILTINS))
globals_dict['__builtins__'] = dict(_BUILTINS)
if locals_builtins:
if locals_dict is None:
locals_dict = {}
locals_dict.update(_BUILTINS)
c = test_expr(expr, _SAFE_OPCODES, mode=mode, filename=filename)
c = compile_codeobj(expr, filename=filename, mode=mode)
assert_valid_codeobj(_SAFE_OPCODES, c, expr)
try:
return unsafe_eval(c, globals_dict, locals_dict)
except odoo.exceptions.UserError:
raise
except odoo.exceptions.RedirectWarning:
raise
except werkzeug.exceptions.HTTPException:
raise
except OperationalError:
# Do not hide PostgreSQL low-level exceptions, to let the auto-replay
# of serialized transactions work its magic
raise
except ZeroDivisionError:
# empty locals dict makes the eval behave like top-level code
return unsafe_eval(c, globals_dict, None)
except _BUBBLEUP_EXCEPTIONS:
raise
except Exception as e:
raise ValueError('%r while evaluating\n%r' % (e, expr))
finally:
if context is not None:
del globals_dict['__builtins__']
context.update(globals_dict)
def test_python_expr(expr, mode="eval"):
try:
test_expr(expr, _SAFE_OPCODES, mode=mode)
c = compile_codeobj(expr, mode=mode)
assert_valid_codeobj(_SAFE_OPCODES, c, expr)
except (SyntaxError, TypeError, ValueError) as err:
if len(err.args) >= 2 and len(err.args[1]) >= 4:
error = {
@ -473,7 +473,7 @@ mods = ['parser', 'relativedelta', 'rrule', 'tz']
for mod in mods:
__import__('dateutil.%s' % mod)
# make sure to patch pytz before exposing
from odoo._monkeypatches.pytz import patch_pytz # noqa: E402, F401
from odoo._monkeypatches.pytz import patch_module as patch_pytz # noqa: E402, F401
patch_pytz()
datetime = wrap_module(__import__('datetime'), ['date', 'datetime', 'time', 'timedelta', 'timezone', 'tzinfo', 'MAXYEAR', 'MINYEAR'])

View file

@ -23,8 +23,8 @@ class SetDefinitions:
(value is a collection of set ids).
Here is an example of set definitions, with natural numbers (N), integer
numbers (Z), rational numbers (Q), real numbers (R), imaginary numbers
(I) and complex numbers (C)::
numbers (Z), rational numbers (Q), irrational numbers (R\\Q), real
numbers (R), imaginary numbers (I) and complex numbers (C)::
{
1: {"ref": "N", "supersets": [2]},
@ -33,7 +33,23 @@ class SetDefinitions:
4: {"ref": "R", "supersets": [6]},
5: {"ref": "I", "supersets": [6], "disjoints": [4]},
6: {"ref": "C"},
7: {"ref": "R\\Q", "supersets": [4]},
}
Representation:
C
R | "C"
Q I | | "I" implied "C"
Z | | "R" implied "C"
N "Q" implied "R"
"R\\Q" implied "R"
"Z" implied "Q"
"N" implied "Z"
R\\Q
"""
self.__leaves: dict[int | str, Leaf] = {}
@ -137,6 +153,47 @@ class SetDefinitions:
return Leaf(UnknownId(ref), ref)
return self.__leaves[ref]
def get_superset_ids(self, ids: Iterable[int]) -> list[int]:
""" Returns the supersets matching the provided list of ids.
Following example defined in this set definitions constructor::
The supersets of "Q" (id 3) is "R" and "C" with ids [4, 6]
"""
return sorted({
sup_id
for id_ in ids
if id_ in self.__leaves
for sup_id in self.__leaves[id_].supersets
if sup_id != id_
})
def get_subset_ids(self, ids: Iterable[int]) -> list[int]:
""" Returns the subsets matching the provided list of ids.
Following example defined in this set definitions constructor::
The subsets of "Q" (id 3) is "Z" and "N" with ids [1, 2]
"""
return sorted({
sub_id
for id_ in ids
if id_ in self.__leaves
for sub_id in self.__leaves[id_].subsets
if sub_id != id_
})
def get_disjoint_ids(self, ids: Iterable[int]) -> list[int]:
""" Returns the disjoints set matching the provided list of ids.
Following example defined in this set definitions constructor::
The disjoint set of "Q" (id 3) is "R\\Q" and "I" with ids [7, 5]
"""
return sorted({
disjoint_id
for id_ in ids
if id_ in self.__leaves
for disjoint_id in self.__leaves[id_].disjoints
})
class SetExpression(ABC):
""" An object that represents a combination of named sets with union,

View file

@ -6,7 +6,6 @@ shortener = reprlib.Repr()
shortener.maxstring = 150
shorten = shortener.repr
class Speedscope:
def __init__(self, name='Speedscope', init_stack_trace=None):
self.init_stack_trace = init_stack_trace or []
@ -44,13 +43,25 @@ class Speedscope:
stack[index] = (method, line, number,)
self.caller_frame = frame
def add_output(self, names, complete=True, display_name=None, use_context=True, **params):
def add_output(self, names, complete=True, display_name=None, use_context=True, constant_time=False, context_per_name = None, **params):
"""
Add a profile output to the list of profiles
:param names: list of keys to combine in this output. Keys corresponds to the one used in add
:param display_name: name of the tab for this output
:param complete: display the complete stack. If False, don't display the stack bellow the profiler.
:param use_context: use execution context (added by ExecutionContext context manager) to display the profile.
:param constant_time: hide temporality. Useful to compare query counts
:param context_per_name: a dictionary of additionnal context per name
"""
entries = []
display_name = display_name or ','.join(names)
for name in names:
entries += self.profiles_raw[name]
raw = self.profiles_raw.get(name)
if not raw:
continue
entries += raw
entries.sort(key=lambda e: e['start'])
result = self.process(entries, use_context=use_context, **params)
result = self.process(entries, use_context=use_context, constant_time=constant_time, **params)
if not result:
return self
start = result[0]['at']
@ -77,30 +88,32 @@ class Speedscope:
self.profiles.append({
"name": display_name,
"type": "evented",
"unit": "seconds",
"unit": "entries" if constant_time else "seconds",
"startValue": 0,
"endValue": end - start,
"events": result
})
return self
def add_default(self):
def add_default(self,**params):
if len(self.profiles_raw) > 1:
self.add_output(self.profiles_raw, display_name='Combined')
self.add_output(self.profiles_raw, display_name='Combined no context', use_context=False)
if params['combined_profile']:
self.add_output(self.profiles_raw, display_name='Combined', **params)
for key, profile in self.profiles_raw.items():
sql = profile and profile[0].get('query')
if sql:
self.add_output([key], hide_gaps=True, display_name=f'{key} (no gap)')
self.add_output([key], continuous=False, complete=False, display_name=f'{key} (density)')
if params['sql_no_gap_profile']:
self.add_output([key], hide_gaps=True, display_name=f'{key} (no gap)', **params)
if params['sql_density_profile']:
self.add_output([key], continuous=False, complete=False, display_name=f'{key} (density)',**params)
else:
self.add_output([key], display_name=key)
elif params['frames_profile']:
self.add_output([key], display_name=key,**params)
return self
def make(self):
def make(self, **params):
if not self.profiles:
self.add_default()
self.add_default(**params)
return {
"name": self.name,
"activeProfileIndex": 0,
@ -121,7 +134,7 @@ class Speedscope:
self.frame_count += 1
return self.frames_indexes[frame]
def stack_to_ids(self, stack, context, stack_offset=0):
def stack_to_ids(self, stack, context, aggregate_sql=False, stack_offset=0):
"""
:param stack: A list of hashable frame
:param context: an iterable of (level, value) ordered by level
@ -138,6 +151,8 @@ class Speedscope:
while context_level is not None and context_level < stack_offset:
context_level, context_value = next(context_iterator, (None, None))
for level, frame in enumerate(stack, start=stack_offset + 1):
if aggregate_sql:
frame = (frame[0], '', frame[2])
while context_level == level:
context_frame = (", ".join(f"{k}={v}" for k, v in context_value.items()), '', '')
stack_ids.append(self.get_frame_id(context_frame))
@ -145,7 +160,7 @@ class Speedscope:
stack_ids.append(self.get_frame_id(frame))
return stack_ids
def process(self, entries, continuous=True, hide_gaps=False, use_context=True, constant_time=False):
def process(self, entries, continuous=True, hide_gaps=False, use_context=True, constant_time=False, aggregate_sql=False, **params):
# constant_time parameters is mainly useful to hide temporality when focussing on sql determinism
entry_end = previous_end = None
if not entries:
@ -164,7 +179,6 @@ class Speedscope:
entry_start = close_time = index
else:
previous_end = entry_end
if hide_gaps and previous_end:
entry_start = previous_end
else:
@ -185,13 +199,14 @@ class Speedscope:
entry_stack_ids = self.stack_to_ids(
entry['stack'] or [],
use_context and entry.get('exec_context'),
aggregate_sql,
self.init_stack_trace_level
)
level = 0
if continuous:
level = -1
for level, at_level in enumerate(zip(current_stack_ids, entry_stack_ids)):
current, new = at_level
for current, new in zip(current_stack_ids, entry_stack_ids):
level += 1
if current != new:
break
else:

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# pylint: disable=sql-injection
from __future__ import annotations
@ -7,6 +6,7 @@ import enum
import json
import logging
import re
import warnings
from binascii import crc32
from collections import defaultdict
from typing import TYPE_CHECKING
@ -16,14 +16,12 @@ if TYPE_CHECKING:
from collections.abc import Iterable
import psycopg2
import psycopg2.sql as pgsql
from .misc import named_to_positional_printf
__all__ = [
"SQL",
"create_index",
"create_unique_index",
"drop_view_if_exists",
"escape_psql",
"index_exists",
@ -52,9 +50,10 @@ class SQL:
cr.execute(sql)
The code is given as a ``%``-format string, and supports either positional
arguments (with `%s`) or named arguments (with `%(name)s`). Escaped
characters (like ``"%%"``) are not supported, though. The arguments are
meant to be merged into the code using the `%` formatting operator.
arguments (with `%s`) or named arguments (with `%(name)s`). The arguments
are meant to be merged into the code using the `%` formatting operator.
Note that the character ``%`` must always be escaped (as ``%%``), even if
the code does not have parameters, like in ``SQL("foo LIKE 'a%%'")``.
The SQL wrapper is designed to be composable: the arguments can be either
actual parameters, or SQL objects themselves::
@ -84,10 +83,10 @@ class SQL:
__code: str
__params: tuple
__to_flush: tuple
__to_flush: tuple[Field, ...]
# pylint: disable=keyword-arg-before-vararg
def __init__(self, code: (str | SQL) = "", /, *args, to_flush: (Field | None) = None, **kwargs):
def __init__(self, code: (str | SQL) = "", /, *args, to_flush: (Field | Iterable[Field] | None) = None, **kwargs):
if isinstance(code, SQL):
if args or kwargs or to_flush:
raise TypeError("SQL() unexpected arguments when code has type SQL")
@ -106,7 +105,12 @@ class SQL:
code % () # check that code does not contain %s
self.__code = code
self.__params = ()
self.__to_flush = () if to_flush is None else (to_flush,)
if to_flush is None:
self.__to_flush = ()
elif hasattr(to_flush, '__iter__'):
self.__to_flush = tuple(to_flush)
else:
self.__to_flush = (to_flush,)
return
code_list = []
@ -121,9 +125,12 @@ class SQL:
code_list.append("%s")
params_list.append(arg)
if to_flush is not None:
to_flush_list.append(to_flush)
if hasattr(to_flush, '__iter__'):
to_flush_list.extend(to_flush)
else:
to_flush_list.append(to_flush)
self.__code = code % tuple(code_list)
self.__code = code.replace('%%', '%%%%') % tuple(code_list)
self.__params = tuple(params_list)
self.__to_flush = tuple(to_flush_list)
@ -153,6 +160,9 @@ class SQL:
def __eq__(self, other):
return isinstance(other, SQL) and self.__code == other.__code and self.__params == other.__params
def __hash__(self):
return hash((self.__code, self.__params))
def __iter__(self):
""" Yields ``self.code`` and ``self.params``. This was introduced for
backward compatibility, as it enables to access the SQL and parameters
@ -161,6 +171,7 @@ class SQL:
sql = SQL(...)
code, params = sql
"""
warnings.warn("Deprecated since 19.0, use code and params properties directly", DeprecationWarning)
yield self.code
yield self.params
@ -410,12 +421,8 @@ def set_not_null(cr, tablename, columnname):
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL",
SQL.identifier(tablename), SQL.identifier(columnname),
)
try:
with cr.savepoint(flush=False):
cr.execute(query, log_exceptions=False)
_schema.debug("Table %r: column %r: added constraint NOT NULL", tablename, columnname)
except Exception:
raise Exception("Table %r: unable to set NOT NULL on column %r", tablename, columnname)
cr.execute(query, log_exceptions=False)
_schema.debug("Table %r: column %r: added constraint NOT NULL", tablename, columnname)
def drop_not_null(cr, tablename, columnname):
@ -441,36 +448,26 @@ def constraint_definition(cr, tablename, constraintname):
def add_constraint(cr, tablename, constraintname, definition):
""" Add a constraint on the given table. """
# There is a fundamental issue with SQL implementation that messes up with queries
# using %, for details check the PR discussion of this patch #188716. To be fixed
# in master. Here we use instead psycopg.sql
query1 = pgsql.SQL("ALTER TABLE {} ADD CONSTRAINT {} {}").format(
pgsql.Identifier(tablename), pgsql.Identifier(constraintname), pgsql.SQL(definition),
query1 = SQL(
"ALTER TABLE %s ADD CONSTRAINT %s %s",
SQL.identifier(tablename), SQL.identifier(constraintname), SQL(definition.replace('%', '%%')),
)
query2 = SQL(
"COMMENT ON CONSTRAINT %s ON %s IS %s",
SQL.identifier(constraintname), SQL.identifier(tablename), definition,
)
try:
with cr.savepoint(flush=False):
cr.execute(query1, log_exceptions=False)
cr.execute(query2, log_exceptions=False)
_schema.debug("Table %r: added constraint %r as %s", tablename, constraintname, definition)
except Exception:
raise Exception("Table %r: unable to add constraint %r as %s", tablename, constraintname, definition)
cr.execute(query1, log_exceptions=False)
cr.execute(query2, log_exceptions=False)
_schema.debug("Table %r: added constraint %r as %s", tablename, constraintname, definition)
def drop_constraint(cr, tablename, constraintname):
""" drop the given constraint. """
try:
with cr.savepoint(flush=False):
cr.execute(SQL(
"ALTER TABLE %s DROP CONSTRAINT %s",
SQL.identifier(tablename), SQL.identifier(constraintname),
))
_schema.debug("Table %r: dropped constraint %r", tablename, constraintname)
except Exception:
_schema.warning("Table %r: unable to drop constraint %r!", tablename, constraintname)
""" Drop the given constraint. """
cr.execute(SQL(
"ALTER TABLE %s DROP CONSTRAINT %s",
SQL.identifier(tablename), SQL.identifier(constraintname),
))
_schema.debug("Table %r: dropped constraint %r", tablename, constraintname)
def add_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondelete):
@ -483,7 +480,6 @@ def add_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondele
))
_schema.debug("Table %r: added foreign key %r references %r(%r) ON DELETE %s",
tablename1, columnname1, tablename2, columnname2, ondelete)
return True
def get_foreign_keys(cr, tablename1, columnname1, tablename2, columnname2, ondelete):
@ -531,8 +527,10 @@ def fix_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondele
found = True
else:
drop_constraint(cr, tablename1, fk[0])
if not found:
return add_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondelete)
if found:
return False
add_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondelete)
return True
def index_exists(cr, indexname):
@ -545,32 +543,78 @@ def check_index_exist(cr, indexname):
assert index_exists(cr, indexname), f"{indexname} does not exist"
def create_index(cr, indexname, tablename, expressions, method='btree', where=''):
""" Create the given index unless it exists. """
def index_definition(cr, indexname):
""" Read the index definition from the database """
cr.execute(SQL("""
SELECT idx.indexdef, d.description
FROM pg_class c
JOIN pg_indexes idx ON c.relname = idx.indexname
LEFT JOIN pg_description d ON c.oid = d.objoid
WHERE c.relname = %s AND c.relkind = 'i'
""", indexname))
return cr.fetchone() if cr.rowcount else (None, None)
def create_index(
cr,
indexname,
tablename,
expressions,
method='btree',
where='',
*,
comment=None,
unique=False
):
""" Create the given index unless it exists.
:param cr: The cursor
:param indexname: The name of the index
:param tablename: The name of the table
:param method: The type of the index (default: btree)
:param where: WHERE clause for the index (default: '')
:param comment: The comment to set on the index
:param unique: Whether the index is unique or not (default: False)
"""
assert expressions, "Missing expressions"
if index_exists(cr, indexname):
return
cr.execute(SQL(
"CREATE INDEX %s ON %s USING %s (%s)%s",
SQL.identifier(indexname),
SQL.identifier(tablename),
definition = SQL(
"USING %s (%s)%s",
SQL(method),
SQL(", ").join(SQL(expression) for expression in expressions),
SQL(" WHERE %s", SQL(where)) if where else SQL(),
))
_schema.debug("Table %r: created index %r (%s)", tablename, indexname, ", ".join(expressions))
)
add_index(cr, indexname, tablename, definition, unique=unique, comment=comment)
def add_index(cr, indexname, tablename, definition, *, unique: bool, comment=''):
""" Create an index. """
if isinstance(definition, str):
definition = SQL(definition.replace('%', '%%'))
else:
definition = SQL(definition)
query = SQL(
"CREATE %sINDEX %s ON %s %s",
SQL("UNIQUE ") if unique else SQL(),
SQL.identifier(indexname),
SQL.identifier(tablename),
definition,
)
query_comment = SQL(
"COMMENT ON INDEX %s IS %s",
SQL.identifier(indexname), comment,
) if comment else None
cr.execute(query, log_exceptions=False)
if query_comment:
cr.execute(query_comment, log_exceptions=False)
_schema.debug("Table %r: created index %r (%s)", tablename, indexname, definition.code)
def create_unique_index(cr, indexname, tablename, expressions):
""" Create the given index unless it exists. """
if index_exists(cr, indexname):
return
cr.execute(SQL(
"CREATE UNIQUE INDEX %s ON %s (%s)",
SQL.identifier(indexname),
SQL.identifier(tablename),
SQL(", ").join(SQL(expression) for expression in expressions),
))
_schema.debug("Table %r: created index %r (%s)", tablename, indexname, ", ".join(expressions))
warnings.warn("Since 19.0, use create_index(unique=True)", DeprecationWarning)
return create_index(cr, indexname, tablename, expressions, unique=True)
def drop_index(cr, indexname, tablename):
@ -636,7 +680,7 @@ def increment_fields_skiplock(records, *fields):
for field in fields:
assert records._fields[field].type == 'integer'
cr = records._cr
cr = records.env.cr
tablename = records._table
cr.execute(SQL(
"""

View file

@ -104,7 +104,7 @@ def locate_node(arch, spec):
return None
def apply_inheritance_specs(source, specs_tree, inherit_branding=False, pre_locate=lambda s: True):
def apply_inheritance_specs(source, specs_tree, inherit_branding=False, pre_locate=None):
""" Apply an inheriting view (a descendant of the base view)
Apply to a source architecture all the spec nodes (i.e. nodes
@ -123,6 +123,7 @@ def apply_inheritance_specs(source, specs_tree, inherit_branding=False, pre_loca
# Queue of specification nodes (i.e. nodes describing where and
# changes to apply to some parent architecture).
specs = specs_tree if isinstance(specs_tree, list) else [specs_tree]
pre_locate = pre_locate or (lambda _: True)
def extract(spec):
"""
@ -209,15 +210,24 @@ def apply_inheritance_specs(source, specs_tree, inherit_branding=False, pre_loca
node.addprevious(child)
node.getparent().remove(node)
elif mode == "inner":
# Replace the entire content of an element
for child in node:
node.remove(child)
# use a sentinel to keep the existing children nodes, so
# that one can move existing children nodes inside the new
# content of the node (with position="move")
sentinel = E.sentinel()
if len(node) > 0:
node[0].addprevious(sentinel)
else:
node.append(sentinel)
# fill the node with the spec *before* the sentinel
# remove node.text before that operation, otherwise it will
# be merged with the new content's text
node.text = None
for child in spec:
node.append(copy.deepcopy(child))
node.text = spec.text
add_stripped_items_before(sentinel, copy.deepcopy(spec), extract)
# now remove the old content and the sentinel
for child in reversed(node):
node.remove(child)
if child == sentinel:
break
else:
raise ValueError(_lt("Invalid mode attribute: “%s", mode))
elif pos == 'attributes':

View file

@ -12,7 +12,6 @@ import fnmatch
import functools
import inspect
import io
import itertools
import json
import locale
import logging
@ -21,8 +20,8 @@ import polib
import re
import tarfile
import typing
import warnings
from collections import defaultdict, namedtuple
from collections.abc import Iterable, Iterator
from contextlib import suppress
from datetime import datetime
from os.path import join
@ -37,7 +36,11 @@ from psycopg2.extras import Json
import odoo
from odoo.exceptions import UserError
from .config import config
from .misc import file_open, file_path, get_iso_codes, OrderedSet, ReadonlyDict, SKIPPED_ELEMENT_TYPES
from .i18n import format_list
from .misc import file_open, file_path, get_iso_codes, split_every, OrderedSet, ReadonlyDict, SKIPPED_ELEMENT_TYPES
if typing.TYPE_CHECKING:
from odoo.api import Environment
__all__ = [
"_",
@ -55,99 +58,8 @@ JAVASCRIPT_TRANSLATION_COMMENT = 'odoo-javascript'
SKIPPED_ELEMENTS = ('script', 'style', 'title')
_LOCALE2WIN32 = {
'af_ZA': 'Afrikaans_South Africa',
'sq_AL': 'Albanian_Albania',
'ar_SA': 'Arabic_Saudi Arabia',
'eu_ES': 'Basque_Spain',
'be_BY': 'Belarusian_Belarus',
'bs_BA': 'Bosnian_Bosnia and Herzegovina',
'bg_BG': 'Bulgarian_Bulgaria',
'ca_ES': 'Catalan_Spain',
'hr_HR': 'Croatian_Croatia',
'zh_CN': 'Chinese_China',
'zh_TW': 'Chinese_Taiwan',
'cs_CZ': 'Czech_Czech Republic',
'da_DK': 'Danish_Denmark',
'nl_NL': 'Dutch_Netherlands',
'et_EE': 'Estonian_Estonia',
'fa_IR': 'Farsi_Iran',
'ph_PH': 'Filipino_Philippines',
'fi_FI': 'Finnish_Finland',
'fr_FR': 'French_France',
'fr_BE': 'French_France',
'fr_CH': 'French_France',
'fr_CA': 'French_France',
'ga': 'Scottish Gaelic',
'gl_ES': 'Galician_Spain',
'ka_GE': 'Georgian_Georgia',
'de_DE': 'German_Germany',
'el_GR': 'Greek_Greece',
'gu': 'Gujarati_India',
'he_IL': 'Hebrew_Israel',
'hi_IN': 'Hindi',
'hu': 'Hungarian_Hungary',
'is_IS': 'Icelandic_Iceland',
'id_ID': 'Indonesian_Indonesia',
'it_IT': 'Italian_Italy',
'ja_JP': 'Japanese_Japan',
'kn_IN': 'Kannada',
'km_KH': 'Khmer',
'ko_KR': 'Korean_Korea',
'lo_LA': 'Lao_Laos',
'lt_LT': 'Lithuanian_Lithuania',
'lat': 'Latvian_Latvia',
'ml_IN': 'Malayalam_India',
'mi_NZ': 'Maori',
'mn': 'Cyrillic_Mongolian',
'no_NO': 'Norwegian_Norway',
'nn_NO': 'Norwegian-Nynorsk_Norway',
'pl': 'Polish_Poland',
'pt_PT': 'Portuguese_Portugal',
'pt_BR': 'Portuguese_Brazil',
'ro_RO': 'Romanian_Romania',
'ru_RU': 'Russian_Russia',
'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
'sk_SK': 'Slovak_Slovakia',
'sl_SI': 'Slovenian_Slovenia',
#should find more specific locales for Spanish countries,
#but better than nothing
'es_AR': 'Spanish_Spain',
'es_BO': 'Spanish_Spain',
'es_CL': 'Spanish_Spain',
'es_CO': 'Spanish_Spain',
'es_CR': 'Spanish_Spain',
'es_DO': 'Spanish_Spain',
'es_EC': 'Spanish_Spain',
'es_ES': 'Spanish_Spain',
'es_GT': 'Spanish_Spain',
'es_HN': 'Spanish_Spain',
'es_MX': 'Spanish_Spain',
'es_NI': 'Spanish_Spain',
'es_PA': 'Spanish_Spain',
'es_PE': 'Spanish_Spain',
'es_PR': 'Spanish_Spain',
'es_PY': 'Spanish_Spain',
'es_SV': 'Spanish_Spain',
'es_UY': 'Spanish_Spain',
'es_VE': 'Spanish_Spain',
'sv_SE': 'Swedish_Sweden',
'ta_IN': 'English_Australia',
'th_TH': 'Thai_Thailand',
'tr_TR': 'Turkish_Türkiye',
'uk_UA': 'Ukrainian_Ukraine',
'vi_VN': 'Vietnamese_Viet Nam',
'tlh_TLH': 'Klingon',
}
# these direct uses of CSV are ok.
import csv  # pylint: disable=deprecated-module

# CSV dialect identical to `csv.excel` except that rows are terminated by a
# plain "\n" instead of the excel default "\r\n"; registered as "UNIX" so it
# can be selected by name in csv.reader/csv.writer calls.
class UNIX_LINE_TERMINATOR(csv.excel):
    lineterminator = '\n'

csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)
# which elements are translated inline
TRANSLATED_ELEMENTS = {
@ -157,16 +69,29 @@ TRANSLATED_ELEMENTS = {
'sup', 'time', 'u', 'var', 'wbr', 'text', 'select', 'option',
}
# Which attributes must be translated. This is a dict, where the value indicates
# a condition for a node to have the attribute translatable.
# Attributes from QWeb views that must be translated.
# ⚠ Note that it implicitly includes their t-attf-* equivalent.
TRANSLATED_ATTRS = dict.fromkeys({
TRANSLATED_ATTRS = {
'string', 'add-label', 'help', 'sum', 'avg', 'confirm', 'placeholder', 'alt', 'title', 'aria-label',
'aria-keyshortcuts', 'aria-placeholder', 'aria-roledescription', 'aria-valuetext',
'value_label', 'data-tooltip', 'label', 'cancel-label', 'confirm-label',
}, lambda e: True)
'value_label', 'data-tooltip', 'label', 'confirm-label', 'cancel-label',
}
def translate_attrib_value(node):
TRANSLATED_ATTRS.update({f't-attf-{attr}' for attr in TRANSLATED_ATTRS})
# {column value of "ir_model_fields"."translate": orm field.translate}
FIELD_TRANSLATE = {
None: False,
'standard': True,
}
def is_translatable_attrib(key):
    """Return whether the attribute name ``key`` is translatable.

    An attribute is translatable when it is listed in ``TRANSLATED_ATTRS``
    or when its name carries the ``.translate`` suffix.  Falsy keys
    (``None``, empty string) are never translatable.
    """
    return bool(key) and (key in TRANSLATED_ATTRS or key.endswith('.translate'))
def is_translatable_attrib_value(node):
# check if the value attribute of a node must be translated
classes = node.attrib.get('class', '').split(' ')
return (
@ -176,11 +101,9 @@ def translate_attrib_value(node):
and 'o_translatable_input_hidden' in classes
)
TRANSLATED_ATTRS.update(
value=translate_attrib_value,
text=lambda e: (e.tag == 'field' and e.attrib.get('widget', '') == 'url'),
**{f't-attf-{attr}': cond for attr, cond in TRANSLATED_ATTRS.items()},
)
def is_translatable_attrib_text(node):
    """Return whether the ``text`` attribute of ``node`` must be translated.

    Only ``<field>`` elements rendered with the ``url`` widget qualify.
    """
    if node.tag != 'field':
        return False
    return node.attrib.get('widget', '') == 'url'
# This should match the list provided to OWL (see translatableAttributes).
OWL_TRANSLATED_ATTRS = {
@ -198,6 +121,19 @@ OWL_TRANSLATED_ATTRS = {
avoid_pattern = re.compile(r"\s*<!DOCTYPE", re.IGNORECASE | re.MULTILINE | re.UNICODE)
space_pattern = re.compile(r"[\s\uFEFF]*") # web_editor uses \uFEFF as ZWNBSP
# regexpr for string formatting and extract ( ruby-style )|( jinja-style ) used in `_compile_format`
FORMAT_REGEX = re.compile(r'(?:#\{(.+?)\})|(?:\{\{(.+?)\}\})')


def translate_format_string_expression(term, callback):
    """Translate ``term`` while protecting its interpolation placeholders.

    Every ``#{...}`` / ``{{...}}`` placeholder is replaced by a numbered
    ``{{N}}`` marker before ``callback`` translates the remaining text; the
    original placeholders are then restored inside the translated result.

    :param term: source string possibly containing placeholders
    :param callback: translation function taking and returning a string
    :return: the translated string with placeholders restored, or ``None``
             when the callback yields no translation
    """
    placeholders = {}

    def _mask(match):
        key = str(len(placeholders))
        placeholders[key] = match.group(0)
        return '{{%s}}' % key

    masked = FORMAT_REGEX.sub(_mask, term)
    translated = callback(masked)
    if not translated:
        return None

    def _unmask(match):
        # match.group(0) is a "{{N}}" marker; strip braces to recover N
        return placeholders.get(match.group(0)[2:-2], 'None')

    return FORMAT_REGEX.sub(_unmask, translated)
def translate_xml_node(node, callback, parse, serialize):
""" Return the translation of the given XML/HTML node.
@ -222,7 +158,7 @@ def translate_xml_node(node, callback, parse, serialize):
# be translated as a whole using the `o_translate_inline` class.
"o_translate_inline" in node.attrib.get("class", "").split()
or node.tag in TRANSLATED_ELEMENTS
and not any(key.startswith("t-") for key in node.attrib)
and not any(key.startswith("t-") or key.endswith(".translate") for key in node.attrib)
and all(translatable(child) for child in node)
)
@ -239,7 +175,11 @@ def translate_xml_node(node, callback, parse, serialize):
and translatable(node[pos])
and (
any( # attribute to translate
val and key in TRANSLATED_ATTRS and TRANSLATED_ATTRS[key](node[pos])
val and (
is_translatable_attrib(key) or
(key == 'value' and is_translatable_attrib_value(node[pos])) or
(key == 'text' and is_translatable_attrib_text(node[pos]))
)
for key, val in node[pos].attrib.items()
)
# node[pos] contains some text to translate
@ -256,7 +196,7 @@ def translate_xml_node(node, callback, parse, serialize):
isinstance(node, SKIPPED_ELEMENT_TYPES)
or node.tag in SKIPPED_ELEMENTS
or node.get('t-translation', "").strip() == "off"
or node.tag == 'attribute' and node.get('name') not in TRANSLATED_ATTRS
or node.tag == 'attribute' and node.get('name') not in ('value', 'text') and not is_translatable_attrib(node.get('name'))
or node.getparent() is None and avoid_pattern.match(node.text or "")
):
return
@ -304,8 +244,17 @@ def translate_xml_node(node, callback, parse, serialize):
# translate the attributes of the node
for key, val in node.attrib.items():
if nonspace(val) and key in TRANSLATED_ATTRS and TRANSLATED_ATTRS[key](node):
node.set(key, callback(val.strip()) or val)
if nonspace(val):
if (
is_translatable_attrib(key) or
(key == 'value' and is_translatable_attrib_value(node)) or
(key == 'text' and is_translatable_attrib_text(node))
):
if key.startswith('t-'):
value = translate_format_string_expression(val.strip(), callback)
else:
value = callback(val.strip())
node.set(key, value or val)
process(node)
@ -447,13 +396,8 @@ html_translate.is_text = is_text
xml_translate.term_adapter = xml_term_adapter
def translate_sql_constraint(cr, key, lang):
cr.execute("""
SELECT COALESCE(c.message->>%s, c.message->>'en_US') as message
FROM ir_model_constraint c
WHERE name=%s and type='u'
""", (lang, key))
return cr.fetchone()[0]
FIELD_TRANSLATE['html_translate'] = html_translate
FIELD_TRANSLATE['xml_translate'] = xml_translate
def get_translation(module: str, lang: str, source: str, args: tuple | dict) -> str:
@ -477,6 +421,14 @@ def get_translation(module: str, lang: str, source: str, args: tuple | dict) ->
args = {k: v._translate(lang) if isinstance(v, LazyGettext) else v for k, v in args.items()}
else:
args = tuple(v._translate(lang) if isinstance(v, LazyGettext) else v for v in args)
if any(isinstance(a, Iterable) and not isinstance(a, str) for a in (args.values() if args_is_dict else args)):
# automatically format list-like arguments in a localized way
def process_translation_arg(v):
return format_list(env=None, lst=v, lang_code=lang) if isinstance(v, Iterable) and not isinstance(v, str) else v
if args_is_dict:
args = {k: process_translation_arg(v) for k, v in args.items()}
else:
args = tuple(process_translation_arg(v) for v in args)
# format
try:
return translation % args
@ -520,7 +472,8 @@ def get_translated_module(arg: str | int | typing.Any) -> str: # frame not repr
# just a quick lookup because `get_resource_from_path is slow compared to this`
return module_name.split('.')[2]
path = inspect.getfile(frame)
path_info = odoo.modules.get_resource_from_path(path)
from odoo.modules import get_resource_from_path # noqa: PLC0415
path_info = get_resource_from_path(path)
return path_info[0] if path_info else 'base'
@ -536,13 +489,8 @@ def _get_cr(frame):
return local_env.cr
if (cr := getattr(local_self, 'cr', None)) is not None:
return cr
try:
from odoo.http import request # noqa: PLC0415
request_env = request.env
if request_env is not None and (cr := request_env.cr) is not None:
return cr
except RuntimeError:
pass
if (req := odoo.http.request) and (env := req.env):
return env.cr
return None
@ -576,13 +524,8 @@ def _get_lang(frame, default_lang='') -> str:
# we found the env, in case we fail, just log in debug
log_level = logging.DEBUG
# get from request?
try:
from odoo.http import request # noqa: PLC0415
request_env = request.env
if request_env and (lang := request_env.lang):
return lang
except RuntimeError:
pass
if (req := odoo.http.request) and (env := req.env) and (lang := env.lang):
return lang
# Last resort: attempt to guess the language of the user
# Pitfall: some operations are performed in sudo mode, and we
# don't know the original uid, so the language may
@ -590,7 +533,8 @@ def _get_lang(frame, default_lang='') -> str:
cr = _get_cr(frame)
uid = _get_uid(frame)
if cr and uid:
env = odoo.api.Environment(cr, uid, {})
from odoo import api # noqa: PLC0415
env = api.Environment(cr, uid, {})
if lang := env['res.users'].context_get().get('lang'):
return lang
# fallback
@ -615,7 +559,7 @@ def _get_translation_source(stack_level: int, module: str = '', lang: str = '',
return module or 'base', 'en_US'
def get_text_alias(source: str, *args, **kwargs):
def get_text_alias(source: str, /, *args, **kwargs):
assert not (args and kwargs)
assert isinstance(source, str)
module, lang = _get_translation_source(1)
@ -647,7 +591,7 @@ class LazyGettext:
__slots__ = ('_args', '_default_lang', '_module', '_source')
def __init__(self, source, *args, _module='', _default_lang='', **kwargs):
def __init__(self, source, /, *args, _module='', _default_lang='', **kwargs):
assert not (args and kwargs)
assert isinstance(source, str)
self._source = source
@ -739,12 +683,26 @@ def unquote(str):
"""Returns unquoted PO term string, with special PO characters unescaped"""
return re_escaped_char.sub(_sub_replacement, str[1:-1])
def TranslationFileReader(source, fileformat='po'):
def parse_xmlid(xmlid: str, default_module: str) -> tuple[str, str]:
    """Split an external id into its ``(module, name)`` pair.

    Only the first ``.`` separates the module; unqualified ids fall back
    to ``default_module``.
    """
    module, sep, name = xmlid.partition('.')
    if not sep:
        return default_module, xmlid
    return module, name
def translation_file_reader(source, fileformat='po', module=None):
""" Iterate over translation file to return Odoo translation entries """
if fileformat == 'csv':
if module is not None:
# if `module` is provided, we are reading a data file located in that module
return CSVDataFileReader(source, module)
return CSVFileReader(source)
if fileformat == 'po':
return PoFileReader(source)
if fileformat == 'xml':
assert module
return XMLDataFileReader(source, module)
_logger.info('Bad file format: %s', fileformat)
raise Exception(_('Bad file format: %s', fileformat))
@ -776,6 +734,72 @@ class CSVFileReader:
yield entry
class CSVDataFileReader:
    """Iterate over the translated terms of a CSV data file.

    Columns named ``<field>@<lang>`` hold the translation of column
    ``<field>`` for language ``<lang>``; each such cell yields one Odoo
    translation entry dict.
    """

    def __init__(self, source, module: str):
        """Read the translations in CSV data file.

        :param source: the input stream
        :param module: the CSV file is considered as a data file possibly
                       containing terms translated with the `@` syntax
        """
        utf8_reader = codecs.getreader('utf-8')
        self.module = module
        # model name comes from the file name, e.g. "res.partner-demo.csv"
        basename = os.path.basename(source.name)
        self.model = os.path.splitext(basename)[0].split('-')[0]
        self.source = csv.DictReader(utf8_reader(source), quotechar='"', delimiter=',')
        self.prev_code_src = ""

    def __iter__(self):
        header = self.source.fieldnames or []
        # Put fallback languages first
        translated_fnames = sorted(
            (fname.split('@', maxsplit=1) for fname in header if '@' in fname),
            key=lambda pair: pair[1],
        )
        for entry in self.source:
            for fname, lang in translated_fnames:
                module, imd_name = parse_xmlid(entry['id'], self.module)
                yield {
                    'type': 'model',
                    'imd_model': self.model,
                    'imd_name': imd_name,
                    'lang': lang,
                    'value': entry[f"{fname}@{lang}"],
                    'src': entry[fname],
                    'module': module,
                    'name': f"{self.model},{fname}",
                }
class XMLDataFileReader:
    """Iterate over the translated terms of an XML data file.

    A ``<field name="foo@lang">`` element holds the translation of the
    sibling ``<field name="foo">`` for language ``lang``; each such field
    yields one Odoo translation entry dict.
    """

    def __init__(self, source, module: str):
        self.module = module
        try:
            self.source = etree.parse(source)
        except etree.LxmlSyntaxError:
            _logger.warning("Error parsing XML file %s", source)
            # fall back to an empty document so that iteration yields nothing
            self.source = etree.fromstring('<data/>')

    def __iter__(self):
        # records having at least one translated <field name="...@lang">
        for record in self.source.xpath("//field[contains(@name, '@')]/.."):
            vals = {field.attrib['name']: field.text for field in record.xpath("field")}
            # Put fallback languages first
            translated_fnames = sorted(
                (fname.split('@', maxsplit=1) for fname in vals if '@' in fname),
                key=lambda pair: pair[1],
            )
            model = record.attrib['model']
            for fname, lang in translated_fnames:
                module, imd_name = parse_xmlid(record.attrib['id'], self.module)
                yield {
                    'type': 'model',
                    'imd_model': model,
                    'imd_name': imd_name,
                    'lang': lang,
                    'value': vals[f"{fname}@{lang}"],
                    'src': vals[fname],
                    'module': module,
                    'name': f"{model},{fname}",
                }
class PoFileReader:
""" Iterate over po file to return Odoo translation entries """
def __init__(self, source):
@ -1002,17 +1026,25 @@ class TarFileWriter:
self.tar.close()
# Methods to export the translation file
def trans_export(lang, modules, buffer, format, cr):
reader = TranslationModuleReader(cr, modules=modules, lang=lang)
writer = TranslationFileWriter(buffer, fileformat=format, lang=lang)
writer.write_rows(reader)
# Methods to export the translation file
# pylint: disable=redefined-builtin
def trans_export_records(lang, model_name, ids, buffer, format, cr):
reader = TranslationRecordReader(cr, model_name, ids, lang=lang)
def trans_export(lang, modules, buffer, format, env):
    """Export the translations of ``modules`` into ``buffer``.

    :return: True when translatable terms were found and written,
             False when there was nothing to export.
    """
    reader = TranslationModuleReader(env.cr, modules=modules, lang=lang)
    if reader:
        writer = TranslationFileWriter(buffer, fileformat=format, lang=lang)
        writer.write_rows(reader)
        return True
    return False
def trans_export_records(lang, model_name, ids, buffer, format, env):
    """Export the translations of the given ``model_name`` records into ``buffer``.

    :return: True when translatable terms were found and written,
             False when there was nothing to export.
    """
    reader = TranslationRecordReader(env.cr, model_name, ids, lang=lang)
    if reader:
        writer = TranslationFileWriter(buffer, fileformat=format, lang=lang)
        writer.write_rows(reader)
        return True
    return False
def _push(callback, term, source_line):
@ -1037,7 +1069,7 @@ def _extract_translatable_qweb_terms(element, callback):
if isinstance(el, SKIPPED_ELEMENT_TYPES): continue
if (el.tag.lower() not in SKIPPED_ELEMENTS
and "t-js" not in el.attrib
and not (el.tag == 'attribute' and el.get('name') not in TRANSLATED_ATTRS)
and not (el.tag == 'attribute' and not is_translatable_attrib(el.get('name')))
and el.get("t-translation", '').strip() != "off"):
_push(callback, el.text, el.sourceline)
@ -1107,7 +1139,8 @@ def extract_spreadsheet_terms(fileobj, keywords, comment_tags, options):
data = json.load(fileobj)
for sheet in data.get('sheets', []):
for cell in sheet['cells'].values():
content = cell.get('content', '')
# 'cell' was an object in versions <saas-18.1
content = cell if isinstance(cell, str) else cell.get('content', '')
if content.startswith('='):
terms.update(extract_formula_terms(content))
else:
@ -1125,8 +1158,10 @@ def extract_spreadsheet_terms(fileobj, keywords, comment_tags, options):
terms.update(
axes.get('title', {}).get('text', '') for axes in figure['data']['axesDesign'].values()
)
if 'baselineDescr' in figure['data']:
terms.add(figure['data']['baselineDescr'])
if 'text' in (baselineDescr := figure['data'].get('baselineDescr', {})):
terms.add(baselineDescr['text'])
if 'text' in (keyDescr := figure['data'].get('keyDescr', {})):
terms.add(keyDescr['text'])
terms.update(global_filter['label'] for global_filter in data.get('globalFilters', []))
return (
(0, None, term, [])
@ -1141,9 +1176,13 @@ class TranslationReader:
def __init__(self, cr, lang=None):
self._cr = cr
self._lang = lang or 'en_US'
self.env = odoo.api.Environment(cr, odoo.SUPERUSER_ID, {})
from odoo import api # noqa: PLC0415
self.env = api.Environment(cr, api.SUPERUSER_ID, {})
self._to_translate = []
def __bool__(self):
return bool(self._to_translate)
def __iter__(self):
for module, source, name, res_id, ttype, comments, _record_id, value in self._to_translate:
yield (module, ttype, name, res_id, source, value, comments)
@ -1323,6 +1362,7 @@ class TranslationModuleReader(TranslationReader):
def __init__(self, cr, modules=None, lang=None):
super().__init__(cr, lang)
self._modules = modules or ['all']
import odoo.addons # noqa: PLC0415
self._path_list = [(path, True) for path in odoo.addons.__path__]
self._installed_modules = [
m['name']
@ -1334,6 +1374,14 @@ class TranslationModuleReader(TranslationReader):
def _export_translatable_records(self):
""" Export translations of all translated records having an external id """
modules = self._installed_modules if 'all' in self._modules else list(self._modules)
xml_defined = set()
for module in modules:
for filepath in get_datafile_translation_path(module):
fileformat = os.path.splitext(filepath)[-1][1:].lower()
with file_open(filepath, mode='rb') as source:
for entry in translation_file_reader(source, fileformat=fileformat, module=module):
xml_defined.add((entry['imd_model'], module, entry['imd_name']))
query = """SELECT min(name), model, res_id, module
FROM ir_model_data
@ -1341,16 +1389,13 @@ class TranslationModuleReader(TranslationReader):
GROUP BY model, res_id, module
ORDER BY module, model, min(name)"""
if 'all' not in self._modules:
query_param = list(self._modules)
else:
query_param = self._installed_modules
self._cr.execute(query, (query_param,))
self._cr.execute(query, (modules,))
records_per_model = defaultdict(dict)
for (xml_name, model, res_id, module) in self._cr.fetchall():
records_per_model[model][res_id] = ImdInfo(xml_name, model, res_id, module)
for (imd_name, model, res_id, module) in self._cr.fetchall():
if (model, module, imd_name) in xml_defined:
continue
records_per_model[model][res_id] = ImdInfo(imd_name, model, res_id, module)
for model, imd_per_id in records_per_model.items():
self._export_imdinfo(model, imd_per_id)
@ -1375,7 +1420,7 @@ class TranslationModuleReader(TranslationReader):
return module, fabsolutepath, frelativepath, display_path
return None, None, None, None
def _babel_extract_terms(self, fname, path, root, extract_method="python", trans_type='code',
def _babel_extract_terms(self, fname, path, root, extract_method='odoo.tools.babel:extract_python', trans_type='code',
extra_comments=None, extract_keywords={'_': None}):
module, fabsolutepath, _, display_path = self._verified_module_filepaths(fname, path, root)
@ -1384,7 +1429,7 @@ class TranslationModuleReader(TranslationReader):
extra_comments = extra_comments or []
src_file = file_open(fabsolutepath, 'rb')
options = {}
if extract_method == 'python':
if 'python' in extract_method:
options['encoding'] = 'UTF-8'
translations = code_translations.get_python_translations(module, self._lang)
else:
@ -1415,25 +1460,25 @@ class TranslationModuleReader(TranslationReader):
# Also scan these non-addon paths
for bin_path in ['osv', 'report', 'modules', 'service', 'tools']:
self._path_list.append((os.path.join(config['root_path'], bin_path), True))
self._path_list.append((os.path.join(config.root_path, bin_path), True))
# non-recursive scan for individual files in root directory but without
# scanning subdirectories that may contain addons
self._path_list.append((config['root_path'], False))
self._path_list.append((config.root_path, False))
_logger.debug("Scanning modules at paths: %s", self._path_list)
spreadsheet_files_regex = re.compile(r".*_dashboard(\.osheet)?\.json$")
for (path, recursive) in self._path_list:
_logger.debug("Scanning files of modules at %s", path)
for root, dummy, files in os.walk(path, followlinks=True):
for root, _dummy, files in os.walk(path, followlinks=True):
for fname in fnmatch.filter(files, '*.py'):
self._babel_extract_terms(fname, path, root, 'python',
self._babel_extract_terms(fname, path, root, 'odoo.tools.babel:extract_python',
extra_comments=[PYTHON_TRANSLATION_COMMENT],
extract_keywords={'_': None, '_lt': None})
if fnmatch.fnmatch(root, '*/static/src*'):
# Javascript source files
for fname in fnmatch.filter(files, '*.js'):
self._babel_extract_terms(fname, path, root, 'javascript',
self._babel_extract_terms(fname, path, root, 'odoo.tools.babel:extract_javascript',
extra_comments=[JAVASCRIPT_TRANSLATION_COMMENT],
extract_keywords={'_t': None})
# QWeb template files
@ -1448,6 +1493,11 @@ class TranslationModuleReader(TranslationReader):
# due to topdown, first iteration is in first level
break
IrModuleModule = self.env['ir.module.module']
for module in self._modules:
for translation in IrModuleModule._extract_resource_attachment_translations(module, self._lang):
self._push_translation(*translation)
def DeepDefaultDict():
    """Return an auto-vivifying dict: reading a missing key creates and
    stores another ``DeepDefaultDict`` under it, to any nesting depth."""
    return defaultdict(DeepDefaultDict)
@ -1462,34 +1512,39 @@ class TranslationImporter:
def __init__(self, cr, verbose=True):
self.cr = cr
self.verbose = verbose
self.env = odoo.api.Environment(cr, odoo.SUPERUSER_ID, {})
from odoo import api # noqa: PLC0415
self.env = api.Environment(cr, api.SUPERUSER_ID, {})
# {model_name: {field_name: {xmlid: {lang: value}}}}
self.model_translations = DeepDefaultDict()
# {model_name: {field_name: {xmlid: {src: {lang: value}}}}}
self.model_terms_translations = DeepDefaultDict()
self.imported_langs = set()
def load_file(self, filepath, lang, xmlids=None):
def load_file(self, filepath, lang, xmlids=None, module=None):
""" Load translations from the given file path.
:param filepath: file path to open
:param lang: language code of the translations contained in the file;
the language must be present and activated in the database
:param xmlids: if given, only translations for records with xmlid in xmlids will be loaded
:param module: if given, the file will be interpreted as a data file containing translations
"""
with suppress(FileNotFoundError), file_open(filepath, mode='rb', env=self.env) as fileobj:
_logger.info('loading base translation file %s for language %s', filepath, lang)
if self.verbose:
_logger.info('loading base translation file %s for language %s', filepath, lang)
fileformat = os.path.splitext(filepath)[-1][1:].lower()
self.load(fileobj, fileformat, lang, xmlids=xmlids)
self.load(fileobj, fileformat, lang, xmlids=xmlids, module=module)
def load(self, fileobj, fileformat, lang, xmlids=None):
def load(self, fileobj, fileformat, lang, xmlids=None, module=None):
"""Load translations from the given file object.
:param fileobj: buffer open to a translation file
:param fileformat: format of the `fileobj` file, one of 'po' or 'csv'
:param fileformat: format of the `fileobj` file, one of 'po', 'csv', or 'xml'
:param lang: language code of the translations contained in `fileobj`;
the language must be present and activated in the database
:param xmlids: if given, only translations for records with xmlid in xmlids will be loaded
:param module: if given, the file will be interpreted as a data file containing translations
"""
if self.verbose:
_logger.info('loading translation file for language %s', lang)
@ -1498,7 +1553,7 @@ class TranslationImporter:
return None
try:
fileobj.seek(0)
reader = TranslationFileReader(fileobj, fileformat=fileformat)
reader = translation_file_reader(fileobj, fileformat=fileformat, module=module)
self._load(reader, lang, xmlids)
except IOError:
iso_lang = get_iso_codes(lang)
@ -1508,11 +1563,14 @@ class TranslationImporter:
def _load(self, reader, lang, xmlids=None):
if xmlids and not isinstance(xmlids, set):
xmlids = set(xmlids)
valid_langs = get_base_langs(lang)
for row in reader:
if not row.get('value') or not row.get('src'): # ignore empty translations
continue
if row.get('type') == 'code': # ignore code translations
continue
if row.get('lang', lang) not in valid_langs:
continue
model_name = row.get('imd_model')
module_name = row['module']
if model_name not in self.env:
@ -1526,8 +1584,10 @@ class TranslationImporter:
continue
if row.get('type') == 'model' and field.translate is True:
self.model_translations[model_name][field_name][xmlid][lang] = row['value']
self.imported_langs.add(lang)
elif row.get('type') == 'model_terms' and callable(field.translate):
self.model_terms_translations[model_name][field_name][xmlid][row['src']][lang] = row['value']
self.imported_langs.add(lang)
def save(self, overwrite=False, force_overwrite=False):
""" Save translations to the database.
@ -1553,7 +1613,7 @@ class TranslationImporter:
# field_name, {xmlid: {src: {lang: value}}}
for field_name, field_dictionary in model_dictionary.items():
field = fields.get(field_name)
for sub_xmlids in cr.split_for_in_conditions(field_dictionary.keys()):
for sub_xmlids in split_every(cr.IN_MAX, field_dictionary.keys()):
# [module_name, imd_name, module_name, imd_name, ...]
params = []
for xmlid in sub_xmlids:
@ -1617,7 +1677,7 @@ class TranslationImporter:
Model = env[model_name]
model_table = Model._table
for field_name, field_dictionary in model_dictionary.items():
for sub_field_dictionary in cr.split_for_in_conditions(field_dictionary.items()):
for sub_field_dictionary in split_every(cr.IN_MAX, field_dictionary.items()):
# [xmlid, translations, xmlid, translations, ...]
params = []
for xmlid, translations in sub_field_dictionary:
@ -1647,27 +1707,10 @@ class TranslationImporter:
_logger.info("translations are loaded successfully")
def trans_load(cr, filepath, lang, verbose=True, overwrite=False):
warnings.warn('The function trans_load is deprecated in favor of TranslationImporter', DeprecationWarning)
translation_importer = TranslationImporter(cr, verbose=verbose)
translation_importer.load_file(filepath, lang)
translation_importer.save(overwrite=overwrite)
def trans_load_data(cr, fileobj, fileformat, lang, verbose=True, overwrite=False):
warnings.warn('The function trans_load_data is deprecated in favor of TranslationImporter', DeprecationWarning)
translation_importer = TranslationImporter(cr, verbose=verbose)
translation_importer.load(fileobj, fileformat, lang)
translation_importer.save(overwrite=overwrite)
def get_locales(lang=None):
if lang is None:
lang = locale.getlocale()[0]
if os.name == 'nt':
lang = _LOCALE2WIN32.get(lang, lang)
def process(enc):
ln = locale._build_localename((lang, enc))
yield ln
@ -1710,27 +1753,46 @@ def load_language(cr, lang):
:param str lang: language ISO code with optional underscore (``_``) and
l10n flavor (ex: 'fr', 'fr_BE', but not 'fr-BE')
"""
env = odoo.api.Environment(cr, odoo.SUPERUSER_ID, {})
from odoo import api # noqa: PLC0415
env = api.Environment(cr, api.SUPERUSER_ID, {})
lang_ids = env['res.lang'].with_context(active_test=False).search([('code', '=', lang)]).ids
installer = env['base.language.install'].create({'lang_ids': [(6, 0, lang_ids)]})
installer.lang_install()
def get_po_paths(module_name: str, lang: str, env: odoo.api.Environment | None = None):
lang_base = lang.split('_', 1)[0]
# Load the base as a fallback in case a translation is missing:
po_names = [lang_base, lang]
# Exception for Spanish locales: they have two bases, es and es_419:
if lang_base == 'es' and lang not in ('es_ES', 'es_419'):
po_names.insert(1, 'es_419')
def get_base_langs(lang: str) -> list[str]:
    """Return the fallback chain for ``lang``, most generic first.

    The chain starts with the bare language code and ends with ``lang``
    itself (when it carries a territory suffix), with special intermediate
    fallbacks for Latin-American Spanish and Hong-Kong Chinese.
    """
    base, _sep, _territory = lang.partition('_')
    chain = [base]
    # LAC (~non-peninsular) spanishes have a second base
    if base == 'es' and lang not in ('es_ES', 'es_419'):
        chain.append('es_419')
    # HK Chinese ~ Taiwan Chinese
    if lang == 'zh_HK':
        chain.append('zh_TW')
    if lang != base:
        chain.append(lang)
    return chain
def get_po_paths(module_name: str, lang: str) -> Iterator[str]:
po_paths = (
join(module_name, dir_, filename + '.po')
for filename in OrderedSet(po_names)
for filename in get_base_langs(lang)
for dir_ in ('i18n', 'i18n_extra')
)
for path in po_paths:
with suppress(FileNotFoundError):
yield file_path(path, env=env)
yield file_path(path)
def get_datafile_translation_path(module_name: str) -> Iterator[str]:
    """Yield the resolved paths of the XML/CSV data and demo files of a module."""
    from odoo.modules import Manifest  # noqa: PLC0415
    # if we are importing a module, we have an env, hide warnings
    manifest = Manifest.for_addon(module_name, display_warning=False) or {}
    for section in ('data', 'demo'):
        for relpath in manifest.get(section, ()):
            if not relpath.endswith(('.xml', '.csv')):
                continue
            yield file_path(join(module_name, relpath))
class CodeTranslations:
@ -1750,7 +1812,7 @@ class CodeTranslations:
# don't use it in the import logic
translations = {}
fileobj.seek(0)
reader = TranslationFileReader(fileobj, fileformat='po')
reader = translation_file_reader(fileobj, fileformat='po')
for row in reader:
if row.get('type') == 'code' and row.get('src') and filter_func(row):
translations[row['src']] = row['value']

View file

@ -0,0 +1,75 @@
import re
import urllib.parse
from urllib.parse import _WHATWG_C0_CONTROL_OR_SPACE
__all__ = ['urljoin']
def _contains_dot_segments(path: str) -> str:
# most servers decode url before doing dot segment resolutions
decoded_path = urllib.parse.unquote(path, errors='strict')
return any(seg in ('.', '..') for seg in decoded_path.split('/'))
def urljoin(base: str, extra: str) -> str:
    """Join a trusted base URL with a relative URL safely.

    Unlike standard URL joins that follow RFC 3986 (e.g., `urllib.parse.urljoin`),
    this function enforces strict behavior that better aligns with developer
    expectations and guards against path traversals, unplanned redirects, and
    accidental host/scheme overrides.

    - Behaves similarly to `base + '/' + extra`
    - Keeps scheme and netloc from `base`
    - An absolute `extra` is only accepted when its scheme, host and path
      prefix match `base` exactly; any other scheme/host raises
    - Forbids `.` and `..` path traversal (including percent-encoded forms)
    - Merges path/query/fragment

    :param base: Trusted base URL or path.
    :type base: str
    :param extra: Relative URL (`path`, `?query`, `#frag`). No scheme & host allowed unless it matches `base`
    :type extra: str
    :returns: joined URL.
    :rtype: str
    :raises AssertionError: If inputs are not strings.
    :raises ValueError: `extra` contains dot-segments or is an absolute URL
        not matching `base`.

    Examples::

        >>> urljoin('https://api.example.com/v1/?bar=fiz', '/users/42?bar=bob')
        'https://api.example.com/v1/users/42?bar=bob'

        >>> urljoin('https://example.com/foo', 'http://8.8.8.8/foo')
        Traceback (most recent call last):
        ...
        ValueError: Extra URL must use same scheme and host as base, and begin with base path

        >>> urljoin('https://api.example.com/data/', '/?lang=fr')
        'https://api.example.com/data/?lang=fr'
    """
    # NOTE(review): these asserts are stripped under `python -O`; callers must
    # not rely on them for validating untrusted input types.
    assert isinstance(base, str), "Base URL must be a string"
    assert isinstance(extra, str), "Extra URL must be a string"

    # base contributes scheme/netloc/path; its own query and fragment are dropped
    b_scheme, b_netloc, path, _, _ = urllib.parse.urlsplit(base)
    e_scheme, e_netloc, e_path, e_query, e_fragment = urllib.parse.urlsplit(extra)

    if e_scheme or e_netloc:
        # allow absolute extra URL if it matches base
        if (e_scheme != b_scheme) or (e_netloc != b_netloc) or not e_path.startswith(path):
            raise ValueError("Extra URL must use same scheme and host as base, and begin with base path")
        # keep only the part of extra's path that extends base's path
        e_path = e_path[len(path):]

    if e_path:
        # prevent urljoin("/", "\\example.com/") to resolve as absolute to "//example.com/" in a browser redirect
        # https://github.com/mozilla-firefox/firefox/blob/5e81b64f4ed88b610eb332e103744d68ee8b6c0d/netwerk/base/nsStandardURL.cpp#L2386-L2388
        e_path = e_path.lstrip('/\\' + _WHATWG_C0_CONTROL_OR_SPACE)
        path = f'{path}/{e_path}'

    # normalize: foo//bar -> foo/bar
    path = re.sub(r'/+', '/', path)

    # reject traversal after normalization, so encoded or slashed variants
    # cannot slip through
    if _contains_dot_segments(path):
        raise ValueError("Dot segments are not allowed")

    return urllib.parse.urlunsplit((b_scheme, b_netloc, path, e_query, e_fragment))

View file

@ -6,9 +6,9 @@ import logging
import os
import re
import odoo.orm.domains as domains
from lxml import etree
from odoo import tools
from odoo.osv.expression import DOMAIN_OPERATORS
_logger = logging.getLogger(__name__)
@ -41,6 +41,11 @@ IGNORED_IN_EXPRESSION = {
'unicode',
'set',
}
# Tokens acting as logical prefix operators in a domain expression,
# gathered from the orm Domain node classes (negation / conjunction /
# disjunction).
DOMAIN_OPERATORS = {
    domains.DomainNot.OPERATOR,
    domains.DomainAnd.OPERATOR,
    domains.DomainOr.OPERATOR,
}
def get_domain_value_names(domain):

View file

@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
""" Which - locate a command
* adapted from Brian Curtin's http://bugs.python.org/file15381/shutil_which.patch
@ -39,7 +39,7 @@
"""
__docformat__ = 'restructuredtext en'
__all__ = 'which which_files pathsep defpath defpathext F_OK R_OK W_OK X_OK'.split()
__all__ = ['F_OK', 'R_OK', 'W_OK', 'X_OK', 'defpath', 'defpathext', 'dirname', 'pathsep', 'which', 'which_files']
import sys
from os import access, defpath, pathsep, environ, F_OK, R_OK, W_OK, X_OK
@ -70,13 +70,15 @@ def which_files(file, mode=F_OK | X_OK, path=None, pathext=None):
>>> def test_which(expected, *args, **argd):
... result = list(which_files(*args, **argd))
... assert result == expected, 'which_files: %s != %s' % (result, expected)
... assert all(path in result for path in expected) if expected else not result, 'which_files: %s != %s' % (result, expected)
...
... try:
... result = [ which(*args, **argd) ]
... result = which(*args, **argd)
... path = expected[0]
... assert split(result)[1] == split(expected[0])[1], 'which: %s not same binary %s' % (result, expected)
... except IOError:
... result = []
... assert result[:1] == expected[:1], 'which: %s != %s' % (result[:1], expected[:1])
... result = None
... assert not expected, 'which: expecting %s' % expected
>>> if windows: cmd = environ['COMSPEC']
>>> if windows: test_which([cmd], 'cmd')

View file

@ -1,30 +0,0 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import locale
import time
import datetime

# Compatibility shim: on platforms whose ``locale`` module lacks
# ``nl_langinfo`` (e.g. Windows), provide the ``D_FMT``/``T_FMT`` constants
# and a best-effort ``nl_langinfo`` implementation so callers can query the
# current locale's date and time format strings everywhere.
if not hasattr(locale, 'D_FMT'):
    locale.D_FMT = 1

if not hasattr(locale, 'T_FMT'):
    locale.T_FMT = 2

if not hasattr(locale, 'nl_langinfo'):
    def nl_langinfo(param):
        """Return the locale's date (D_FMT) or time (T_FMT) format string.

        Works by rendering a reference date/time with ``%x``/``%X`` and
        substituting the known field values back with their strftime
        directives, thereby recovering the locale's format pattern.
        Returns None for any other ``param``.
        """
        if param == locale.D_FMT:
            ref = time.strptime('30/12/2004', '%d/%m/%Y')
            # struct_time[:6] is (year, month, day, hour, minute, second);
            # the previous [:-2] slice also passed tm_wday as datetime's
            # ``microsecond`` argument — harmless for %x, but incorrect.
            rendered = datetime.datetime(*ref[:6]).strftime('%x')
            # '2004' handles four-digit years, '04' locales showing two digits.
            for value, directive in (('30', '%d'), ('12', '%m'), ('2004', '%Y'), ('04', '%Y')):
                rendered = rendered.replace(value, directive)
            return rendered
        if param == locale.T_FMT:
            ref = time.strptime('13:24:56', '%H:%M:%S')
            rendered = datetime.datetime(*ref[:6]).strftime('%X')
            for value, directive in (('13', '%H'), ('24', '%M'), ('56', '%S')):
                rendered = rendered.replace(value, directive)
            return rendered
    locale.nl_langinfo = nl_langinfo

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""Utilities for generating, parsing and checking XML/XSD files on top of the lxml.etree module."""
import base64
@ -8,7 +7,6 @@ import re
import zipfile
from io import BytesIO
import requests
from lxml import etree
from odoo.exceptions import UserError
@ -232,6 +230,7 @@ def load_xsd_files_from_url(env, url, file_name=None, force_reload=False,
:rtype: odoo.api.ir.attachment | bool
:return: every XSD attachment created/fetched or False if an error occurred (see warning logs)
"""
import requests # noqa: PLC0415
try:
_logger.info("Fetching file/archive from given URL: %s", url)
response = requests.get(url, timeout=request_max_timeout)