18.0 vanilla

This commit is contained in:
Ernad Husremovic 2025-10-03 18:06:50 +02:00
parent d72e748793
commit 0a7ae8db93
337 changed files with 399651 additions and 232598 deletions

View file

@ -1,34 +1,32 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import _monkeypatches
from . import _monkeypatches_pytz
from werkzeug import urls
if not hasattr(urls, 'url_join'):
# see https://github.com/pallets/werkzeug/compare/2.3.0..3.0.0
# see https://github.com/pallets/werkzeug/blob/2.3.0/src/werkzeug/urls.py for replacement
from . import _monkeypatches_urls
from . import appdirs
from . import arabic_reshaper
from . import cloc
from . import constants
from . import pdf
from . import pycompat
from . import template_inheritance
from . import win32
from .barcode import *
from .parse_version import parse_version
from .barcode import check_barcode_encoding
from .cache import ormcache, ormcache_context
from .config import config
from .date_utils import *
from .float_utils import *
from .func import *
from .image import *
from .i18n import format_list, py_to_js_locale
from .image import image_process
from .json import json_default
from .mail import *
from .misc import *
from .query import Query, _generate_table_alias
from .query import Query
from .sql import *
from .template_inheritance import *
from .translate import *
from .xml_utils import *
from .convert import *
from .translate import _, html_translate, xml_translate, LazyTranslate
from .xml_utils import cleanup_xml_node, load_xsd_files_from_url, validate_xml_from_attachment
from .convert import convert_csv_import, convert_file, convert_sql_import, convert_xml_import
from . import osutil
from .js_transpiler import transpile_javascript, is_odoo_module, URL_RE, ODOO_MODULE_RE
from .sourcemap_generator import SourceMapGenerator
from .set_expression import SetDefinitions

View file

@ -1,167 +0,0 @@
import ast
import os
import logging
from email._policybase import _PolicyBase
from odoo import MIN_PY_VERSION
from shutil import copyfileobj
from types import CodeType
_logger = logging.getLogger(__name__)
try:
import num2words
from .num2words_patch import Num2Word_AR_Fixed
except ImportError:
_logger.warning("num2words is not available, Arabic number to words conversion will not work")
num2words = None
from urllib3 import PoolManager
from werkzeug.datastructures import FileStorage, MultiDict
from werkzeug.routing import Rule
from werkzeug.wrappers import Request, Response
from .json import scriptsafe
try:
from stdnum import util
except ImportError:
util = None
try:
    from xlrd import xlsx
except ImportError:
    # xlrd is optional: nothing to patch when it is absent.
    pass
else:
    from lxml import etree
    # xlrd.xlsx supports defusedxml, defusedxml's etree interface is broken
    # (missing ElementTree and thus ElementTree.iter) which causes a fallback to
    # Element.getiterator(), triggering a warning before 3.9 and an error from 3.9.
    #
    # We have defusedxml installed because zeep has a hard dep on defused and
    # doesn't want to drop it (mvantellingen/python-zeep#1014).
    #
    # Ignore the check and set the relevant flags directly using lxml as we have a
    # hard dependency on it.
    xlsx.ET = etree
    xlsx.ET_has_iterparse = True
    xlsx.Element_has_iter = True

# Replace werkzeug's FileStorage.save with a copyfileobj-based implementation
# streaming the upload in chunks (default buffer: 1 MiB).
FileStorage.save = lambda self, dst, buffer_size=1<<20: copyfileobj(self.stream, dst, buffer_size)
def _multidict_deepcopy(self, memo=None):
    # Accept (and ignore) a `memo` argument and delegate to the original
    # MultiDict.deepcopy — presumably so copy.deepcopy() can call it with a
    # memo dict regardless of the installed werkzeug version; TODO confirm
    # against the werkzeug release actually in use.
    return orig_deepcopy(self)

orig_deepcopy = MultiDict.deepcopy
MultiDict.deepcopy = _multidict_deepcopy
Request.json_module = Response.json_module = scriptsafe
# Only patch Rule._get_func_code when the attribute exists (it may be absent
# depending on the werkzeug version — hence the getattr guard).
get_func_code = getattr(Rule, '_get_func_code', None)
if get_func_code:
    @staticmethod
    def _get_func_code(code, name):
        # Wrap the original to enforce that only real code objects are
        # compiled into routing functions.
        assert isinstance(code, CodeType)
        return get_func_code(code, name)
    Rule._get_func_code = _get_func_code
orig_literal_eval = ast.literal_eval
def literal_eval(expr):
# limit the size of the expression to avoid segmentation faults
# the default limit is set to 100KiB
# can be overridden by setting the ODOO_LIMIT_LITEVAL_BUFFER buffer_size_environment variable
buffer_size = 102400
buffer_size_env = os.getenv("ODOO_LIMIT_LITEVAL_BUFFER")
if buffer_size_env:
if buffer_size_env.isdigit():
buffer_size = int(buffer_size_env)
else:
_logger.error("ODOO_LIMIT_LITEVAL_BUFFER has to be an integer, defaulting to 100KiB")
if isinstance(expr, str) and len(expr) > buffer_size:
raise ValueError("expression can't exceed buffer limit")
return orig_literal_eval(expr)
ast.literal_eval = literal_eval
# Guard forcing removal of the num2words workaround once the minimum supported
# Python version reaches 3.12 (per the message, the upstream library should be
# bumped instead by then).
if MIN_PY_VERSION >= (3, 12):
    raise RuntimeError("The num2words monkey patch is obsolete. Bump the version of the library to the latest available in the official package repository, if it hasn't already been done, and remove the patch.")
# Register the fixed Arabic converter only when num2words imported successfully.
if num2words:
    num2words.CONVERTER_CLASSES["ar"] = Num2Word_AR_Fixed()
# Cache of SOAP service proxies, keyed on (wsdlurl, timeout).
_soap_clients = {}

def new_get_soap_client(wsdlurl, timeout=30):
    """Return (and cache) a SOAP client for *wsdlurl*, with *timeout*
    applied both to the WSDL fetch and to the SOAP operations themselves.

    :param wsdlurl: URL of the WSDL document describing the service.
    :param timeout: timeout in seconds.
    :raises ImportError: when no supported SOAP library is installed.
    """
    # stdnum library does not set the timeout for the zeep Transport class correctly
    # (timeout is to fetch the wsdl and operation_timeout is to perform the call),
    # requiring us to monkey patch the get_soap_client function.
    # Can be removed when https://github.com/arthurdejong/python-stdnum/issues/444 is
    # resolved and the version of the dependency is updated.
    # The code is a copy of the original apart for the line related to the Transport class.
    # This was done to keep the code as similar to the original and to reduce the possibility
    # of introducing import errors, even though some imports are not in the requirements.
    # See https://github.com/odoo/odoo/pull/173359 for a more thorough explanation.
    if (wsdlurl, timeout) not in _soap_clients:
        try:
            from zeep.transports import Transport
            transport = Transport(operation_timeout=timeout, timeout=timeout)  # operational_timeout added here
            from zeep import CachingClient
            client = CachingClient(wsdlurl, transport=transport).service
        except ImportError:
            # fall back to non-caching zeep client
            # NOTE(review): `transport` is unbound here if the very first
            # import (zeep.transports) failed; in that case the plain zeep
            # Client import below fails too, so this branch effectively only
            # runs with `transport` defined — kept as-is to stay close to the
            # upstream stdnum code, but confirm against upstream.
            try:
                from zeep import Client
                client = Client(wsdlurl, transport=transport).service
            except ImportError:
                # other implementations require passing the proxy config
                try:
                    from urllib import getproxies
                except ImportError:
                    from urllib.request import getproxies
                # fall back to suds
                try:
                    from suds.client import Client
                    client = Client(
                        wsdlurl, proxy=getproxies(), timeout=timeout).service
                except ImportError:
                    # use pysimplesoap as last resort
                    try:
                        from pysimplesoap.client import SoapClient
                        client = SoapClient(
                            wsdl=wsdlurl, proxy=getproxies(), timeout=timeout)
                    except ImportError:
                        raise ImportError(
                            'No SOAP library (such as zeep) found')
        _soap_clients[(wsdlurl, timeout)] = client
    return _soap_clients[(wsdlurl, timeout)]

# Only patch when stdnum is actually installed (util is None otherwise).
if util:
    util.get_soap_client = new_get_soap_client
def pool_init(self, *args, **kwargs):
    # After running the original urllib3 PoolManager.__init__, give the
    # instance its own shallow copy of pool_classes_by_scheme so that
    # per-instance mutations do not leak into the mapping shared with every
    # other PoolManager.
    orig_pool_init(self, *args, **kwargs)
    self.pool_classes_by_scheme = {**self.pool_classes_by_scheme}

orig_pool_init = PoolManager.__init__
PoolManager.__init__ = pool_init
def policy_clone(self, **kwargs):
for arg in kwargs:
if arg.startswith("_") or "__" in arg:
raise AttributeError(f"{self.__class__.__name__!r} object has no attribute {arg!r}")
return orig_policy_clone(self, **kwargs)
orig_policy_clone = _PolicyBase.clone
_PolicyBase.clone = policy_clone

View file

@ -1,132 +0,0 @@
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
In Ubuntu Noble, some timezones were removed, leading to errors when trying to assign/access them.
This was partially fixed in the code by removing all references to old timezones, but one issue remains:
if a database contains timezones that are not defined in the OS, the resolution will fail and break
at runtime.
This patch proposes to alter `timezone` to fall back on the new canonical timezone if the timezone was removed.
This list was generated by checking all symlinks in /usr/share/zoneinfo in Ubuntu 22.04 that disappeared in Ubuntu 24.04.
This solution will work when moving a database from one server to another, even without migration.
This list could be improved for other purposes.
"""
import pytz
_tz_mapping = {
"Africa/Asmera": "Africa/Nairobi",
"America/Argentina/ComodRivadavia": "America/Argentina/Catamarca",
"America/Buenos_Aires": "America/Argentina/Buenos_Aires",
"America/Cordoba": "America/Argentina/Cordoba",
"America/Fort_Wayne": "America/Indiana/Indianapolis",
"America/Indianapolis": "America/Indiana/Indianapolis",
"America/Jujuy": "America/Argentina/Jujuy",
"America/Knox_IN": "America/Indiana/Knox",
"America/Louisville": "America/Kentucky/Louisville",
"America/Mendoza": "America/Argentina/Mendoza",
"America/Rosario": "America/Argentina/Cordoba",
"Antarctica/South_Pole": "Pacific/Auckland",
"Asia/Ashkhabad": "Asia/Ashgabat",
"Asia/Calcutta": "Asia/Kolkata",
"Asia/Chungking": "Asia/Shanghai",
"Asia/Dacca": "Asia/Dhaka",
"Asia/Katmandu": "Asia/Kathmandu",
"Asia/Macao": "Asia/Macau",
"Asia/Rangoon": "Asia/Yangon",
"Asia/Saigon": "Asia/Ho_Chi_Minh",
"Asia/Thimbu": "Asia/Thimphu",
"Asia/Ujung_Pandang": "Asia/Makassar",
"Asia/Ulan_Bator": "Asia/Ulaanbaatar",
"Atlantic/Faeroe": "Atlantic/Faroe",
"Australia/ACT": "Australia/Sydney",
"Australia/LHI": "Australia/Lord_Howe",
"Australia/North": "Australia/Darwin",
"Australia/NSW": "Australia/Sydney",
"Australia/Queensland": "Australia/Brisbane",
"Australia/South": "Australia/Adelaide",
"Australia/Tasmania": "Australia/Hobart",
"Australia/Victoria": "Australia/Melbourne",
"Australia/West": "Australia/Perth",
"Brazil/Acre": "America/Rio_Branco",
"Brazil/DeNoronha": "America/Noronha",
"Brazil/East": "America/Sao_Paulo",
"Brazil/West": "America/Manaus",
"Canada/Atlantic": "America/Halifax",
"Canada/Central": "America/Winnipeg",
"Canada/Eastern": "America/Toronto",
"Canada/Mountain": "America/Edmonton",
"Canada/Newfoundland": "America/St_Johns",
"Canada/Pacific": "America/Vancouver",
"Canada/Saskatchewan": "America/Regina",
"Canada/Yukon": "America/Whitehorse",
"Chile/Continental": "America/Santiago",
"Chile/EasterIsland": "Pacific/Easter",
"Cuba": "America/Havana",
"Egypt": "Africa/Cairo",
"Eire": "Europe/Dublin",
"Europe/Kiev": "Europe/Kyiv",
"Europe/Uzhgorod": "Europe/Kyiv",
"Europe/Zaporozhye": "Europe/Kyiv",
"GB": "Europe/London",
"GB-Eire": "Europe/London",
"GMT+0": "Etc/GMT",
"GMT-0": "Etc/GMT",
"GMT0": "Etc/GMT",
"Greenwich": "Etc/GMT",
"Hongkong": "Asia/Hong_Kong",
"Iceland": "Africa/Abidjan",
"Iran": "Asia/Tehran",
"Israel": "Asia/Jerusalem",
"Jamaica": "America/Jamaica",
"Japan": "Asia/Tokyo",
"Kwajalein": "Pacific/Kwajalein",
"Libya": "Africa/Tripoli",
"Mexico/BajaNorte": "America/Tijuana",
"Mexico/BajaSur": "America/Mazatlan",
"Mexico/General": "America/Mexico_City",
"Navajo": "America/Denver",
"NZ": "Pacific/Auckland",
"NZ-CHAT": "Pacific/Chatham",
"Pacific/Enderbury": "Pacific/Kanton",
"Pacific/Ponape": "Pacific/Guadalcanal",
"Pacific/Truk": "Pacific/Port_Moresby",
"Poland": "Europe/Warsaw",
"Portugal": "Europe/Lisbon",
"PRC": "Asia/Shanghai",
"ROC": "Asia/Taipei",
"ROK": "Asia/Seoul",
"Singapore": "Asia/Singapore",
"Türkiye": "Europe/Istanbul",
"UCT": "Etc/UTC",
"Universal": "Etc/UTC",
"US/Alaska": "America/Anchorage",
"US/Aleutian": "America/Adak",
"US/Arizona": "America/Phoenix",
"US/Central": "America/Chicago",
"US/Eastern": "America/New_York",
"US/East-Indiana": "America/Indiana/Indianapolis",
"US/Hawaii": "Pacific/Honolulu",
"US/Indiana-Starke": "America/Indiana/Knox",
"US/Michigan": "America/Detroit",
"US/Mountain": "America/Denver",
"US/Pacific": "America/Los_Angeles",
"US/Samoa": "Pacific/Pago_Pago",
"W-SU": "Europe/Moscow",
"Zulu": "Etc/UTC",
}
# Keep a handle on the unpatched resolver so the wrapper can delegate to it.
original_pytz_timezone = pytz.timezone

def timezone(name):
    """Like :func:`pytz.timezone`, but when *name* is unknown to pytz and
    appears in ``_tz_mapping``, resolve it to its canonical replacement
    before delegating to the original function."""
    if name not in pytz.all_timezones_set:
        name = _tz_mapping.get(name, name)
    return original_pytz_timezone(name)

pytz.timezone = timezone

File diff suppressed because it is too large Load diff

View file

@ -18,10 +18,10 @@ r"""
import logging
import os
import re
import json
import tempfile
from hashlib import sha1
from os import path, replace as rename
from odoo.tools.misc import pickle
from time import time
from werkzeug.datastructures import CallbackDict
@ -191,9 +191,9 @@ class FilesystemSessionStore(SessionStore):
def save(self, session):
fn = self.get_session_filename(session.sid)
fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix, dir=self.path)
f = os.fdopen(fd, "wb")
f = os.fdopen(fd, "w", encoding="utf-8")
try:
pickle.dump(dict(session), f, pickle.HIGHEST_PROTOCOL)
json.dump(dict(session), f)
finally:
f.close()
try:
@ -213,7 +213,7 @@ class FilesystemSessionStore(SessionStore):
if not self.is_valid_key(sid):
return self.new()
try:
f = open(self.get_session_filename(sid), "rb")
f = open(self.get_session_filename(sid), "r", encoding="utf-8")
except IOError:
_logger.debug('Could not load session from disk. Use empty session.', exc_info=True)
if self.renew_missing:
@ -222,7 +222,7 @@ class FilesystemSessionStore(SessionStore):
else:
try:
try:
data = pickle.load(f, errors={})
data = json.load(f)
except Exception:
_logger.debug('Could not load session data. Use empty session.', exc_info=True)
data = {}

View file

@ -0,0 +1,143 @@
# The following code was copied from the original author's repository
# at https://github.com/mpcabd/python-arabic-reshaper/tree/v3.0.0/arabic_reshaper
# Version: 3.0.0
# This work is licensed under the MIT License.
# To view a copy of this license, visit https://opensource.org/licenses/MIT
# Written by Abdullah Diab (mpcabd)
# Email: mpcabd@gmail.com
# Website: http://mpcabd.xyz
#
# This code was simplified by removing configuration (keeping only the default
# configuration) then constant-folding all the configuration items by hand.
import re
from itertools import repeat
from .letters import (UNSHAPED, ISOLATED, TATWEEL, ZWJ, LETTERS_ARABIC, FINAL,
INITIAL, MEDIAL, connects_with_letters_before_and_after,
connects_with_letter_before, connects_with_letter_after)
__all__ = ['reshape']
HARAKAT_RE = re.compile(
'['
'\u0610-\u061a'
'\u064b-\u065f'
'\u0670'
'\u06d6-\u06dc'
'\u06df-\u06e8'
'\u06ea-\u06ed'
'\u08d4-\u08e1'
'\u08d4-\u08ed'
'\u08e3-\u08ff'
']',
re.UNICODE | re.VERBOSE
)
LIGATURES_RE = re.compile("""
\u0627\u0644\u0644\u0647 # ARABIC LIGATURE ALLAH
| \u0644\u0627 # ARABIC LIGATURE LAM WITH ALEF
| \u0644\u0623 # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE
| \u0644\u0625 # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA BELOW
| \u0644\u0622 # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE
""", re.UNICODE | re.VERBOSE)
GROUP_INDEX_TO_LIGATURE_FORMs = [
('\N{ARABIC LIGATURE ALLAH ISOLATED FORM}', '', '', ''),
('\N{ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM}', '', '', '\N{ARABIC LIGATURE LAM WITH ALEF FINAL FORM}'),
('\N{ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM}', '', '', '\N{ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM}'),
('\N{ARABIC LIGATURE LAM WITH ALEF WITH HAMZA BELOW ISOLATED FORM}', '', '', '\N{ARABIC LIGATURE LAM WITH ALEF WITH HAMZA BELOW FINAL FORM}'),
('\N{ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM}', '', '', '\N{ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM}'),
]
def reshape(text):
    """Reshape Arabic *text* for display: map each letter to its contextual
    presentation form (isolated/initial/medial/final) and substitute the
    standard LAM/ALEF ligatures.

    Harakat (diacritics) are dropped, characters outside LETTERS_ARABIC are
    kept as-is, and ZWJ characters are consumed once shaping is done.
    """
    if not text:
        return ''
    output = []
    # Indexes into the (letter, form) pairs accumulated in `output`.
    LETTER = 0
    FORM = 1
    NOT_SUPPORTED = -1  # marks characters emitted verbatim, no shaping
    for letter in text:
        if HARAKAT_RE.match(letter):
            # Diacritics carry no shape of their own; they are also stripped
            # from `text` below before the ligature pass.
            pass
        elif letter not in LETTERS_ARABIC:
            output.append((letter, NOT_SUPPORTED))
        elif not output:  # first letter
            output.append((letter, ISOLATED))
        else:
            previous_letter = output[-1]
            # The new letter stays ISOLATED unless both it and the previous
            # letter can actually join across the boundary.
            if (
                previous_letter[FORM] == NOT_SUPPORTED or
                not connects_with_letter_before(letter, LETTERS_ARABIC) or
                not connects_with_letter_after(previous_letter[LETTER], LETTERS_ARABIC) or
                (previous_letter[FORM] == FINAL and not connects_with_letters_before_and_after(previous_letter[LETTER], LETTERS_ARABIC))
            ):
                output.append((letter, ISOLATED))
            elif previous_letter[FORM] == ISOLATED:
                output[-1] = (previous_letter[LETTER], INITIAL)
                output.append((letter, FINAL))
            # Otherwise, we will change the previous letter to connect
            # to the current letter
            else:
                output[-1] = (previous_letter[LETTER], MEDIAL)
                output.append((letter, FINAL))
        # Remove ZWJ if it's the second to last item as it won't be useful
        if len(output) > 1 and output[-2][LETTER] == ZWJ:
            output.pop(len(output) - 2)
    if output and output[-1][LETTER] == ZWJ:
        output.pop()
    # Clean text from Harakat to be able to find ligatures
    text = HARAKAT_RE.sub('', text)
    for match in LIGATURES_RE.finditer(text):
        # NOTE(review): LIGATURES_RE as written above defines no capture
        # groups, so match.groups() is empty and group_index is always -1
        # (selecting the last entry of GROUP_INDEX_TO_LIGATURE_FORMs).
        # Verify whether each alternative should be wrapped in parentheses.
        group_index = next((
            i for i, group in enumerate(match.groups()) if group
        ), -1)
        forms = GROUP_INDEX_TO_LIGATURE_FORMs[group_index]
        a, b = match.span()
        a_form = output[a][FORM]
        b_form = output[b - 1][FORM]
        # The ligature's own form, from the forms of its first (a) and
        # last (b) constituent letters:
        # +-----------+----------+---------+---------+----------+
        # | a \ b     | ISOLATED | INITIAL | MEDIAL  | FINAL    |
        # +-----------+----------+---------+---------+----------+
        # | ISOLATED  | ISOLATED | INITIAL | INITIAL | ISOLATED |
        # | INITIAL   | ISOLATED | INITIAL | INITIAL | ISOLATED |
        # | MEDIAL    | FINAL    | MEDIAL  | MEDIAL  | FINAL    |
        # | FINAL     | FINAL    | MEDIAL  | MEDIAL  | FINAL    |
        # +-----------+----------+---------+---------+----------+
        if a_form in (ISOLATED, INITIAL):
            if b_form in (ISOLATED, FINAL):
                ligature_form = ISOLATED
            else:
                ligature_form = INITIAL
        else:
            if b_form in (ISOLATED, FINAL):
                ligature_form = FINAL
            else:
                ligature_form = MEDIAL
        # Skip ligatures that have no glyph for the required form.
        if not forms[ligature_form]:
            continue
        # Replace the first letter with the ligature glyph and blank out the
        # remaining constituent slots so positions stay aligned with `text`.
        output[a] = (forms[ligature_form], NOT_SUPPORTED)
        output[a + 1:b] = repeat(('', NOT_SUPPORTED), b - 1 - a)
    result = []
    for o in output:
        if o[LETTER]:
            if o[FORM] == NOT_SUPPORTED or o[FORM] == UNSHAPED:
                result.append(o[LETTER])
            else:
                result.append(LETTERS_ARABIC[o[LETTER]][o[FORM]])
    return ''.join(result)

View file

@ -0,0 +1,539 @@
# The following code was copied from the original author's repository
# at https://github.com/mpcabd/python-arabic-reshaper/tree/v3.0.0/arabic_reshaper
# Version: 3.0.0
# This work is licensed under the MIT License.
# To view a copy of this license, visit https://opensource.org/licenses/MIT
# Written by Abdullah Diab (mpcabd)
# Email: mpcabd@gmail.com
# Website: http://mpcabd.xyz
# Each letter is of the format:
#
# ('<letter>', <replacement>)
#
# And replacement is of the format:
#
# ('<isolated>', '<initial>', '<medial>', '<final>')
#
# Where <letter> is the string to replace, and <isolated> is the replacement in
# case <letter> should be in isolated form, <initial> is the replacement in
# case <letter> should be in initial form, <medial> is the replacement in case
# <letter> should be in medial form, and <final> is the replacement in case
# <letter> should be in final form. If no replacement is specified for a form,
# then that means the letter doesn't support this form.
# Positional indexes into each letter's replacement tuple
# ('<isolated>', '<initial>', '<medial>', '<final>').
UNSHAPED = 255  # sentinel: character is emitted as-is, no contextual shaping
ISOLATED = 0
INITIAL = 1
MEDIAL = 2
FINAL = 3
# ARABIC TATWEEL (elongation character).
TATWEEL = '\u0640'
# ZERO WIDTH JOINER.
ZWJ = '\u200D'
LETTERS_ARABIC = {
# ARABIC LETTER HAMZA
'\u0621': ('\uFE80', '', '', ''),
# ARABIC LETTER ALEF WITH MADDA ABOVE
'\u0622': ('\uFE81', '', '', '\uFE82'),
# ARABIC LETTER ALEF WITH HAMZA ABOVE
'\u0623': ('\uFE83', '', '', '\uFE84'),
# ARABIC LETTER WAW WITH HAMZA ABOVE
'\u0624': ('\uFE85', '', '', '\uFE86'),
# ARABIC LETTER ALEF WITH HAMZA BELOW
'\u0625': ('\uFE87', '', '', '\uFE88'),
# ARABIC LETTER YEH WITH HAMZA ABOVE
'\u0626': ('\uFE89', '\uFE8B', '\uFE8C', '\uFE8A'),
# ARABIC LETTER ALEF
'\u0627': ('\uFE8D', '', '', '\uFE8E'),
# ARABIC LETTER BEH
'\u0628': ('\uFE8F', '\uFE91', '\uFE92', '\uFE90'),
# ARABIC LETTER TEH MARBUTA
'\u0629': ('\uFE93', '', '', '\uFE94'),
# ARABIC LETTER TEH
'\u062A': ('\uFE95', '\uFE97', '\uFE98', '\uFE96'),
# ARABIC LETTER THEH
'\u062B': ('\uFE99', '\uFE9B', '\uFE9C', '\uFE9A'),
# ARABIC LETTER JEEM
'\u062C': ('\uFE9D', '\uFE9F', '\uFEA0', '\uFE9E'),
# ARABIC LETTER HAH
'\u062D': ('\uFEA1', '\uFEA3', '\uFEA4', '\uFEA2'),
# ARABIC LETTER KHAH
'\u062E': ('\uFEA5', '\uFEA7', '\uFEA8', '\uFEA6'),
# ARABIC LETTER DAL
'\u062F': ('\uFEA9', '', '', '\uFEAA'),
# ARABIC LETTER THAL
'\u0630': ('\uFEAB', '', '', '\uFEAC'),
# ARABIC LETTER REH
'\u0631': ('\uFEAD', '', '', '\uFEAE'),
# ARABIC LETTER ZAIN
'\u0632': ('\uFEAF', '', '', '\uFEB0'),
# ARABIC LETTER SEEN
'\u0633': ('\uFEB1', '\uFEB3', '\uFEB4', '\uFEB2'),
# ARABIC LETTER SHEEN
'\u0634': ('\uFEB5', '\uFEB7', '\uFEB8', '\uFEB6'),
# ARABIC LETTER SAD
'\u0635': ('\uFEB9', '\uFEBB', '\uFEBC', '\uFEBA'),
# ARABIC LETTER DAD
'\u0636': ('\uFEBD', '\uFEBF', '\uFEC0', '\uFEBE'),
# ARABIC LETTER TAH
'\u0637': ('\uFEC1', '\uFEC3', '\uFEC4', '\uFEC2'),
# ARABIC LETTER ZAH
'\u0638': ('\uFEC5', '\uFEC7', '\uFEC8', '\uFEC6'),
# ARABIC LETTER AIN
'\u0639': ('\uFEC9', '\uFECB', '\uFECC', '\uFECA'),
# ARABIC LETTER GHAIN
'\u063A': ('\uFECD', '\uFECF', '\uFED0', '\uFECE'),
# ARABIC TATWEEL
TATWEEL: (TATWEEL, TATWEEL, TATWEEL, TATWEEL),
# ARABIC LETTER FEH
'\u0641': ('\uFED1', '\uFED3', '\uFED4', '\uFED2'),
# ARABIC LETTER QAF
'\u0642': ('\uFED5', '\uFED7', '\uFED8', '\uFED6'),
# ARABIC LETTER KAF
'\u0643': ('\uFED9', '\uFEDB', '\uFEDC', '\uFEDA'),
# ARABIC LETTER LAM
'\u0644': ('\uFEDD', '\uFEDF', '\uFEE0', '\uFEDE'),
# ARABIC LETTER MEEM
'\u0645': ('\uFEE1', '\uFEE3', '\uFEE4', '\uFEE2'),
# ARABIC LETTER NOON
'\u0646': ('\uFEE5', '\uFEE7', '\uFEE8', '\uFEE6'),
# ARABIC LETTER HEH
'\u0647': ('\uFEE9', '\uFEEB', '\uFEEC', '\uFEEA'),
# ARABIC LETTER WAW
'\u0648': ('\uFEED', '', '', '\uFEEE'),
# ARABIC LETTER (UIGHUR KAZAKH KIRGHIZ)? ALEF MAKSURA
'\u0649': ('\uFEEF', '\uFBE8', '\uFBE9', '\uFEF0'),
# ARABIC LETTER YEH
'\u064A': ('\uFEF1', '\uFEF3', '\uFEF4', '\uFEF2'),
# ARABIC LETTER ALEF WASLA
'\u0671': ('\uFB50', '', '', '\uFB51'),
# ARABIC LETTER U WITH HAMZA ABOVE
'\u0677': ('\uFBDD', '', '', ''),
# ARABIC LETTER TTEH
'\u0679': ('\uFB66', '\uFB68', '\uFB69', '\uFB67'),
# ARABIC LETTER TTEHEH
'\u067A': ('\uFB5E', '\uFB60', '\uFB61', '\uFB5F'),
# ARABIC LETTER BEEH
'\u067B': ('\uFB52', '\uFB54', '\uFB55', '\uFB53'),
# ARABIC LETTER PEH
'\u067E': ('\uFB56', '\uFB58', '\uFB59', '\uFB57'),
# ARABIC LETTER TEHEH
'\u067F': ('\uFB62', '\uFB64', '\uFB65', '\uFB63'),
# ARABIC LETTER BEHEH
'\u0680': ('\uFB5A', '\uFB5C', '\uFB5D', '\uFB5B'),
# ARABIC LETTER NYEH
'\u0683': ('\uFB76', '\uFB78', '\uFB79', '\uFB77'),
# ARABIC LETTER DYEH
'\u0684': ('\uFB72', '\uFB74', '\uFB75', '\uFB73'),
# ARABIC LETTER TCHEH
'\u0686': ('\uFB7A', '\uFB7C', '\uFB7D', '\uFB7B'),
# ARABIC LETTER TCHEHEH
'\u0687': ('\uFB7E', '\uFB80', '\uFB81', '\uFB7F'),
# ARABIC LETTER DDAL
'\u0688': ('\uFB88', '', '', '\uFB89'),
# ARABIC LETTER DAHAL
'\u068C': ('\uFB84', '', '', '\uFB85'),
# ARABIC LETTER DDAHAL
'\u068D': ('\uFB82', '', '', '\uFB83'),
# ARABIC LETTER DUL
'\u068E': ('\uFB86', '', '', '\uFB87'),
# ARABIC LETTER RREH
'\u0691': ('\uFB8C', '', '', '\uFB8D'),
# ARABIC LETTER JEH
'\u0698': ('\uFB8A', '', '', '\uFB8B'),
# ARABIC LETTER VEH
'\u06A4': ('\uFB6A', '\uFB6C', '\uFB6D', '\uFB6B'),
# ARABIC LETTER PEHEH
'\u06A6': ('\uFB6E', '\uFB70', '\uFB71', '\uFB6F'),
# ARABIC LETTER KEHEH
'\u06A9': ('\uFB8E', '\uFB90', '\uFB91', '\uFB8F'),
# ARABIC LETTER NG
'\u06AD': ('\uFBD3', '\uFBD5', '\uFBD6', '\uFBD4'),
# ARABIC LETTER GAF
'\u06AF': ('\uFB92', '\uFB94', '\uFB95', '\uFB93'),
# ARABIC LETTER NGOEH
'\u06B1': ('\uFB9A', '\uFB9C', '\uFB9D', '\uFB9B'),
# ARABIC LETTER GUEH
'\u06B3': ('\uFB96', '\uFB98', '\uFB99', '\uFB97'),
# ARABIC LETTER NOON GHUNNA
'\u06BA': ('\uFB9E', '', '', '\uFB9F'),
# ARABIC LETTER RNOON
'\u06BB': ('\uFBA0', '\uFBA2', '\uFBA3', '\uFBA1'),
# ARABIC LETTER HEH DOACHASHMEE
'\u06BE': ('\uFBAA', '\uFBAC', '\uFBAD', '\uFBAB'),
# ARABIC LETTER HEH WITH YEH ABOVE
'\u06C0': ('\uFBA4', '', '', '\uFBA5'),
# ARABIC LETTER HEH GOAL
'\u06C1': ('\uFBA6', '\uFBA8', '\uFBA9', '\uFBA7'),
# ARABIC LETTER KIRGHIZ OE
'\u06C5': ('\uFBE0', '', '', '\uFBE1'),
# ARABIC LETTER OE
'\u06C6': ('\uFBD9', '', '', '\uFBDA'),
# ARABIC LETTER U
'\u06C7': ('\uFBD7', '', '', '\uFBD8'),
# ARABIC LETTER YU
'\u06C8': ('\uFBDB', '', '', '\uFBDC'),
# ARABIC LETTER KIRGHIZ YU
'\u06C9': ('\uFBE2', '', '', '\uFBE3'),
# ARABIC LETTER VE
'\u06CB': ('\uFBDE', '', '', '\uFBDF'),
# ARABIC LETTER FARSI YEH
'\u06CC': ('\uFBFC', '\uFBFE', '\uFBFF', '\uFBFD'),
# ARABIC LETTER E
'\u06D0': ('\uFBE4', '\uFBE6', '\uFBE7', '\uFBE5'),
# ARABIC LETTER YEH BARREE
'\u06D2': ('\uFBAE', '', '', '\uFBAF'),
# ARABIC LETTER YEH BARREE WITH HAMZA ABOVE
'\u06D3': ('\uFBB0', '', '', '\uFBB1'),
# ZWJ
ZWJ: (ZWJ, ZWJ, ZWJ, ZWJ),
}
LETTERS_ARABIC_V2 = {
# ARABIC LETTER HAMZA
'\u0621': ('\uFE80', '', '', ''),
# ARABIC LETTER ALEF WITH MADDA ABOVE
'\u0622': ('\u0622', '', '', '\uFE82'),
# ARABIC LETTER ALEF WITH HAMZA ABOVE
'\u0623': ('\u0623', '', '', '\uFE84'),
# ARABIC LETTER WAW WITH HAMZA ABOVE
'\u0624': ('\u0624', '', '', '\uFE86'),
# ARABIC LETTER ALEF WITH HAMZA BELOW
'\u0625': ('\u0625', '', '', '\uFE88'),
# ARABIC LETTER YEH WITH HAMZA ABOVE
'\u0626': ('\u0626', '\uFE8B', '\uFE8C', '\uFE8A'),
# ARABIC LETTER ALEF
'\u0627': ('\u0627', '', '', '\uFE8E'),
# ARABIC LETTER BEH
'\u0628': ('\u0628', '\uFE91', '\uFE92', '\uFE90'),
# ARABIC LETTER TEH MARBUTA
'\u0629': ('\u0629', '', '', '\uFE94'),
# ARABIC LETTER TEH
'\u062A': ('\u062A', '\uFE97', '\uFE98', '\uFE96'),
# ARABIC LETTER THEH
'\u062B': ('\u062B', '\uFE9B', '\uFE9C', '\uFE9A'),
# ARABIC LETTER JEEM
'\u062C': ('\u062C', '\uFE9F', '\uFEA0', '\uFE9E'),
# ARABIC LETTER HAH
'\u062D': ('\uFEA1', '\uFEA3', '\uFEA4', '\uFEA2'),
# ARABIC LETTER KHAH
'\u062E': ('\u062E', '\uFEA7', '\uFEA8', '\uFEA6'),
# ARABIC LETTER DAL
'\u062F': ('\u062F', '', '', '\uFEAA'),
# ARABIC LETTER THAL
'\u0630': ('\u0630', '', '', '\uFEAC'),
# ARABIC LETTER REH
'\u0631': ('\u0631', '', '', '\uFEAE'),
# ARABIC LETTER ZAIN
'\u0632': ('\u0632', '', '', '\uFEB0'),
# ARABIC LETTER SEEN
'\u0633': ('\u0633', '\uFEB3', '\uFEB4', '\uFEB2'),
# ARABIC LETTER SHEEN
'\u0634': ('\u0634', '\uFEB7', '\uFEB8', '\uFEB6'),
# ARABIC LETTER SAD
'\u0635': ('\u0635', '\uFEBB', '\uFEBC', '\uFEBA'),
# ARABIC LETTER DAD
'\u0636': ('\u0636', '\uFEBF', '\uFEC0', '\uFEBE'),
# ARABIC LETTER TAH
'\u0637': ('\u0637', '\uFEC3', '\uFEC4', '\uFEC2'),
# ARABIC LETTER ZAH
'\u0638': ('\u0638', '\uFEC7', '\uFEC8', '\uFEC6'),
# ARABIC LETTER AIN
'\u0639': ('\u0639', '\uFECB', '\uFECC', '\uFECA'),
# ARABIC LETTER GHAIN
'\u063A': ('\u063A', '\uFECF', '\uFED0', '\uFECE'),
# ARABIC TATWEEL
TATWEEL: (TATWEEL, TATWEEL, TATWEEL, TATWEEL),
# ARABIC LETTER FEH
'\u0641': ('\u0641', '\uFED3', '\uFED4', '\uFED2'),
# ARABIC LETTER QAF
'\u0642': ('\u0642', '\uFED7', '\uFED8', '\uFED6'),
# ARABIC LETTER KAF
'\u0643': ('\u0643', '\uFEDB', '\uFEDC', '\uFEDA'),
# ARABIC LETTER LAM
'\u0644': ('\u0644', '\uFEDF', '\uFEE0', '\uFEDE'),
# ARABIC LETTER MEEM
'\u0645': ('\u0645', '\uFEE3', '\uFEE4', '\uFEE2'),
# ARABIC LETTER NOON
'\u0646': ('\u0646', '\uFEE7', '\uFEE8', '\uFEE6'),
# ARABIC LETTER HEH
'\u0647': ('\u0647', '\uFEEB', '\uFEEC', '\uFEEA'),
# ARABIC LETTER WAW
'\u0648': ('\u0648', '', '', '\uFEEE'),
# ARABIC LETTER (UIGHUR KAZAKH KIRGHIZ)? ALEF MAKSURA
'\u0649': ('\u0649', '\uFBE8', '\uFBE9', '\uFEF0'),
# ARABIC LETTER YEH
'\u064A': ('\u064A', '\uFEF3', '\uFEF4', '\uFEF2'),
# ARABIC LETTER ALEF WASLA
'\u0671': ('\u0671', '', '', '\uFB51'),
# ARABIC LETTER U WITH HAMZA ABOVE
'\u0677': ('\u0677', '', '', ''),
# ARABIC LETTER TTEH
'\u0679': ('\u0679', '\uFB68', '\uFB69', '\uFB67'),
# ARABIC LETTER TTEHEH
'\u067A': ('\u067A', '\uFB60', '\uFB61', '\uFB5F'),
# ARABIC LETTER BEEH
'\u067B': ('\u067B', '\uFB54', '\uFB55', '\uFB53'),
# ARABIC LETTER PEH
'\u067E': ('\u067E', '\uFB58', '\uFB59', '\uFB57'),
# ARABIC LETTER TEHEH
'\u067F': ('\u067F', '\uFB64', '\uFB65', '\uFB63'),
# ARABIC LETTER BEHEH
'\u0680': ('\u0680', '\uFB5C', '\uFB5D', '\uFB5B'),
# ARABIC LETTER NYEH
'\u0683': ('\u0683', '\uFB78', '\uFB79', '\uFB77'),
# ARABIC LETTER DYEH
'\u0684': ('\u0684', '\uFB74', '\uFB75', '\uFB73'),
# ARABIC LETTER TCHEH
'\u0686': ('\u0686', '\uFB7C', '\uFB7D', '\uFB7B'),
# ARABIC LETTER TCHEHEH
'\u0687': ('\u0687', '\uFB80', '\uFB81', '\uFB7F'),
# ARABIC LETTER DDAL
'\u0688': ('\u0688', '', '', '\uFB89'),
# ARABIC LETTER DAHAL
'\u068C': ('\u068C', '', '', '\uFB85'),
# ARABIC LETTER DDAHAL
'\u068D': ('\u068D', '', '', '\uFB83'),
# ARABIC LETTER DUL
'\u068E': ('\u068E', '', '', '\uFB87'),
# ARABIC LETTER RREH
'\u0691': ('\u0691', '', '', '\uFB8D'),
# ARABIC LETTER JEH
'\u0698': ('\u0698', '', '', '\uFB8B'),
# ARABIC LETTER VEH
'\u06A4': ('\u06A4', '\uFB6C', '\uFB6D', '\uFB6B'),
# ARABIC LETTER PEHEH
'\u06A6': ('\u06A6', '\uFB70', '\uFB71', '\uFB6F'),
# ARABIC LETTER KEHEH
'\u06A9': ('\u06A9', '\uFB90', '\uFB91', '\uFB8F'),
# ARABIC LETTER NG
'\u06AD': ('\u06AD', '\uFBD5', '\uFBD6', '\uFBD4'),
# ARABIC LETTER GAF
'\u06AF': ('\u06AF', '\uFB94', '\uFB95', '\uFB93'),
# ARABIC LETTER NGOEH
'\u06B1': ('\u06B1', '\uFB9C', '\uFB9D', '\uFB9B'),
# ARABIC LETTER GUEH
'\u06B3': ('\u06B3', '\uFB98', '\uFB99', '\uFB97'),
# ARABIC LETTER NOON GHUNNA
'\u06BA': ('\u06BA', '', '', '\uFB9F'),
# ARABIC LETTER RNOON
'\u06BB': ('\u06BB', '\uFBA2', '\uFBA3', '\uFBA1'),
# ARABIC LETTER HEH DOACHASHMEE
'\u06BE': ('\u06BE', '\uFBAC', '\uFBAD', '\uFBAB'),
# ARABIC LETTER HEH WITH YEH ABOVE
'\u06C0': ('\u06C0', '', '', '\uFBA5'),
# ARABIC LETTER HEH GOAL
'\u06C1': ('\u06C1', '\uFBA8', '\uFBA9', '\uFBA7'),
# ARABIC LETTER KIRGHIZ OE
'\u06C5': ('\u06C5', '', '', '\uFBE1'),
# ARABIC LETTER OE
'\u06C6': ('\u06C6', '', '', '\uFBDA'),
# ARABIC LETTER U
'\u06C7': ('\u06C7', '', '', '\uFBD8'),
# ARABIC LETTER YU
'\u06C8': ('\u06C8', '', '', '\uFBDC'),
# ARABIC LETTER KIRGHIZ YU
'\u06C9': ('\u06C9', '', '', '\uFBE3'),
# ARABIC LETTER VE
'\u06CB': ('\u06CB', '', '', '\uFBDF'),
# ARABIC LETTER FARSI YEH
'\u06CC': ('\u06CC', '\uFBFE', '\uFBFF', '\uFBFD'),
# ARABIC LETTER E
'\u06D0': ('\u06D0', '\uFBE6', '\uFBE7', '\uFBE5'),
# ARABIC LETTER YEH BARREE
'\u06D2': ('\u06D2', '', '', '\uFBAF'),
# ARABIC LETTER YEH BARREE WITH HAMZA ABOVE
'\u06D3': ('\u06D3', '', '', '\uFBB1'),
# Kurdish letter YEAH
'\u06ce': ('\uE004', '\uE005', '\uE006', '\uE004'),
# Kurdish letter Hamza same as arabic Teh without the point
'\u06d5': ('\u06d5', '', '', '\uE000'),
# ZWJ
ZWJ: (ZWJ, ZWJ, ZWJ, ZWJ),
}
LETTERS_KURDISH = {
# ARABIC LETTER HAMZA
'\u0621': ('\uFE80', '', '', ''),
# ARABIC LETTER ALEF WITH MADDA ABOVE
'\u0622': ('\u0622', '', '', '\uFE82'),
# ARABIC LETTER ALEF WITH HAMZA ABOVE
'\u0623': ('\u0623', '', '', '\uFE84'),
# ARABIC LETTER WAW WITH HAMZA ABOVE
'\u0624': ('\u0624', '', '', '\uFE86'),
# ARABIC LETTER ALEF WITH HAMZA BELOW
'\u0625': ('\u0625', '', '', '\uFE88'),
# ARABIC LETTER YEH WITH HAMZA ABOVE
'\u0626': ('\u0626', '\uFE8B', '\uFE8C', '\uFE8A'),
# ARABIC LETTER ALEF
'\u0627': ('\u0627', '', '', '\uFE8E'),
# ARABIC LETTER BEH
'\u0628': ('\u0628', '\uFE91', '\uFE92', '\uFE90'),
# ARABIC LETTER TEH MARBUTA
'\u0629': ('\u0629', '', '', '\uFE94'),
# ARABIC LETTER TEH
'\u062A': ('\u062A', '\uFE97', '\uFE98', '\uFE96'),
# ARABIC LETTER THEH
'\u062B': ('\u062B', '\uFE9B', '\uFE9C', '\uFE9A'),
# ARABIC LETTER JEEM
'\u062C': ('\u062C', '\uFE9F', '\uFEA0', '\uFE9E'),
# ARABIC LETTER HAH
'\u062D': ('\uFEA1', '\uFEA3', '\uFEA4', '\uFEA2'),
# ARABIC LETTER KHAH
'\u062E': ('\u062E', '\uFEA7', '\uFEA8', '\uFEA6'),
# ARABIC LETTER DAL
'\u062F': ('\u062F', '', '', '\uFEAA'),
# ARABIC LETTER THAL
'\u0630': ('\u0630', '', '', '\uFEAC'),
# ARABIC LETTER REH
'\u0631': ('\u0631', '', '', '\uFEAE'),
# ARABIC LETTER ZAIN
'\u0632': ('\u0632', '', '', '\uFEB0'),
# ARABIC LETTER SEEN
'\u0633': ('\u0633', '\uFEB3', '\uFEB4', '\uFEB2'),
# ARABIC LETTER SHEEN
'\u0634': ('\u0634', '\uFEB7', '\uFEB8', '\uFEB6'),
# ARABIC LETTER SAD
'\u0635': ('\u0635', '\uFEBB', '\uFEBC', '\uFEBA'),
# ARABIC LETTER DAD
'\u0636': ('\u0636', '\uFEBF', '\uFEC0', '\uFEBE'),
# ARABIC LETTER TAH
'\u0637': ('\u0637', '\uFEC3', '\uFEC4', '\uFEC2'),
# ARABIC LETTER ZAH
'\u0638': ('\u0638', '\uFEC7', '\uFEC8', '\uFEC6'),
# ARABIC LETTER AIN
'\u0639': ('\u0639', '\uFECB', '\uFECC', '\uFECA'),
# ARABIC LETTER GHAIN
'\u063A': ('\u063A', '\uFECF', '\uFED0', '\uFECE'),
# ARABIC TATWEEL
TATWEEL: (TATWEEL, TATWEEL, TATWEEL, TATWEEL),
# ARABIC LETTER FEH
'\u0641': ('\u0641', '\uFED3', '\uFED4', '\uFED2'),
# ARABIC LETTER QAF
'\u0642': ('\u0642', '\uFED7', '\uFED8', '\uFED6'),
# ARABIC LETTER KAF
'\u0643': ('\u0643', '\uFEDB', '\uFEDC', '\uFEDA'),
# ARABIC LETTER LAM
'\u0644': ('\u0644', '\uFEDF', '\uFEE0', '\uFEDE'),
# ARABIC LETTER MEEM
'\u0645': ('\u0645', '\uFEE3', '\uFEE4', '\uFEE2'),
# ARABIC LETTER NOON
'\u0646': ('\u0646', '\uFEE7', '\uFEE8', '\uFEE6'),
# ARABIC LETTER HEH
'\u0647': ('\uFBAB', '\uFBAB', '\uFBAB', '\uFBAB'),
# ARABIC LETTER WAW
'\u0648': ('\u0648', '', '', '\uFEEE'),
# ARABIC LETTER (UIGHUR KAZAKH KIRGHIZ)? ALEF MAKSURA
'\u0649': ('\u0649', '\uFBE8', '\uFBE9', '\uFEF0'),
# ARABIC LETTER YEH
'\u064A': ('\u064A', '\uFEF3', '\uFEF4', '\uFEF2'),
# ARABIC LETTER ALEF WASLA
'\u0671': ('\u0671', '', '', '\uFB51'),
# ARABIC LETTER U WITH HAMZA ABOVE
'\u0677': ('\u0677', '', '', ''),
# ARABIC LETTER TTEH
'\u0679': ('\u0679', '\uFB68', '\uFB69', '\uFB67'),
# ARABIC LETTER TTEHEH
'\u067A': ('\u067A', '\uFB60', '\uFB61', '\uFB5F'),
# ARABIC LETTER BEEH
'\u067B': ('\u067B', '\uFB54', '\uFB55', '\uFB53'),
# ARABIC LETTER PEH
'\u067E': ('\u067E', '\uFB58', '\uFB59', '\uFB57'),
# ARABIC LETTER TEHEH
'\u067F': ('\u067F', '\uFB64', '\uFB65', '\uFB63'),
# ARABIC LETTER BEHEH
'\u0680': ('\u0680', '\uFB5C', '\uFB5D', '\uFB5B'),
# ARABIC LETTER NYEH
'\u0683': ('\u0683', '\uFB78', '\uFB79', '\uFB77'),
# ARABIC LETTER DYEH
'\u0684': ('\u0684', '\uFB74', '\uFB75', '\uFB73'),
# ARABIC LETTER TCHEH
'\u0686': ('\u0686', '\uFB7C', '\uFB7D', '\uFB7B'),
# ARABIC LETTER TCHEHEH
'\u0687': ('\u0687', '\uFB80', '\uFB81', '\uFB7F'),
# ARABIC LETTER DDAL
'\u0688': ('\u0688', '', '', '\uFB89'),
# ARABIC LETTER DAHAL
'\u068C': ('\u068C', '', '', '\uFB85'),
# ARABIC LETTER DDAHAL
'\u068D': ('\u068D', '', '', '\uFB83'),
# ARABIC LETTER DUL
'\u068E': ('\u068E', '', '', '\uFB87'),
# ARABIC LETTER RREH
'\u0691': ('\u0691', '', '', '\uFB8D'),
# ARABIC LETTER JEH
'\u0698': ('\u0698', '', '', '\uFB8B'),
# ARABIC LETTER VEH
'\u06A4': ('\u06A4', '\uFB6C', '\uFB6D', '\uFB6B'),
# ARABIC LETTER PEHEH
'\u06A6': ('\u06A6', '\uFB70', '\uFB71', '\uFB6F'),
# ARABIC LETTER KEHEH
'\u06A9': ('\u06A9', '\uFB90', '\uFB91', '\uFB8F'),
# ARABIC LETTER NG
'\u06AD': ('\u06AD', '\uFBD5', '\uFBD6', '\uFBD4'),
# ARABIC LETTER GAF
'\u06AF': ('\u06AF', '\uFB94', '\uFB95', '\uFB93'),
# ARABIC LETTER NGOEH
'\u06B1': ('\u06B1', '\uFB9C', '\uFB9D', '\uFB9B'),
# ARABIC LETTER GUEH
'\u06B3': ('\u06B3', '\uFB98', '\uFB99', '\uFB97'),
# ARABIC LETTER NOON GHUNNA
'\u06BA': ('\u06BA', '', '', '\uFB9F'),
# ARABIC LETTER RNOON
'\u06BB': ('\u06BB', '\uFBA2', '\uFBA3', '\uFBA1'),
# ARABIC LETTER HEH DOACHASHMEE
'\u06BE': ('\u06BE', '\uFBAC', '\uFBAD', '\uFBAB'),
# ARABIC LETTER HEH WITH YEH ABOVE
'\u06C0': ('\u06C0', '', '', '\uFBA5'),
# ARABIC LETTER HEH GOAL
'\u06C1': ('\u06C1', '\uFBA8', '\uFBA9', '\uFBA7'),
# ARABIC LETTER KIRGHIZ OE
'\u06C5': ('\u06C5', '', '', '\uFBE1'),
# ARABIC LETTER OE
'\u06C6': ('\u06C6', '', '', '\uFBDA'),
# ARABIC LETTER U
'\u06C7': ('\u06C7', '', '', '\uFBD8'),
# ARABIC LETTER YU
'\u06C8': ('\u06C8', '', '', '\uFBDC'),
# ARABIC LETTER KIRGHIZ YU
'\u06C9': ('\u06C9', '', '', '\uFBE3'),
# ARABIC LETTER VE
'\u06CB': ('\u06CB', '', '', '\uFBDF'),
# ARABIC LETTER FARSI YEH
'\u06CC': ('\u06CC', '\uFBFE', '\uFBFF', '\uFBFD'),
# ARABIC LETTER E
'\u06D0': ('\u06D0', '\uFBE6', '\uFBE7', '\uFBE5'),
# ARABIC LETTER YEH BARREE
'\u06D2': ('\u06D2', '', '', '\uFBAF'),
# ARABIC LETTER YEH BARREE WITH HAMZA ABOVE
'\u06D3': ('\u06D3', '', '', '\uFBB1'),
# Kurdish letter YEAH
'\u06ce': ('\uE004', '\uE005', '\uE006', '\uE004'),
# Kurdish letter Hamza same as arabic Teh without the point
'\u06d5': ('\u06d5', '', '', '\uE000'),
# ZWJ
ZWJ: (ZWJ, ZWJ, ZWJ, ZWJ),
}
def connects_with_letter_before(letter, LETTERS):
    """Return a truthy value when *letter* can join to the preceding letter.

    A letter connects backwards when its forms table defines a final or a
    medial presentation form (empty string means the form does not exist).
    """
    forms = LETTERS.get(letter)
    if forms is None:
        return False
    return forms[FINAL] or forms[MEDIAL]
def connects_with_letter_after(letter, LETTERS):
    """Return a truthy value when *letter* can join to the following letter.

    A letter connects forwards when its forms table defines an initial or a
    medial presentation form (empty string means the form does not exist).
    """
    forms = LETTERS.get(letter)
    if forms is None:
        return False
    return forms[INITIAL] or forms[MEDIAL]
def connects_with_letters_before_and_after(letter, LETTERS):
    """Return a truthy value when *letter* can join on both sides.

    Only a letter with a medial presentation form can be connected to both
    its neighbours (empty string means the form does not exist).
    """
    forms = LETTERS.get(letter)
    if forms is None:
        return False
    return forms[MEDIAL]

View file

@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
__all__ = ['check_barcode_encoding']
def get_barcode_check_digit(numeric_barcode):
""" Computes and returns the barcode check digit. The used algorithm
follows the GTIN specifications and can be used by all compatible

View file

@ -6,6 +6,7 @@ from collections import Counter, defaultdict
from decorator import decorator
from inspect import signature, Parameter
import logging
import time
import warnings
unsafe_eval = eval
@ -15,12 +16,14 @@ _logger = logging.getLogger(__name__)
class ormcache_counter(object):
""" Statistic counters for cache entries. """
__slots__ = ['hit', 'miss', 'err']
__slots__ = ['hit', 'miss', 'err', 'gen_time', 'cache_name']
def __init__(self):
self.hit = 0
self.miss = 0
self.err = 0
self.gen_time = 0
self.cache_name = None
@property
def ratio(self):
@ -64,7 +67,8 @@ class ormcache(object):
def add_value(self, *args, cache_value=None, **kwargs):
model = args[0]
d, key0, _ = self.lru(model)
d, key0, counter = self.lru(model)
counter.cache_name = self.cache_name
key = key0 + self.key(*args, **kwargs)
d[key] = cache_value
@ -101,7 +105,10 @@ class ormcache(object):
return r
except KeyError:
counter.miss += 1
counter.cache_name = self.cache_name
start = time.time()
value = d[key] = self.method(*args, **kwargs)
counter.gen_time += time.time() - start
return value
except TypeError:
_logger.warning("cache lookup error on %r", key, exc_info=True)
@ -113,7 +120,6 @@ class ormcache(object):
warnings.warn('Deprecated method ormcache.clear(model, *args), use registry.clear_cache() instead')
model.pool.clear_all_caches()
class ormcache_context(ormcache):
""" This LRU cache decorator is a variant of :class:`ormcache`, with an
extra parameter ``keys`` that defines a sequence of dictionary keys. Those
@ -142,32 +148,29 @@ class ormcache_context(ormcache):
self.key = unsafe_eval(code)
def log_ormcache_stats(sig=None, frame=None):
def log_ormcache_stats(sig=None, frame=None): # noqa: ARG001 (arguments are there for signals)
""" Log statistics of ormcache usage by database, model, and method. """
from odoo.modules.registry import Registry
import threading
me = threading.current_thread()
me_dbname = getattr(me, 'dbname', 'n/a')
def _log_ormcache_stats(cache_name, cache):
entries = Counter(k[:2] for k in cache.d)
# show entries sorted by model name, method name
for key in sorted(entries, key=lambda key: (key[0], key[1].__name__)):
model, method = key
stat = STAT[(dbname, model, method)]
_logger.info(
"%s, %6d entries, %6d hit, %6d miss, %6d err, %4.1f%% ratio, for %s.%s",
cache_name.rjust(25), entries[key], stat.hit, stat.miss, stat.err, stat.ratio, model, method.__name__,
)
for dbname, reg in sorted(Registry.registries.d.items()):
# set logger prefix to dbname
me.dbname = dbname
for cache_name, cache in reg._Registry__caches.items():
_log_ormcache_stats(cache_name, cache)
me.dbname = me_dbname
cache_entries = {}
current_db = None
cache_stats = ['Caches stats:']
for (dbname, model, method), stat in sorted(STAT.items(), key=lambda k: (k[0][0] or '~', k[0][1], k[0][2].__name__)):
dbname_display = dbname or "<no_db>"
if current_db != dbname_display:
current_db = dbname_display
cache_stats.append(f"Database {dbname_display}")
if dbname: # mainly for MockPool
if (dbname, stat.cache_name) not in cache_entries:
cache = Registry.registries.d[dbname]._Registry__caches[stat.cache_name]
cache_entries[dbname, stat.cache_name] = Counter(k[:2] for k in cache.d)
nb_entries = cache_entries[dbname, stat.cache_name][model, method]
else:
nb_entries = 0
cache_name = stat.cache_name.rjust(25)
cache_stats.append(
f"{cache_name}, {nb_entries:6d} entries, {stat.hit:6d} hit, {stat.miss:6d} miss, {stat.err:6d} err, {stat.gen_time:10.3f}s time, {stat.ratio:6.1f}% ratio for {model}.{method.__name__}"
)
_logger.info('\n'.join(cache_stats))
def get_cache_key_counter(bound_method, *args, **kwargs):

View file

@ -287,7 +287,7 @@ class Cloc(object):
self.count_customization(env)
def count_database(self, database):
registry = odoo.registry(config['db_name'])
registry = odoo.modules.registry.Registry(config['db_name'])
with registry.cursor() as cr:
uid = odoo.SUPERUSER_ID
env = odoo.api.Environment(cr, uid, {})

View file

@ -75,7 +75,7 @@ class configmanager(object):
self.options = {
'admin_passwd': 'admin',
'csv_internal_sep': ',',
'publisher_warranty_url': 'http://services.openerp.com/publisher-warranty/',
'publisher_warranty_url': 'http://services.odoo.com/publisher-warranty/',
'reportgz': False,
'root_path': None,
'websocket_keep_alive_timeout': 3600,
@ -87,7 +87,6 @@ class configmanager(object):
self.blacklist_for_save = set([
'publisher_warranty_url', 'load_language', 'root_path',
'init', 'save', 'config', 'update', 'stop_after_init', 'dev_mode', 'shell_interface',
'longpolling_port',
])
# dictionary mapping option destination (keys in self.options) to MyOptions.
@ -140,8 +139,6 @@ class configmanager(object):
"Keep empty to listen on all interfaces (0.0.0.0)")
group.add_option("-p", "--http-port", dest="http_port", my_default=8069,
help="Listen port for the main HTTP service", type="int", metavar="PORT")
group.add_option("--longpolling-port", dest="longpolling_port", my_default=0,
help="Deprecated alias to the gevent-port option", type="int", metavar="PORT")
group.add_option("--gevent-port", dest="gevent_port", my_default=8072,
help="Listen port for the gevent worker", type="int", metavar="PORT")
group.add_option("--no-http", dest="http_enable", action="store_false", my_default=True,
@ -259,8 +256,12 @@ class configmanager(object):
group.add_option("--pg_path", dest="pg_path", help="specify the pg executable path")
group.add_option("--db_host", dest="db_host", my_default=False,
help="specify the database host")
group.add_option("--db_replica_host", dest="db_replica_host", my_default=False,
help="specify the replica host. Specify an empty db_replica_host to use the default unix socket.")
group.add_option("--db_port", dest="db_port", my_default=False,
help="specify the database port", type="int")
group.add_option("--db_replica_port", dest="db_replica_port", my_default=False,
help="specify the replica port", type="int")
group.add_option("--db_sslmode", dest="db_sslmode", type="choice", my_default='prefer',
choices=['disable', 'allow', 'prefer', 'require', 'verify-ca', 'verify-full'],
help="specify the database ssl connection mode (see PostgreSQL documentation)")
@ -341,10 +342,18 @@ class configmanager(object):
help="Maximum allowed virtual memory per worker (in bytes), when reached the worker be "
"reset after the current request (default 2048MiB).",
type="int")
group.add_option("--limit-memory-soft-gevent", dest="limit_memory_soft_gevent", my_default=False,
help="Maximum allowed virtual memory per gevent worker (in bytes), when reached the worker will be "
"reset after the current request. Defaults to `--limit-memory-soft`.",
type="int")
group.add_option("--limit-memory-hard", dest="limit_memory_hard", my_default=2560 * 1024 * 1024,
help="Maximum allowed virtual memory per worker (in bytes), when reached, any memory "
"allocation will fail (default 2560MiB).",
type="int")
group.add_option("--limit-memory-hard-gevent", dest="limit_memory_hard_gevent", my_default=False,
help="Maximum allowed virtual memory per gevent worker (in bytes), when reached, any memory "
"allocation will fail. Defaults to `--limit-memory-hard`.",
type="int")
group.add_option("--limit-time-cpu", dest="limit_time_cpu", my_default=60,
help="Maximum allowed CPU time per request (default 60).",
type="int")
@ -370,7 +379,7 @@ class configmanager(object):
# generate default config
self._parse_config()
def parse_config(self, args=None):
def parse_config(self, args: list[str] | None = None, *, setup_logging: bool | None = None) -> None:
""" Parse the configuration file (if any) and the command-line
arguments.
@ -386,7 +395,18 @@ class configmanager(object):
odoo.tools.config.parse_config(sys.argv[1:])
"""
opt = self._parse_config(args)
odoo.netsvc.init_logger()
if setup_logging is not False:
odoo.netsvc.init_logger()
# warn after having done setup, so it has a chance to show up
# (mostly once this warning is bumped to DeprecationWarning proper)
if setup_logging is None:
warnings.warn(
"As of Odoo 18, it's recommended to specify whether"
" you want Odoo to setup its own logging (or want to"
" handle it yourself)",
category=PendingDeprecationWarning,
stacklevel=2,
)
self._warn_deprecated_options()
odoo.modules.module.initialize_sys_path()
return opt
@ -456,9 +476,9 @@ class configmanager(object):
self.options['server_wide_modules'] = 'base,web'
# if defined do not take the configfile value even if the defined value is None
keys = ['gevent_port', 'http_interface', 'http_port', 'longpolling_port', 'http_enable', 'x_sendfile',
'db_name', 'db_user', 'db_password', 'db_host', 'db_sslmode',
'db_port', 'db_template', 'logfile', 'pidfile', 'smtp_port',
keys = ['gevent_port', 'http_interface', 'http_port', 'http_enable', 'x_sendfile',
'db_name', 'db_user', 'db_password', 'db_host', 'db_replica_host', 'db_sslmode',
'db_port', 'db_replica_port', 'db_template', 'logfile', 'pidfile', 'smtp_port',
'email_from', 'smtp_server', 'smtp_user', 'smtp_password', 'from_filter',
'smtp_ssl_certificate_filename', 'smtp_ssl_private_key_filename',
'db_maxconn', 'db_maxconn_gevent', 'import_partial', 'addons_path', 'upgrade_path', 'pre_upgrade_scripts',
@ -495,7 +515,7 @@ class configmanager(object):
posix_keys = [
'workers',
'limit_memory_hard', 'limit_memory_soft',
'limit_memory_hard', 'limit_memory_hard_gevent', 'limit_memory_soft', 'limit_memory_soft_gevent',
'limit_time_cpu', 'limit_time_real', 'limit_request', 'limit_time_real_cron'
]
@ -575,13 +595,6 @@ class configmanager(object):
return opt
def _warn_deprecated_options(self):
if self.options.get('longpolling_port', 0):
warnings.warn(
"The longpolling-port is a deprecated alias to "
"the gevent-port option, please use the latter.",
DeprecationWarning)
self.options['gevent_port'] = self.options.pop('longpolling_port')
for old_option_name, new_option_name in [
('geoip_database', 'geoip_city_db'),
('osv_memory_age_limit', 'transient_age_limit')
@ -799,8 +812,7 @@ class configmanager(object):
return os.path.join(self['data_dir'], 'filestore', dbname)
def set_admin_password(self, new_password):
hash_password = crypt_context.hash if hasattr(crypt_context, 'hash') else crypt_context.encrypt
self.options['admin_passwd'] = hash_password(new_password)
self.options['admin_passwd'] = crypt_context.hash(new_password)
def verify_admin_password(self, password):
"""Verifies the super-admin password, possibly updating the stored hash if needed"""

View file

@ -5,20 +5,17 @@ __all__ = [
'convert_file', 'convert_sql_import',
'convert_csv_import', 'convert_xml_import'
]
import base64
import csv
import io
import logging
import os.path
import pprint
import re
import subprocess
import warnings
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import pytz
from dateutil.relativedelta import relativedelta
from lxml import etree, builder
try:
import jingtrang
@ -26,33 +23,22 @@ except ImportError:
jingtrang = None
import odoo
from . import pycompat
from .config import config
from .misc import file_open, file_path, SKIPPED_ELEMENT_TYPES
from .translate import _
from odoo import SUPERUSER_ID, api
from odoo.exceptions import ValidationError
from .safe_eval import safe_eval as s_eval, pytz, time
_logger = logging.getLogger(__name__)
from .safe_eval import safe_eval as s_eval, pytz, time
safe_eval = lambda expr, ctx={}: s_eval(expr, ctx, nocopy=True)
def safe_eval(expr, ctx={}):
return s_eval(expr, ctx, nocopy=True)
class ParseError(Exception):
...
class RecordDictWrapper(dict):
"""
Used to pass a record as locals in eval:
records do not strictly behave like dict, so we force them to.
"""
def __init__(self, record):
self.record = record
def __getitem__(self, key):
if key in self.record:
return self.record[key]
return dict.__getitem__(self, key)
def _get_idref(self, env, model_str, idref):
idref2 = dict(idref,
Command=odoo.fields.Command,
@ -90,8 +76,7 @@ def _eval_xml(self, node, env):
if node.tag in ('field','value'):
t = node.get('type','char')
f_model = node.get('model')
if node.get('search'):
f_search = node.get("search")
if f_search := node.get('search'):
f_use = node.get("use",'id')
f_name = node.get("name")
idref2 = {}
@ -110,8 +95,7 @@ def _eval_xml(self, node, env):
if isinstance(f_val, tuple):
f_val = f_val[0]
return f_val
a_eval = node.get('eval')
if a_eval:
if a_eval := node.get('eval'):
idref2 = _get_idref(self, env, f_model, self.idref)
try:
return safe_eval(a_eval, idref2)
@ -144,44 +128,44 @@ def _eval_xml(self, node, env):
if t == 'html':
return _process("".join(etree.tostring(n, method='html', encoding='unicode') for n in node))
data = node.text
if node.get('file'):
with file_open(node.get('file'), 'rb', env=env) as f:
if t == 'base64':
with file_open(node.get('file'), 'rb', env=env) as f:
return base64.b64encode(f.read())
with file_open(node.get('file'), env=env) as f:
data = f.read()
else:
data = node.text or ''
if t == 'base64':
return base64.b64encode(data)
match t:
case 'file':
path = data.strip()
try:
file_path(os.path.join(self.module, path))
except FileNotFoundError:
raise FileNotFoundError(
f"No such file or directory: {path!r} in {self.module}"
) from None
return '%s,%s' % (self.module, path)
case 'char':
return data
case 'int':
d = data.strip()
if d == 'None':
return None
return int(d)
case 'float':
return float(data.strip())
case 'list':
return [_eval_xml(self, n, env) for n in node.iterchildren('value')]
case 'tuple':
return tuple(_eval_xml(self, n, env) for n in node.iterchildren('value'))
case 'base64':
raise ValueError("base64 type is only compatible with file data")
case t:
raise ValueError(f"Unknown type {t!r}")
# after that, only text content makes sense
data = pycompat.to_text(data)
if t == 'file':
path = data.strip()
try:
file_path(os.path.join(self.module, path))
except FileNotFoundError:
raise IOError("No such file or directory: '%s' in %s" % (
path, self.module))
return '%s,%s' % (self.module, path)
if t == 'char':
return data
if t == 'int':
d = data.strip()
if d == 'None':
return None
return int(d)
if t == 'float':
return float(data.strip())
if t in ('list','tuple'):
res=[]
for n in node.iterchildren(tag='value'):
res.append(_eval_xml(self, n, env))
if t=='tuple':
return tuple(res)
return res
elif node.tag == "function":
model_str = node.get('model')
model = env[model_str]
@ -189,9 +173,8 @@ def _eval_xml(self, node, env):
# determine arguments
args = []
kwargs = {}
a_eval = node.get('eval')
if a_eval:
if a_eval := node.get('eval'):
idref2 = _get_idref(self, env, model_str, self.idref)
args = list(safe_eval(a_eval, idref2))
for child in node:
@ -254,16 +237,14 @@ form: module.record_id""" % (xml_id,)
d_model = rec.get("model")
records = self.env[d_model]
d_search = rec.get("search")
if d_search:
if d_search := rec.get("search"):
idref = _get_idref(self, self.env, d_model, {})
try:
records = records.search(safe_eval(d_search, idref))
except ValueError:
_logger.warning('Skipping deletion for failed search `%r`', d_search, exc_info=True)
d_id = rec.get("id")
if d_id:
if d_id := rec.get("id"):
try:
records += records.browse(self.id_get(d_id))
except ValueError:
@ -365,16 +346,14 @@ form: module.record_id""" % (xml_id,)
if not rec_id:
return None
record = env['ir.model.data']._load_xmlid(xid)
for child in rec.xpath('.//record[@id]'):
sub_xid = child.get("id")
self._test_xml_id(sub_xid)
sub_xid = self.make_xml_id(sub_xid)
sub_record = env['ir.model.data']._load_xmlid(sub_xid)
if sub_record:
self.idref[sub_xid] = sub_record.id
if record := env['ir.model.data']._load_xmlid(xid):
for child in rec.xpath('.//record[@id]'):
sub_xid = child.get("id")
self._test_xml_id(sub_xid)
sub_xid = self.make_xml_id(sub_xid)
if sub_record := env['ir.model.data']._load_xmlid(sub_xid):
self.idref[sub_xid] = sub_record.id
if record:
# if the resource already exists, don't update it but store
# its database id (can be useful)
self.idref[rec_id] = record.id
@ -384,10 +363,11 @@ form: module.record_id""" % (xml_id,)
return None
# else create it normally
foreign_record_to_create = False
if xid and xid.partition('.')[0] != self.module:
# updating a record created by another module
record = self.env['ir.model.data']._load_xmlid(xid)
if not record:
if not record and not (foreign_record_to_create := nodeattr2bool(rec, 'forcecreate')): # Allow foreign records if explicitely stated
if self.noupdate and not nodeattr2bool(rec, 'forcecreate', True):
# if it doesn't exist and we shouldn't create it, skip it
return None
@ -395,18 +375,16 @@ form: module.record_id""" % (xml_id,)
res = {}
sub_records = []
for field in rec.findall('./field'):
for field in rec.iterchildren('field'):
#TODO: most of this code is duplicated above (in _eval_xml)...
f_name = field.get("name")
f_ref = field.get("ref")
f_search = field.get("search")
f_model = field.get("model")
if not f_model and f_name in model._fields:
f_model = model._fields[f_name].comodel_name
f_use = field.get("use",'') or 'id'
f_val = False
if f_search:
if f_search := field.get("search"):
idref2 = _get_idref(self, env, f_model, self.idref)
q = safe_eval(f_search, idref2)
assert f_model, 'Define an attribute model="..." in your .XML file!'
@ -421,7 +399,7 @@ form: module.record_id""" % (xml_id,)
# otherwise (we are probably in a many2one field),
# take the first element of the search
f_val = s[0][f_use]
elif f_ref:
elif f_ref := field.get("ref"):
if f_name in model._fields and model._fields[f_name].type == 'reference':
val = self.model_id_get(f_ref)
f_val = val[0] + ',' + str(val[1])
@ -443,7 +421,7 @@ form: module.record_id""" % (xml_id,)
elif field_type == 'boolean' and isinstance(f_val, str):
f_val = str2bool(f_val)
elif field_type == 'one2many':
for child in field.findall('./record'):
for child in field.iterchildren('record'):
sub_records.append((child, model._fields[f_name].inverse_name))
if isinstance(f_val, str):
# We do not want to write on the field since we will write
@ -461,6 +439,8 @@ form: module.record_id""" % (xml_id,)
res['sequence'] = sequence
data = dict(xml_id=xid, values=res, noupdate=self.noupdate)
if foreign_record_to_create:
model = model.with_context(foreign_record_to_create=foreign_record_to_create)
record = model._load_records([data], self.mode == 'update')
if rec_id:
self.idref[rec_id] = record.id
@ -650,7 +630,7 @@ def convert_csv_import(env, module, fname, csvcontent, idref=None, mode='init',
env = env(context=dict(env.context, lang=None))
filename, _ext = os.path.splitext(os.path.basename(fname))
model = filename.split('-')[0]
reader = pycompat.csv_reader(io.BytesIO(csvcontent), quotechar='"', delimiter=',')
reader = csv.reader(io.StringIO(csvcontent.decode()), quotechar='"', delimiter=',')
fields = next(reader)
if not (mode == 'init' or 'id' in fields):
@ -674,7 +654,12 @@ def convert_csv_import(env, module, fname, csvcontent, idref=None, mode='init',
if any(msg['type'] == 'error' for msg in result['messages']):
# Report failed import and abort module install
warning_msg = "\n".join(msg['message'] for msg in result['messages'])
raise Exception(_('Module loading %s failed: file %s could not be processed:\n %s') % (module, fname, warning_msg))
raise Exception(env._(
"Module loading %(module)s failed: file %(file)s could not be processed:\n%(message)s",
module=module,
file=fname,
message=warning_msg,
))
def convert_xml_import(env, module, xmlfile, idref=None, mode='init', noupdate=False, report=None):
doc = etree.parse(xmlfile)

View file

@ -1,18 +1,27 @@
# -*- coding: utf-8 -*-
import math
import calendar
import math
from datetime import date, datetime, time
from typing import Tuple
from typing import Tuple, TypeVar, Literal, Iterator, Type
import babel
import pytz
from dateutil.relativedelta import relativedelta, weekdays
from .func import lazy
from odoo.loglevels import ustr
D = TypeVar('D', date, datetime)
def date_type(value):
__all__ = [
'date_range',
'get_fiscal_year',
'get_month',
'get_quarter',
'get_quarter_number',
'get_timedelta',
]
def date_type(value: D) -> Type[D]:
''' Return either the datetime.datetime class or datetime.date type whether `value` is a datetime or a date.
:param value: A datetime.datetime or datetime.date object.
@ -21,49 +30,39 @@ def date_type(value):
return datetime if isinstance(value, datetime) else date
def get_month(date):
def get_month(date: D) -> Tuple[D, D]:
''' Compute the month dates range on which the 'date' parameter belongs to.
:param date: A datetime.datetime or datetime.date object.
:return: A tuple (date_from, date_to) having the same object type as the 'date' parameter.
'''
date_from = date_type(date)(date.year, date.month, 1)
date_to = date_type(date)(date.year, date.month, calendar.monthrange(date.year, date.month)[1])
return date_from, date_to
return date.replace(day=1), date.replace(day=calendar.monthrange(date.year, date.month)[1])
def get_quarter_number(date):
def get_quarter_number(date: date) -> int:
''' Get the number of the quarter on which the 'date' parameter belongs to.
:param date: A datetime.datetime or datetime.date object.
:return: A [1-4] integer.
'''
return math.ceil(date.month / 3)
def get_quarter(date):
def get_quarter(date: D) -> Tuple[D, D]:
''' Compute the quarter dates range on which the 'date' parameter belongs to.
:param date: A datetime.datetime or datetime.date object.
:return: A tuple (date_from, date_to) having the same object type as the 'date' parameter.
'''
quarter_number = get_quarter_number(date)
month_from = ((quarter_number - 1) * 3) + 1
date_from = date_type(date)(date.year, month_from, 1)
date_to = (date_from + relativedelta(months=2))
date_from = date.replace(month=month_from, day=1)
date_to = date_from + relativedelta(months=2)
date_to = date_to.replace(day=calendar.monthrange(date_to.year, date_to.month)[1])
return date_from, date_to
def get_fiscal_year(date, day=31, month=12):
def get_fiscal_year(date: D, day: int = 31, month: int = 12) -> Tuple[D, D]:
''' Compute the fiscal year dates range on which the 'date' parameter belongs to.
A fiscal year is the period used by governments for accounting purposes and vary between countries.
By default, calling this method with only one parameter gives the calendar year because the ending date of the
fiscal year is set to the YYYY-12-31.
:param date: A datetime.datetime or datetime.date object.
:param date: A date belonging to the fiscal year
:param day: The day of month the fiscal year ends.
:param month: The month of year the fiscal year ends.
:return: A tuple (date_from, date_to) having the same object type as the 'date' parameter.
:return: The start and end dates of the fiscal year.
'''
def fix_day(year, month, day):
@ -72,28 +71,23 @@ def get_fiscal_year(date, day=31, month=12):
return max_day
return min(day, max_day)
day = fix_day(date.year, month, day)
date_to = date_type(date)(date.year, month, day)
date_to = date.replace(month=month, day=fix_day(date.year, month, day))
if date <= date_to:
date_from = date_to - relativedelta(years=1)
day = fix_day(date_from.year, date_from.month, date_from.day)
date_from = date_type(date)(date_from.year, date_from.month, day)
date_from = date_from.replace(day=day)
date_from += relativedelta(days=1)
else:
date_from = date_to + relativedelta(days=1)
date_to = date_to + relativedelta(years=1)
day = fix_day(date_to.year, date_to.month, date_to.day)
date_to = date_type(date)(date_to.year, date_to.month, day)
date_to = date_to.replace(day=day)
return date_from, date_to
def get_timedelta(qty, granularity):
"""
Helper to get a `relativedelta` object for the given quantity and interval unit.
:param qty: the number of unit to apply on the timedelta to return
:param granularity: Type of period in string, can be year, quarter, month, week, day or hour.
def get_timedelta(qty: int, granularity: Literal['hour', 'day', 'week', 'month', 'year']):
""" Helper to get a `relativedelta` object for the given quantity and interval unit.
"""
switch = {
'hour': relativedelta(hours=qty),
@ -105,7 +99,10 @@ def get_timedelta(qty, granularity):
return switch[granularity]
def start_of(value, granularity):
Granularity = Literal['year', 'quarter', 'month', 'week', 'day', 'hour']
def start_of(value: D, granularity: Granularity) -> D:
"""
Get start of a time period from a date or a datetime.
@ -144,7 +141,7 @@ def start_of(value, granularity):
return datetime.combine(result, time.min) if is_datetime else result
def end_of(value, granularity):
def end_of(value: D, granularity: Granularity) -> D:
"""
Get end of a time period from a date or a datetime.
@ -183,7 +180,7 @@ def end_of(value, granularity):
return datetime.combine(result, time.max) if is_datetime else result
def add(value, *args, **kwargs):
def add(value: D, *args, **kwargs) -> D:
"""
Return the sum of ``value`` and a :class:`relativedelta`.
@ -195,7 +192,7 @@ def add(value, *args, **kwargs):
return value + relativedelta(*args, **kwargs)
def subtract(value, *args, **kwargs):
def subtract(value: D, *args, **kwargs) -> D:
"""
Return the difference between ``value`` and a :class:`relativedelta`.
@ -206,29 +203,16 @@ def subtract(value, *args, **kwargs):
"""
return value - relativedelta(*args, **kwargs)
def json_default(obj):
"""
Properly serializes date and datetime objects.
"""
from odoo import fields
if isinstance(obj, datetime):
return fields.Datetime.to_string(obj)
if isinstance(obj, date):
return fields.Date.to_string(obj)
if isinstance(obj, lazy):
return obj._value
return ustr(obj)
def date_range(start, end, step=relativedelta(months=1)):
def date_range(start: D, end: D, step: relativedelta = relativedelta(months=1)) -> Iterator[datetime]:
"""Date range generator with a step interval.
:param date | datetime start: beginning date of the range.
:param date | datetime end: ending date of the range.
:param relativedelta step: interval of the range.
:param start: beginning date of the range.
:param end: ending date of the range.
:param step: interval of the range.
:return: a range of datetime from start to end.
:rtype: Iterator[datetime]
"""
if isinstance(start, datetime) and isinstance(end, datetime):
are_naive = start.tzinfo is None and end.tzinfo is None
are_utc = start.tzinfo == pytz.utc and end.tzinfo == pytz.utc
@ -247,6 +231,9 @@ def date_range(start, end, step=relativedelta(months=1)):
post_process = start.tzinfo.localize if start.tzinfo else lambda dt: dt
elif isinstance(start, date) and isinstance(end, date):
# FIXME: not correctly typed, and will break if the step is a fractional
# day: `relativedelta` will return a datetime, which can't be
# compared with a `date`
dt, end_dt = start, end
post_process = lambda dt: dt

View file

@ -1,10 +1,17 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from __future__ import print_function
import builtins
import math
__all__ = [
"float_compare",
"float_is_zero",
"float_repr",
"float_round",
"float_split",
"float_split_str",
]
def round(f):
# P3's builtin round differs from P2 in the following manner:
@ -22,16 +29,22 @@ def round(f):
# copysign ensures round(-0.) -> -0 *and* result is a float
return math.copysign(roundf, f)
def _float_check_precision(precision_digits=None, precision_rounding=None):
assert (precision_digits is not None or precision_rounding is not None) and \
not (precision_digits and precision_rounding),\
"exactly one of precision_digits and precision_rounding must be specified"
assert precision_rounding is None or precision_rounding > 0,\
"precision_rounding must be positive, got %s" % precision_rounding
if precision_digits is not None:
return 10 ** -precision_digits
if precision_rounding is not None and precision_digits is None:
assert precision_rounding > 0,\
f"precision_rounding must be positive, got {precision_rounding}"
elif precision_digits is not None and precision_rounding is None:
# TODO: `int`s will also get the `is_integer` method starting from python 3.12
assert float(precision_digits).is_integer() and precision_digits >= 0,\
f"precision_digits must be a non-negative integer, got {precision_digits}"
precision_rounding = 10 ** -precision_digits
else:
msg = "exactly one of precision_digits and precision_rounding must be specified"
raise AssertionError(msg)
return precision_rounding
def float_round(value, precision_digits=None, precision_rounding=None, rounding_method='HALF-UP'):
"""Return ``value`` rounded to ``precision_digits`` decimal digits,
minimizing IEEE-754 floating point representation errors, and applying
@ -43,7 +56,7 @@ def float_round(value, precision_digits=None, precision_rounding=None, rounding_
:param float value: the value to round
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param rounding_method: the rounding method used:
- 'HALF-UP' will round to the closest number with ties going away from zero.
@ -63,7 +76,20 @@ def float_round(value, precision_digits=None, precision_rounding=None, rounding_
# In order to easily support rounding to arbitrary 'steps' (e.g. coin values),
# we normalize the value before rounding it as an integer, and de-normalize
# after rounding: e.g. float_round(1.3, precision_rounding=.5) == 1.5
# Due to IEE754 float/double representation limits, the approximation of the
def normalize(val):
return val / rounding_factor
def denormalize(val):
return val * rounding_factor
# inverting small rounding factors reduces rounding errors
if rounding_factor < 1:
rounding_factor = float_invert(rounding_factor)
normalize, denormalize = denormalize, normalize
normalized_value = normalize(value)
# Due to IEEE-754 float/double representation limits, the approximation of the
# real value may be slightly below the tie limit, resulting in an error of
# 1 unit in the last place (ulp) after rounding.
# For example 2.675 == 2.6749999999999998.
@ -71,47 +97,32 @@ def float_round(value, precision_digits=None, precision_rounding=None, rounding_
# the order of magnitude of the value, to tip the tie-break in the right
# direction.
# Credit: discussion with OpenERP community members on bug 882036
epsilon_magnitude = math.log2(abs(normalized_value))
# `2**(epsilon_magnitude - 52)` would be the minimal size, but we increase it to be
# more tolerant of inaccuracies accumulated after multiple floating point operations
epsilon = 2**(epsilon_magnitude - 50)
normalized_value = value / rounding_factor # normalize
sign = math.copysign(1.0, normalized_value)
epsilon_magnitude = math.log(abs(normalized_value), 2)
epsilon = 2**(epsilon_magnitude-52)
match rounding_method:
case 'HALF-UP': # 0.5 rounds away from 0
result = round(normalized_value + math.copysign(epsilon, normalized_value))
case 'HALF-EVEN': # 0.5 rounds towards closest even number
integral = math.floor(normalized_value)
remainder = abs(normalized_value - integral)
is_half = abs(0.5 - remainder) < epsilon
# if is_half & integral is odd, add odd bit to make it even
result = integral + (integral & 1) if is_half else round(normalized_value)
case 'HALF-DOWN': # 0.5 rounds towards 0
result = round(normalized_value - math.copysign(epsilon, normalized_value))
case 'UP': # round to number furthest from zero
result = math.trunc(normalized_value + math.copysign(1 - epsilon, normalized_value))
case 'DOWN': # round to number closest to zero
result = math.trunc(normalized_value + math.copysign(epsilon, normalized_value))
case _:
msg = f"unknown rounding method: {rounding_method}"
raise ValueError(msg)
# TIE-BREAKING: UP/DOWN (for ceiling[resp. flooring] operations)
# When rounding the value up[resp. down], we instead subtract[resp. add] the epsilon value
# as the approximation of the real value may be slightly *above* the
# tie limit, this would result in incorrectly rounding up[resp. down] to the next number
# The math.ceil[resp. math.floor] operation is applied on the absolute value in order to
# round "away from zero" and not "towards infinity", then the sign is
# restored.
return denormalize(result)
if rounding_method == 'UP':
normalized_value -= sign*epsilon
rounded_value = math.ceil(abs(normalized_value)) * sign
elif rounding_method == 'DOWN':
normalized_value += sign*epsilon
rounded_value = math.floor(abs(normalized_value)) * sign
# TIE-BREAKING: HALF-EVEN
# We want to apply HALF-EVEN tie-breaking rules, i.e. 0.5 rounds towards closest even number.
elif rounding_method == 'HALF-EVEN':
rounded_value = math.copysign(builtins.round(normalized_value), normalized_value)
# TIE-BREAKING: HALF-DOWN
# We want to apply HALF-DOWN tie-breaking rules, i.e. 0.5 rounds towards 0.
elif rounding_method == 'HALF-DOWN':
normalized_value -= math.copysign(epsilon, normalized_value)
rounded_value = round(normalized_value)
# TIE-BREAKING: HALF-UP (for normal rounding)
# We want to apply HALF-UP tie-breaking rules, i.e. 0.5 rounds away from 0.
else:
normalized_value += math.copysign(epsilon, normalized_value)
rounded_value = round(normalized_value) # round to integer
result = rounded_value * rounding_factor # de-normalize
return result
def float_is_zero(value, precision_digits=None, precision_rounding=None):
"""Returns true if ``value`` is small enough to be treated as
@ -120,23 +131,24 @@ def float_is_zero(value, precision_digits=None, precision_rounding=None):
is used as the zero *epsilon*: values less than that are considered
to be zero.
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
not both!
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value: value to compare with the precision's zero
:return: True if ``value`` is considered zero
"""
epsilon = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
return abs(float_round(value, precision_rounding=epsilon)) < epsilon
precision_rounding=precision_rounding)
return value == 0.0 or abs(float_round(value, precision_rounding=epsilon)) < epsilon
def float_compare(value1, value2, precision_digits=None, precision_rounding=None):
"""Compare ``value1`` and ``value2`` after rounding them according to the
@ -152,28 +164,34 @@ def float_compare(value1, value2, precision_digits=None, precision_rounding=None
because they respectively round to 0.01 and 0.0, even though
0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value1: first value to compare
:param float value2: second value to compare
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:return: (resp.) -1, 0 or 1, if ``value1`` is (resp.) lower than,
equal to, or greater than ``value2``, at the given precision.
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
# equal numbers round equally, so we can skip that step
# doing this after _float_check_precision to validate parameters first
if value1 == value2:
return 0
value1 = float_round(value1, precision_rounding=rounding_factor)
value2 = float_round(value2, precision_rounding=rounding_factor)
delta = value1 - value2
if float_is_zero(delta, precision_rounding=rounding_factor): return 0
if float_is_zero(delta, precision_rounding=rounding_factor):
return 0
return -1 if delta < 0.0 else 1
def float_repr(value, precision_digits):
"""Returns a string representation of a float with the
given number of fractional digits. This should not be
@ -187,9 +205,10 @@ def float_repr(value, precision_digits):
# Can't use str() here because it seems to have an intrinsic
# rounding to 12 significant digits, which causes a loss of
# precision. e.g. str(123456789.1234) == str(123456789.123)!!
return ("%%.%sf" % precision_digits) % value
if float_is_zero(value, precision_digits=precision_digits):
value = 0.0
return "%.*f" % (precision_digits, value)
_float_repr = float_repr
def float_split_str(value, precision_digits):
"""Splits the given float 'value' in its unitary and decimal parts,
@ -217,6 +236,7 @@ def float_split_str(value, precision_digits):
value_repr = float_repr(value, precision_digits)
return tuple(value_repr.split('.')) if precision_digits else (value_repr, '')
def float_split(value, precision_digits):
""" same as float_split_str() except that it returns the unitary and decimal
parts as integers instead of strings. In case ``precision_digits`` is zero,
@ -229,6 +249,7 @@ def float_split(value, precision_digits):
return int(units), 0
return int(units), int(cents)
def json_float_round(value, precision_digits, rounding_method='HALF-UP'):
"""Not suitable for float calculations! Similar to float_repr except that it
returns a float suitable for json dump
@ -259,20 +280,44 @@ def json_float_round(value, precision_digits, rounding_method='HALF-UP'):
return float(rounded_repr)
_INVERTDICT = {
1e-1: 1e+1, 1e-2: 1e+2, 1e-3: 1e+3, 1e-4: 1e+4, 1e-5: 1e+5,
1e-6: 1e+6, 1e-7: 1e+7, 1e-8: 1e+8, 1e-9: 1e+9, 1e-10: 1e+10,
2e-1: 5e+0, 2e-2: 5e+1, 2e-3: 5e+2, 2e-4: 5e+3, 2e-5: 5e+4,
2e-6: 5e+5, 2e-7: 5e+6, 2e-8: 5e+7, 2e-9: 5e+8, 2e-10: 5e+9,
5e-1: 2e+0, 5e-2: 2e+1, 5e-3: 2e+2, 5e-4: 2e+3, 5e-5: 2e+4,
5e-6: 2e+5, 5e-7: 2e+6, 5e-8: 2e+7, 5e-9: 2e+8, 5e-10: 2e+9,
}
def float_invert(value):
"""Inverts a floating point number with increased accuracy.
:param float value: value to invert.
:param bool store: whether store the result in memory for future calls.
:return: rounded float.
"""
result = _INVERTDICT.get(value)
if result is None:
coefficient, exponent = f'{value:.15e}'.split('e')
# invert exponent by changing sign, and coefficient by dividing by its square
result = float(f'{coefficient}e{-int(exponent)}') / float(coefficient)**2
return result
if __name__ == "__main__":
import time
start = time.time()
count = 0
errors = 0
def try_round(amount, expected, precision_digits=3):
global count, errors; count += 1
result = float_repr(float_round(amount, precision_digits=precision_digits),
precision_digits=precision_digits)
if result != expected:
errors += 1
print('###!!! Rounding error: got %s , expected %s' % (result, expected))
return complex(1, 1)
return 1
# Extended float range test, inspired by Cloves Almeida's test on bug #882036.
fractions = [.0, .015, .01499, .675, .67499, .4555, .4555, .45555]
@ -280,14 +325,15 @@ if __name__ == "__main__":
precisions = [2, 2, 2, 2, 2, 2, 3, 4]
for magnitude in range(7):
for frac, exp, prec in zip(fractions, expecteds, precisions):
for sign in [-1,1]:
for sign in [-1, 1]:
for x in range(0, 10000, 97):
n = x * 10**magnitude
f = sign * (n + frac)
f_exp = ('-' if f != 0 and sign == -1 else '') + str(n) + exp
try_round(f, f_exp, precision_digits=prec)
count += try_round(f, f_exp, precision_digits=prec)
stop = time.time()
count, errors = int(count.real), int(count.imag)
# Micro-bench results:
# 47130 round calls in 0.422306060791 secs, with Python 2.6.7 on Core i3 x64

View file

@ -1,28 +1,42 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
__all__ = ['synchronized', 'lazy_classproperty', 'lazy_property',
'classproperty', 'conditional', 'lazy']
import warnings
from inspect import getsourcefile, Parameter, signature
from functools import wraps
from json import JSONEncoder
from __future__ import annotations
import typing
from inspect import Parameter, getsourcefile, signature
from decorator import decorator
class lazy_property(object):
__all__ = [
'classproperty',
'conditional',
'lazy',
'lazy_classproperty',
'lazy_property',
'synchronized',
]
T = typing.TypeVar("T")
if typing.TYPE_CHECKING:
from collections.abc import Callable
class lazy_property(typing.Generic[T]):
""" Decorator for a lazy property of an object, i.e., an object attribute
that is determined by the result of a method call evaluated once. To
reevaluate the property, simply delete the attribute on the object, and
get it again.
"""
def __init__(self, fget):
def __init__(self, fget: Callable[[typing.Any], T]):
assert not fget.__name__.startswith('__'),\
"lazy_property does not support mangled names"
self.fget = fget
def __get__(self, obj, cls):
@typing.overload
def __get__(self, obj: None, cls: typing.Any, /) -> typing.Any: ...
@typing.overload
def __get__(self, obj: object, cls: typing.Any, /) -> T: ...
def __get__(self, obj, cls, /):
if obj is None:
return self
value = self.fget(obj)
@ -34,7 +48,7 @@ class lazy_property(object):
return self.fget.__doc__
@staticmethod
def reset_all(obj):
def reset_all(obj) -> None:
""" Reset all lazy properties on the instance `obj`. """
cls = type(obj)
obj_dict = vars(obj)
@ -42,12 +56,6 @@ class lazy_property(object):
if isinstance(getattr(cls, name, None), lazy_property):
obj_dict.pop(name)
class lazy_classproperty(lazy_property):
""" Similar to :class:`lazy_property`, but for classes. """
def __get__(self, obj, cls):
val = self.fget(cls)
setattr(cls, self.fget.__name__, val)
return val
def conditional(condition, decorator):
""" Decorator for a conditionally applied decorator.
@ -63,7 +71,8 @@ def conditional(condition, decorator):
else:
return lambda fn: fn
def filter_kwargs(func, kwargs):
def filter_kwargs(func: Callable, kwargs: dict[str, typing.Any]) -> dict[str, typing.Any]:
""" Filter the given keyword arguments to only return the kwargs
that binds to the function's signature.
"""
@ -80,19 +89,22 @@ def filter_kwargs(func, kwargs):
return {key: kwargs[key] for key in kwargs if key not in leftovers}
def synchronized(lock_attr='_lock'):
def synchronized(lock_attr: str = '_lock'):
@decorator
def locked(func, inst, *args, **kwargs):
with getattr(inst, lock_attr):
return func(inst, *args, **kwargs)
return locked
locked = synchronized()
def frame_codeinfo(fframe, back=0):
""" Return a (filename, line) pair for a previous frame .
@return (filename, lineno) where lineno is either int or string==''
"""
try:
if not fframe:
return "<unknown>", ''
@ -107,33 +119,25 @@ def frame_codeinfo(fframe, back=0):
except Exception:
return "<unknown>", ''
def compose(a, b):
""" Composes the callables ``a`` and ``b``. ``compose(a, b)(*args)`` is
equivalent to ``a(b(*args))``.
Can be used as a decorator by partially applying ``a``::
class classproperty(typing.Generic[T]):
def __init__(self, fget: Callable[[typing.Any], T]) -> None:
self.fget = classmethod(fget)
@partial(compose, a)
def b():
...
"""
warnings.warn(
"Since 16.0, just byo or use a dedicated library like funcy.",
DeprecationWarning,
stacklevel=2,
)
@wraps(b)
def wrapper(*args, **kwargs):
return a(b(*args, **kwargs))
return wrapper
class _ClassProperty(property):
def __get__(self, cls, owner):
def __get__(self, cls, owner: type | None = None, /) -> T:
return self.fget.__get__(None, owner)()
def classproperty(func):
return _ClassProperty(classmethod(func))
@property
def __doc__(self):
return self.fget.__doc__
class lazy_classproperty(classproperty[T], typing.Generic[T]):
""" Similar to :class:`lazy_property`, but for classes. """
def __get__(self, cls, owner: type | None = None, /) -> T:
val = super().__get__(cls, owner)
setattr(owner, self.fget.__name__, val)
return val
class lazy(object):

View file

@ -0,0 +1,104 @@
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Literal, Optional, Sequence
from babel import lists
from odoo.tools.misc import babel_locale_parse, get_lang
if TYPE_CHECKING:
import odoo.api
XPG_LOCALE_RE = re.compile(
r"""^
([a-z]+) # language
(_[A-Z\d]+)? # maybe _territory
# no support for .codeset (we don't use that in Odoo)
(@.+)? # maybe @modifier
$""",
re.VERBOSE,
)
def format_list(
env: odoo.api.Environment,
lst: Sequence[str],
style: Literal["standard", "standard-short", "or", "or-short", "unit", "unit-short", "unit-narrow"] = "standard",
lang_code: Optional[str] = None,
) -> str:
"""
Format the items in `lst` as a list in a locale-dependent manner with the chosen style.
The available styles are defined by babel according to the Unicode TR35-49 spec:
* standard:
A typical 'and' list for arbitrary placeholders.
e.g. "January, February, and March"
* standard-short:
A short version of an 'and' list, suitable for use with short or abbreviated placeholder values.
e.g. "Jan., Feb., and Mar."
* or:
A typical 'or' list for arbitrary placeholders.
e.g. "January, February, or March"
* or-short:
A short version of an 'or' list.
e.g. "Jan., Feb., or Mar."
* unit:
A list suitable for wide units.
e.g. "3 feet, 7 inches"
* unit-short:
A list suitable for short units
e.g. "3 ft, 7 in"
* unit-narrow:
A list suitable for narrow units, where space on the screen is very limited.
e.g. "3 7″"
See https://www.unicode.org/reports/tr35/tr35-49/tr35-general.html#ListPatterns for more details.
:param env: the current environment.
:param lst: the sequence of items to format into a list.
:param style: the style to format the list with.
:param lang_code: the locale (i.e. en_US).
:return: the formatted list.
"""
locale = babel_locale_parse(lang_code or get_lang(env).code)
# Some styles could be unavailable for the chosen locale
if style not in locale.list_patterns:
style = "standard"
return lists.format_list(lst, style, locale)
def py_to_js_locale(locale: str) -> str:
"""
Converts a locale from Python to JavaScript format.
Most of the time the conversion is simply to replace _ with -.
Example: fr_BE fr-BE
Exception: Serbian can be written in both Latin and Cyrillic scripts
interchangeably, therefore its locale includes a special modifier
to indicate which script to use.
Example: sr@latin sr-Latn
BCP 47 (JS):
language[-extlang][-script][-region][-variant][-extension][-privateuse]
https://www.ietf.org/rfc/rfc5646.txt
XPG syntax (Python):
language[_territory][.codeset][@modifier]
https://www.gnu.org/software/libc/manual/html_node/Locale-Names.html
:param locale: The locale formatted for use on the Python-side.
:return: The locale formatted for use on the JavaScript-side.
"""
match_ = XPG_LOCALE_RE.match(locale)
if not match_:
return locale
language, territory, modifier = match_.groups()
subtags = [language]
if modifier == "@Cyrl":
subtags.append("Cyrl")
elif modifier == "@latin":
subtags.append("Latn")
if territory:
subtags.append(territory.removeprefix("_"))
return "-".join(subtags)

View file

@ -3,6 +3,7 @@
import base64
import binascii
import io
from typing import Tuple, Union
from PIL import Image, ImageOps
# We can preload Ico too because it is considered safe
@ -16,9 +17,12 @@ from random import randrange
from odoo.exceptions import UserError
from odoo.tools.misc import DotDict
from odoo.tools.translate import _
from odoo.tools.translate import LazyTranslate
__all__ = ["image_process"]
_lt = LazyTranslate('base')
# Preload PIL with the minimal subset of image formats we need
Image.preinit()
Image._initialized = 2
@ -53,16 +57,15 @@ EXIF_TAG_ORIENTATION_TO_TRANSPOSE_METHODS = { # Initial side on 1st row/col:
IMAGE_MAX_RESOLUTION = 50e6
class ImageProcess():
class ImageProcess:
def __init__(self, source, verify_resolution=True):
"""Initialize the `source` image for processing.
"""Initialize the ``source`` image for processing.
:param source: the original image binary
:param bytes source: the original image binary
No processing will be done if the `source` is falsy or if
the image is SVG.
:param verify_resolution: if True, make sure the original image size is not
excessive before starting to process it. The max allowed resolution is
defined by `IMAGE_MAX_RESOLUTION`.
@ -82,7 +85,7 @@ class ImageProcess():
try:
self.image = Image.open(io.BytesIO(source))
except (OSError, binascii.Error):
raise UserError(_("This file could not be decoded as an image file."))
raise UserError(_lt("This file could not be decoded as an image file."))
# Original format has to be saved before fixing the orientation or
# doing any other operations because the information will be lost on
@ -93,18 +96,15 @@ class ImageProcess():
w, h = self.image.size
if verify_resolution and w * h > IMAGE_MAX_RESOLUTION:
raise UserError(_("Too large image (above %sMpx), reduce the image size.", str(IMAGE_MAX_RESOLUTION / 1e6)))
raise UserError(_lt("Too large image (above %sMpx), reduce the image size.", str(IMAGE_MAX_RESOLUTION / 1e6)))
def image_quality(self, quality=0, output_format=''):
"""Return the image resulting of all the image processing
operations that have been applied previously.
Return False if the initialized `image` was falsy, and return
the initialized `image` without change if it was SVG.
Also return the initialized `image` if no operations have been applied
and the `output_format` is the same as the original format and the
quality is not specified.
The source is returned as-is if it's an SVG, or if no operations have
been applied, the `output_format` is the same as the original format,
and the quality is not specified.
:param int quality: quality setting to apply. Default to 0.
@ -113,11 +113,12 @@ class ImageProcess():
was changed, otherwise the original image is returned.
- for PNG: set falsy to prevent conversion to a WEB palette.
- for other formats: no effect.
:param str output_format: the output format. Can be PNG, JPEG, GIF, or ICO.
Default to the format of the original image. BMP is converted to
PNG, other formats than those mentioned above are converted to JPEG.
:return: image
:rtype: bytes or False
:param str output_format: Can be PNG, JPEG, GIF, or ICO.
Default to the format of the original image if a valid output format,
otherwise BMP is converted to PNG and the rest are converted to JPEG.
:return: the final image, or ``False`` if the original ``source`` was falsy.
:rtype: bytes | False
"""
if not self.image:
return self.source
@ -158,11 +159,12 @@ class ImageProcess():
return self.source
return output_bytes
def resize(self, max_width=0, max_height=0):
def resize(self, max_width=0, max_height=0, expand=False):
"""Resize the image.
The image is never resized above the current image size. This method is
only to create a smaller version of the image.
The image is not resized above the current image size, unless the expand
parameter is True. This method is used by default to create smaller versions
of the image.
The current ratio is preserved. To change the ratio, see `crop_resize`.
@ -174,6 +176,7 @@ class ImageProcess():
:param int max_width: max width
:param int max_height: max height
:param bool expand: whether or not the image size can be increased
:return: self to allow chaining
:rtype: ImageProcess
"""
@ -181,6 +184,10 @@ class ImageProcess():
w, h = self.image.size
asked_width = max_width or (w * max_height) // h
asked_height = max_height or (h * max_width) // w
if expand and (asked_width > w or asked_height > h):
self.image = self.image.resize((asked_width, asked_height))
self.operationsCount += 1
return self
if asked_width != w or asked_height != h:
self.image.thumbnail((asked_width, asked_height), Resampling.LANCZOS)
if self.image.width != w or self.image.height != h:
@ -246,27 +253,43 @@ class ImageProcess():
return self.resize(max_width, max_height)
def colorize(self):
"""Replace the transparent background by a random color.
def colorize(self, color=None):
"""Replace the transparent background by a given color, or by a random one.
:param tuple color: RGB values for the color to use
:return: self to allow chaining
:rtype: ImageProcess
"""
if color is None:
color = (randrange(32, 224, 24), randrange(32, 224, 24), randrange(32, 224, 24))
if self.image:
original = self.image
color = (randrange(32, 224, 24), randrange(32, 224, 24), randrange(32, 224, 24))
self.image = Image.new('RGB', original.size)
self.image.paste(color, box=(0, 0) + original.size)
self.image.paste(original, mask=original)
self.operationsCount += 1
return self
def add_padding(self, padding):
"""Expand the image size by adding padding around the image
def image_process(source, size=(0, 0), verify_resolution=False, quality=0, crop=None, colorize=False, output_format=''):
:param int padding: thickness of the padding
:return: self to allow chaining
:rtype: ImageProcess
"""
if self.image:
img_width, img_height = self.image.size
self.image = self.image.resize((img_width - 2 * padding, img_height - 2 * padding))
self.image = ImageOps.expand(self.image, border=padding)
self.operationsCount += 1
return self
def image_process(source, size=(0, 0), verify_resolution=False, quality=0, expand=False, crop=None, colorize=False, output_format='', padding=False):
"""Process the `source` image by executing the given operations and
return the result image.
"""
if not source or ((not size or (not size[0] and not size[1])) and not verify_resolution and not quality and not crop and not colorize and not output_format):
if not source or ((not size or (not size[0] and not size[1])) and not verify_resolution and not quality and not crop and not colorize and not output_format and not padding):
# for performance: don't do anything if the image is falsy or if
# no operations have been requested
return source
@ -282,9 +305,11 @@ def image_process(source, size=(0, 0), verify_resolution=False, quality=0, crop=
center_y = 1
image.crop_resize(max_width=size[0], max_height=size[1], center_x=center_x, center_y=center_y)
else:
image.resize(max_width=size[0], max_height=size[1])
image.resize(max_width=size[0], max_height=size[1], expand=expand)
if padding:
image.add_padding(padding)
if colorize:
image.colorize()
image.colorize(colorize if isinstance(colorize, tuple) else None)
return image.image_quality(quality=quality, output_format=output_format)
@ -397,30 +422,28 @@ def binary_to_image(source):
try:
return Image.open(io.BytesIO(source))
except (OSError, binascii.Error):
raise UserError(_("This file could not be decoded as an image file."))
raise UserError(_lt("This file could not be decoded as an image file."))
def base64_to_image(base64_source):
def base64_to_image(base64_source: Union[str, bytes]) -> Image:
"""Return a PIL image from the given `base64_source`.
:param base64_source: the image base64 encoded
:type base64_source: string or bytes
:rtype: ~PIL.Image.Image
:raise: UserError if the base64 is incorrect or the image can't be identified by PIL
"""
try:
return Image.open(io.BytesIO(base64.b64decode(base64_source)))
except (OSError, binascii.Error):
raise UserError(_("This file could not be decoded as an image file."))
raise UserError(_lt("This file could not be decoded as an image file."))
def image_apply_opt(image, output_format, **params):
"""Return the given PIL `image` using `params`.
def image_apply_opt(image: Image, output_format: str, **params) -> bytes:
"""Return the serialization of the provided `image` to `output_format`
using `params`.
:type image: ~PIL.Image.Image
:param str output_format: :meth:`~PIL.Image.Image.save`'s ``format`` parameter
:param image: the image to encode
:param output_format: :meth:`~PIL.Image.Image.save`'s ``format`` parameter
:param dict params: params to expand when calling :meth:`~PIL.Image.Image.save`
:return: the image formatted
:rtype: bytes
"""
if output_format == 'JPEG' and image.mode not in ['1', 'L', 'RGB']:
image = image.convert("RGB")
@ -452,7 +475,7 @@ def get_webp_size(source):
:return: (width, height) tuple, or None if not supported
"""
if not (source[0:4] == b'RIFF' and source[8:15] == b'WEBPVP8'):
raise UserError(_("This file is not a webp file."))
raise UserError(_lt("This file is not a webp file."))
vp8_type = source[15]
if vp8_type == 0x20: # 0x20 = ' '
@ -506,23 +529,22 @@ def is_image_size_above(base64_source_1, base64_source_2):
return image_source.width > image_target.width or image_source.height > image_target.height
def image_guess_size_from_field_name(field_name):
def image_guess_size_from_field_name(field_name: str) -> Tuple[int, int]:
"""Attempt to guess the image size based on `field_name`.
If it can't be guessed or if it is a custom field: return (0, 0) instead.
:param str field_name: the name of a field
:param field_name: the name of a field
:return: the guessed size
:rtype: tuple (width, height)
"""
if field_name == 'image':
return (1024, 1024)
if field_name.startswith('x_'):
return (0, 0)
try:
suffix = int(field_name.split('_')[-1])
suffix = int(field_name.rsplit('_', 1)[-1])
except ValueError:
return (0, 0)
return 0, 0
if suffix < 16:
# If the suffix is less than 16, it's probably not the size
@ -531,7 +553,7 @@ def image_guess_size_from_field_name(field_name):
return (suffix, suffix)
def image_data_uri(base64_source):
def image_data_uri(base64_source: bytes) -> str:
"""This returns data URL scheme according RFC 2397
(https://tools.ietf.org/html/rfc2397) for all kind of supported images
(PNG, GIF, JPG and SVG), defaulting on PNG type if not mimetype detected.

View file

@ -127,7 +127,7 @@ EXPORT_FCT_RE = re.compile(r"""
(?P<space>\s*) # space and empty line
export\s+ # export
(?P<type>(async\s+)?function)\s+ # async function or function
(?P<identifier>\w+) # name the function
(?P<identifier>[\w$]+) # name of the function
""", re.MULTILINE | re.VERBOSE)
@ -156,7 +156,7 @@ EXPORT_CLASS_RE = re.compile(r"""
(?P<space>\s*) # space and empty line
export\s+ # export
(?P<type>class)\s+ # class
(?P<identifier>\w+) # name of the class
(?P<identifier>[\w$]+) # name of the class
""", re.MULTILINE | re.VERBOSE)
@ -181,7 +181,7 @@ EXPORT_FCT_DEFAULT_RE = re.compile(r"""
(?P<space>\s*) # space and empty line
export\s+default\s+ # export default
(?P<type>(async\s+)?function)\s+ # async function or function
(?P<identifier>\w+) # name of the function
(?P<identifier>[\w$]+) # name of the function
""", re.MULTILINE | re.VERBOSE)
@ -210,7 +210,7 @@ EXPORT_CLASS_DEFAULT_RE = re.compile(r"""
(?P<space>\s*) # space and empty line
export\s+default\s+ # export default
(?P<type>class)\s+ # class
(?P<identifier>\w+) # name of the class or the function
(?P<identifier>[\w$]+) # name of the class or the function
""", re.MULTILINE | re.VERBOSE)
@ -234,7 +234,7 @@ EXPORT_VAR_RE = re.compile(r"""
(?P<space>\s*) # space and empty line
export\s+ # export
(?P<type>let|const|var)\s+ # let or cont or var
(?P<identifier>\w+) # variable name
(?P<identifier>[\w$]+) # variable name
""", re.MULTILINE | re.VERBOSE)
@ -260,7 +260,7 @@ EXPORT_DEFAULT_VAR_RE = re.compile(r"""
(?P<space>\s*) # space and empty line
export\s+default\s+ # export default
(?P<type>let|const|var)\s+ # let or const or var
(?P<identifier>\w+)\s* # variable name
(?P<identifier>[\w$]+)\s* # variable name
""", re.MULTILINE | re.VERBOSE)
@ -284,7 +284,7 @@ EXPORT_OBJECT_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
export\s* # export
(?P<object>{[\w\s,]+}) # { a, b, c as x, ... }
(?P<object>{[\w$\s,]+}) # { a, b, c as x, ... }
""", re.MULTILINE | re.VERBOSE)
@ -310,7 +310,7 @@ EXPORT_FROM_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
export\s* # export
(?P<object>{[\w\s,]+})\s* # { a, b, c as x, ... }
(?P<object>{[\w$\s,]+})\s* # { a, b, c as x, ... }
from\s* # from
(?P<path>(?P<quote>["'`])([^"'`]+)(?P=quote)) # "file path" ("some/path.js")
""", re.MULTILINE | re.VERBOSE)
@ -366,7 +366,7 @@ EXPORT_DEFAULT_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
export\s+default # export default
(\s+\w+\s*=)? # something (optional)
(\s+[\w$]+\s*=)? # something (optional)
""", re.MULTILINE | re.VERBOSE)
@ -402,7 +402,7 @@ IMPORT_BASIC_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
import\s+ # import
(?P<object>{[\s\w,]+})\s* # { a, b, c as x, ... }
(?P<object>{[\s\w$,]+})\s* # { a, b, c as x, ... }
from\s* # from
(?P<path>(?P<quote>["'`])([^"'`]+)(?P=quote)) # "file path" ("some/path")
""", re.MULTILINE | re.VERBOSE)
@ -429,7 +429,7 @@ IMPORT_LEGACY_DEFAULT_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
import\s+ # import
(?P<identifier>\w+)\s* # default variable name
(?P<identifier>[\w$]+)\s* # default variable name
from\s* # from
(?P<path>(?P<quote>["'`])([^@\."'`][^"'`]*)(?P=quote)) # legacy alias file ("addon_name.module_name" or "some/path")
""", re.MULTILINE | re.VERBOSE)
@ -456,7 +456,7 @@ IMPORT_DEFAULT = re.compile(r"""
^
(?P<space>\s*) # space and empty line
import\s+ # import
(?P<identifier>\w+)\s* # default variable name
(?P<identifier>[\w$]+)\s* # default variable name
from\s* # from
(?P<path>(?P<quote>["'`])([^"'`]+)(?P=quote)) # "file path" ("some/path")
""", re.MULTILINE | re.VERBOSE)
@ -483,8 +483,8 @@ IMPORT_DEFAULT_AND_NAMED_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
import\s+ # import
(?P<default_export>\w+)\s*,\s* # default variable name,
(?P<named_exports>{[\s\w,]+})\s* # { a, b, c as x, ... }
(?P<default_export>[\w$]+)\s*,\s* # default variable name,
(?P<named_exports>{[\s\w$,]+})\s* # { a, b, c as x, ... }
from\s* # from
(?P<path>(?P<quote>["'`])([^"'`]+)(?P=quote)) # "file path" ("some/path")
""", re.MULTILINE | re.VERBOSE)
@ -552,11 +552,11 @@ def convert_relative_require(url, dependencies, content):
IMPORT_STAR = re.compile(r"""
^(?P<space>\s*) # indentation
import\s+\*\s+as\s+ # import * as
(?P<identifier>\w+) # alias
\s*from\s* # from
(?P<path>[^;\n]+) # path
^(?P<space>\s*) # indentation
import\s+\*\s+as\s+ # import * as
(?P<identifier>[\w$]+) # alias
\s*from\s* # from
(?P<path>[^;\n]+) # path
""", re.MULTILINE | re.VERBOSE)
@ -576,13 +576,13 @@ def convert_star_import(content):
IMPORT_DEFAULT_AND_STAR = re.compile(r"""
^(?P<space>\s*) # indentation
import\s+ # import
(?P<default_export>\w+)\s*,\s* # default export name,
\*\s+as\s+ # * as
(?P<named_exports_alias>\w+) # alias
\s*from\s* # from
(?P<path>[^;\n]+) # path
^(?P<space>\s*) # indentation
import\s+ # import
(?P<default_export>[\w$]+)\s*,\s* # default export name,
\*\s+as\s+ # * as
(?P<named_exports_alias>[\w$]+) # alias
\s*from\s* # from
(?P<path>[^;\n]+) # path
""", re.MULTILINE | re.VERBOSE)
@ -662,23 +662,31 @@ def relative_path_to_module_path(url, path_rel):
ODOO_MODULE_RE = re.compile(r"""
\s* # some starting space
\/(\*|\/).*\s* # // or /*
@odoo-module # @odoo-module
(\s+alias=(?P<alias>[\w.]+))? # alias=web.AbstractAction (optional)
(\s+default=(?P<default>False|false|0))? # default=False or false or 0 (optional)
\s* # starting white space
\/(\*|\/) # /* or //
.* # any comment in between (optional)
@odoo-module # '@odoo-module' statement
(?P<ignore>\s+ignore)? # module in src | tests which should not be transpiled (optional)
(\s+alias=(?P<alias>[^\s*]+))? # alias (e.g. alias=web.Widget, alias=@web/../tests/utils) (optional)
(\s+default=(?P<default>[\w$]+))? # no implicit default export (e.g. default=false) (optional)
""", re.VERBOSE)
def is_odoo_module(content):
def is_odoo_module(url, content):
"""
Detect if the file is a native odoo module.
We look for a comment containing @odoo-module.
:param url:
:param content: source code
:return: is this a odoo module that need transpilation ?
"""
result = ODOO_MODULE_RE.match(content)
if result and result['ignore']:
return False
addon = url.split('/')[1]
if url.startswith(f'/{addon}/static/src') or url.startswith(f'/{addon}/static/tests'):
return True
return bool(result)

View file

@ -1,8 +1,11 @@
# -*- coding: utf-8 -*-
from datetime import date, datetime
import json as json_
import re
import markupsafe
from .func import lazy
from .misc import ReadonlyDict
JSON_SCRIPTSAFE_MAPPER = {
'&': r'\u0026',
@ -53,3 +56,18 @@ class JSON:
"""
return _ScriptSafe(json_.dumps(*args, **kwargs))
scriptsafe = JSON()
def json_default(obj):
from odoo import fields # noqa: PLC0415
if isinstance(obj, datetime):
return fields.Datetime.to_string(obj)
if isinstance(obj, date):
return fields.Date.to_string(obj)
if isinstance(obj, lazy):
return obj._value
if isinstance(obj, ReadonlyDict):
return dict(obj)
if isinstance(obj, bytes):
return obj.decode()
return str(obj)

View file

@ -1,57 +1,60 @@
# -*- coding: utf-8 -*-
import collections
import threading
import typing
from collections.abc import Iterable, Iterator, MutableMapping
from .func import locked
__all__ = ['LRU']
class LRU(object):
K = typing.TypeVar('K')
V = typing.TypeVar('V')
class LRU(MutableMapping[K, V], typing.Generic[K, V]):
"""
Implementation of a length-limited O(1) LRU map.
Original Copyright 2003 Josiah Carlson, later rebuilt on OrderedDict.
Original Copyright 2003 Josiah Carlson, later rebuilt on OrderedDict and added typing.
"""
def __init__(self, count, pairs=()):
def __init__(self, count: int, pairs: Iterable[tuple[K, V]] = ()):
self._lock = threading.RLock()
self.count = max(count, 1)
self.d = collections.OrderedDict()
self.d: collections.OrderedDict[K, V] = collections.OrderedDict()
for key, value in pairs:
self[key] = value
@locked
def __contains__(self, obj):
def __contains__(self, obj: K) -> bool:
return obj in self.d
def get(self, obj, val=None):
try:
return self[obj]
except KeyError:
return val
@locked
def __getitem__(self, obj):
def __getitem__(self, obj: K) -> V:
a = self.d[obj]
self.d.move_to_end(obj, last=False)
return a
@locked
def __setitem__(self, obj, val):
def __setitem__(self, obj: K, val: V):
self.d[obj] = val
self.d.move_to_end(obj, last=False)
while len(self.d) > self.count:
self.d.popitem(last=True)
@locked
def __delitem__(self, obj):
def __delitem__(self, obj: K):
del self.d[obj]
@locked
def __len__(self):
def __len__(self) -> int:
return len(self.d)
@locked
def pop(self,key):
def __iter__(self) -> Iterator[K]:
return iter(self.d)
@locked
def pop(self, key: K) -> V:
return self.d.pop(key)
@locked

View file

@ -3,11 +3,11 @@
import base64
import collections
import itertools
import logging
import random
import re
import socket
import threading
import time
import email.utils
from email.utils import getaddresses as orig_getaddresses
@ -20,10 +20,25 @@ from lxml import etree, html
from lxml.html import clean, defs
from werkzeug import urls
import odoo
from odoo.loglevels import ustr
from odoo.tools import misc
__all__ = [
"email_domain_extract",
"email_domain_normalize",
"email_normalize",
"email_normalize_all",
"email_split",
"encapsulate_email",
"formataddr",
"html2plaintext",
"html_normalize",
"html_sanitize",
"is_html_empty",
"parse_contact_from_email",
"plaintext2html",
"single_email_re",
]
_logger = logging.getLogger(__name__)
@ -44,12 +59,13 @@ else:
safe_attrs = defs.safe_attrs | frozenset(
['style',
'data-o-mail-quote', 'data-o-mail-quote-node', # quote detection
'data-oe-model', 'data-oe-id', 'data-oe-field', 'data-oe-type', 'data-oe-expression', 'data-oe-translation-initial-sha', 'data-oe-nodeid',
'data-last-history-steps', 'data-oe-protected', 'data-oe-transient-content', 'data-width', 'data-height', 'data-scale-x', 'data-scale-y', 'data-x', 'data-y',
'data-oe-model', 'data-oe-id', 'data-oe-field', 'data-oe-type', 'data-oe-expression', 'data-oe-translation-source-sha', 'data-oe-nodeid',
'data-last-history-steps', 'data-oe-protected', 'data-embedded', 'data-embedded-editable', 'data-embedded-props', 'data-oe-version',
'data-oe-transient-content', 'data-behavior-props', 'data-prop-name', 'data-width', 'data-height', 'data-scale-x', 'data-scale-y', 'data-x', 'data-y', # legacy editor
'data-oe-role', 'data-oe-aria-label',
'data-publish', 'data-id', 'data-res_id', 'data-interval', 'data-member_id', 'data-scroll-background-ratio', 'data-view-id',
'data-class', 'data-mimetype', 'data-original-src', 'data-original-id', 'data-gl-filter', 'data-quality', 'data-resize-width',
'data-shape', 'data-shape-colors', 'data-file-name', 'data-original-mimetype',
'data-behavior-props', 'data-prop-name', # knowledge commands
'data-mimetype-before-conversion',
])
SANITIZE_TAGS = {
@ -68,10 +84,12 @@ class _Cleaner(clean.Cleaner):
_style_whitelist = [
'font-size', 'font-family', 'font-weight', 'font-style', 'background-color', 'color', 'text-align',
'line-height', 'letter-spacing', 'text-transform', 'text-decoration', 'text-decoration', 'opacity',
'float', 'vertical-align', 'display',
'float', 'vertical-align', 'display', 'object-fit',
'padding', 'padding-top', 'padding-left', 'padding-bottom', 'padding-right',
'margin', 'margin-top', 'margin-left', 'margin-bottom', 'margin-right',
'white-space',
# appearance
'background-image', 'background-position', 'background-size', 'background-repeat', 'background-origin',
# box model
'border', 'border-color', 'border-radius', 'border-style', 'border-width', 'border-top', 'border-bottom',
'height', 'width', 'max-width', 'min-width', 'min-height',
@ -86,6 +104,7 @@ class _Cleaner(clean.Cleaner):
strip_classes = False
sanitize_style = False
conditional_comments = True
def __call__(self, doc):
super(_Cleaner, self).__call__(doc)
@ -118,6 +137,24 @@ class _Cleaner(clean.Cleaner):
else:
del el.attrib['style']
def kill_conditional_comments(self, doc):
"""Override the default behavior of lxml.
https://github.com/lxml/lxml/blob/e82c9153c4a7d505480b94c60b9a84d79d948efb/src/lxml/html/clean.py#L501-L510
In some use cases, e.g. templates used for mass mailing,
we send emails containing conditional comments targeting Microsoft Outlook,
to give special styling instructions.
https://github.com/odoo/odoo/pull/119325/files#r1301064789
Within these conditional comments, unsanitized HTML can lie.
However, in modern browser, these comments are considered as simple comments,
their content is not executed.
https://caniuse.com/sr_ie-features
"""
if self.conditional_comments:
super().kill_conditional_comments(doc)
def tag_quote(el):
def _create_new_node(tag, text, tail=None, attrs=None):
@ -209,13 +246,23 @@ def tag_quote(el):
# remove single node
el.set('data-o-mail-quote-node', '1')
el.set('data-o-mail-quote', '1')
if el.getparent() is not None and (el.getparent().get('data-o-mail-quote') or el.getparent().get('data-o-mail-quote-container')) and not el.getparent().get('data-o-mail-quote-node'):
el.set('data-o-mail-quote', '1')
if el.getparent() is not None and not el.getparent().get('data-o-mail-quote-node'):
if el.getparent().get('data-o-mail-quote'):
el.set('data-o-mail-quote', '1')
# only quoting the elements following the first quote in the container
# avoids issues with repeated calls to html_normalize
elif el.getparent().get('data-o-mail-quote-container'):
if (first_sibling_quote := el.getparent().find("*[@data-o-mail-quote]")) is not None:
siblings = el.getparent().getchildren()
quote_index = siblings.index(first_sibling_quote)
element_index = siblings.index(el)
if quote_index < element_index:
el.set('data-o-mail-quote', '1')
if el.getprevious() is not None and el.getprevious().get('data-o-mail-quote') and not el.text_content().strip():
el.set('data-o-mail-quote', '1')
def html_normalize(src, filter_callback=None):
def html_normalize(src, filter_callback=None, output_method="html"):
""" Normalize `src` for storage as an html field value.
The string is parsed as an html tag soup, made valid, then decorated for
@ -228,27 +275,27 @@ def html_normalize(src, filter_callback=None):
:param filter_callback: optional callable taking a single `etree._Element`
document parameter, to be called during normalization in order to
filter the output document
:param output_method: defines the output method to pass to `html.tostring`.
It defaults to 'html', but can also be 'xml' for xhtml output.
"""
if not src:
return src
src = ustr(src, errors='replace')
# html: remove encoding attribute inside tags
doctype = re.compile(r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)', re.IGNORECASE | re.DOTALL)
src = doctype.sub(u"", src)
src = re.sub(r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)', "", src, flags=re.IGNORECASE | re.DOTALL)
src = src.replace('--!>', '-->')
src = re.sub(r'(<!-->|<!--->)', '<!-- -->', src)
# On the specific case of Outlook desktop it adds unnecessary '<o:.*></o:.*>' tags which are parsed
# in '<p></p>' which may alter the appearance (eg. spacing) of the mail body
src = re.sub(r'</?o:.*?>', '', src)
try:
src = src.replace('--!>', '-->')
src = re.sub(r'(<!-->|<!--->)', '<!-- -->', src)
# On the specific case of Outlook desktop it adds unnecessary '<o:.*></o:.*>' tags which are parsed
# in '<p></p>' which may alter the appearance (eg. spacing) of the mail body
src = re.sub(r'</?o:.*?>', '', src)
doc = html.fromstring(src)
except etree.ParserError as e:
# HTML comment only string, whitespace only..
if 'empty' in str(e):
return u""
return ""
raise
# perform quote detection before cleaning and class removal
@ -259,7 +306,7 @@ def html_normalize(src, filter_callback=None):
if filter_callback:
doc = filter_callback(doc)
src = html.tostring(doc, encoding='unicode')
src = html.tostring(doc, encoding='unicode', method=output_method)
# this is ugly, but lxml/etree tostring want to put everything in a
# 'div' that breaks the editor -> remove that
@ -272,7 +319,7 @@ def html_normalize(src, filter_callback=None):
return src
def html_sanitize(src, silent=True, sanitize_tags=True, sanitize_attributes=False, sanitize_style=False, sanitize_form=True, strip_style=False, strip_classes=False):
def html_sanitize(src, silent=True, sanitize_tags=True, sanitize_attributes=False, sanitize_style=False, sanitize_form=True, sanitize_conditional_comments=True, strip_style=False, strip_classes=False, output_method="html"):
if not src:
return src
@ -286,6 +333,7 @@ def html_sanitize(src, silent=True, sanitize_tags=True, sanitize_attributes=Fals
'forms': sanitize_form, # True = remove form tags
'remove_unknown_tags': False,
'comments': False,
'conditional_comments': sanitize_conditional_comments, # True = remove conditional comments
'processing_instructions': False
}
if sanitize_tags:
@ -311,7 +359,7 @@ def html_sanitize(src, silent=True, sanitize_tags=True, sanitize_attributes=Fals
return doc
try:
sanitized = html_normalize(src, filter_callback=sanitize_handler)
sanitized = html_normalize(src, filter_callback=sanitize_handler, output_method=output_method)
except etree.ParserError:
if not silent:
raise
@ -329,7 +377,8 @@ def html_sanitize(src, silent=True, sanitize_tags=True, sanitize_attributes=Fals
# HTML/Text management
# ----------------------------------------------------------
URL_REGEX = r'(\bhref=[\'"](?!mailto:|tel:|sms:)([^\'"]+)[\'"])'
URL_SKIP_PROTOCOL_REGEX = r'mailto:|tel:|sms:'
URL_REGEX = rf'''(\bhref=['"](?!{URL_SKIP_PROTOCOL_REGEX})([^'"]+)['"])'''
TEXT_URL_REGEX = r'https?://[\w@:%.+&~#=/-]+(?:\?\S+)?'
# retrieve inner content of the link
HTML_TAG_URL_REGEX = URL_REGEX + r'([^<>]*>([^<>]+)<\/)?'
@ -392,20 +441,24 @@ def create_link(url, label):
return f'<a href="{url}" target="_blank" rel="noreferrer noopener">{label}</a>'
def html2plaintext(html, body_id=None, encoding='utf-8'):
def html2plaintext(html, body_id=None, encoding='utf-8', include_references=True):
""" From an HTML text, convert the HTML to plain text.
If @param body_id is provided then this is the tag where the
body (not necessarily <body>) starts.
:param include_references: If False, numbered references and
URLs for links and images will not be included.
"""
## (c) Fry-IT, www.fry-it.com, 2007
## <peter@fry-it.com>
## download here: http://www.peterbe.com/plog/html2plaintext
html = ustr(html)
if not html.strip():
if not (html and html.strip()):
return ''
if isinstance(html, bytes):
html = html.decode(encoding)
else:
assert isinstance(html, str), f"expected str got {html.__class__.__name__}"
tree = etree.fromstring(html, parser=etree.HTMLParser())
if body_id is not None:
@ -416,28 +469,25 @@ def html2plaintext(html, body_id=None, encoding='utf-8'):
tree = source[0]
url_index = []
i = 0
for link in tree.findall('.//a'):
url = link.get('href')
if url:
i += 1
link.tag = 'span'
link.text = '%s [%s]' % (link.text, i)
url_index.append(url)
linkrefs = itertools.count(1)
if include_references:
for link in tree.findall('.//a'):
if url := link.get('href'):
link.tag = 'span'
link.text = f'{link.text} [{next(linkrefs)}]'
url_index.append(url)
for img in tree.findall('.//img'):
src = img.get('src')
if src:
i += 1
img.tag = 'span'
if src.startswith('data:'):
img_name = None # base64 image
else:
img_name = re.search(r'[^/]+(?=\.[a-zA-Z]+(?:\?|$))', src)
img.text = '%s [%s]' % (img_name.group(0) if img_name else 'Image', i)
url_index.append(src)
for img in tree.findall('.//img'):
if src := img.get('src'):
img.tag = 'span'
if src.startswith('data:'):
img_name = None # base64 image
else:
img_name = re.search(r'[^/]+(?=\.[a-zA-Z]+(?:\?|$))', src)
img.text = '%s [%s]' % (img_name[0] if img_name else 'Image', next(linkrefs))
url_index.append(src)
html = ustr(etree.tostring(tree, encoding=encoding))
html = etree.tostring(tree, encoding="unicode")
# \r char is converted into &#13;, must remove it
html = html.replace('&#13;', '')
@ -461,10 +511,10 @@ def html2plaintext(html, body_id=None, encoding='utf-8'):
html = '\n'.join([x.strip() for x in html.splitlines()])
html = html.replace('\n' * 2, '\n')
for i, url in enumerate(url_index):
if i == 0:
html += '\n\n'
html += ustr('[%s] %s\n') % (i + 1, url)
if url_index:
html += '\n\n'
for i, url in enumerate(url_index, start=1):
html += f'[{i}] {url}\n'
return html.strip()
@ -482,7 +532,8 @@ def plaintext2html(text, container_tag=None):
embedded into a ``<div>``
:rtype: markupsafe.Markup
"""
text = misc.html_escape(ustr(text))
assert isinstance(text, str)
text = misc.html_escape(text)
# 1. replace \n and \r
text = re.sub(r'(\r\n|\r|\n)', '<br/>', text)
@ -508,7 +559,7 @@ def append_content_to_html(html, content, plaintext=True, preserve=False, contai
""" Append extra content at the end of an HTML snippet, trying
to locate the end of the HTML document (</body>, </html>, or
EOF), and converting the provided content in html unless ``plaintext``
is False.
is ``False``.
Content conversion can be done in two ways:
@ -529,17 +580,16 @@ def append_content_to_html(html, content, plaintext=True, preserve=False, contai
:param str container_tag: tag to wrap the content into, defaults to `div`.
:rtype: markupsafe.Markup
"""
html = ustr(html)
if plaintext and preserve:
content = u'\n<pre>%s</pre>\n' % misc.html_escape(ustr(content))
content = '\n<pre>%s</pre>\n' % misc.html_escape(content)
elif plaintext:
content = '\n%s\n' % plaintext2html(content, container_tag)
else:
content = re.sub(r'(?i)(</?(?:html|body|head|!\s*DOCTYPE)[^>]*>)', '', content)
content = u'\n%s\n' % ustr(content)
content = '\n%s\n' % content
# Force all tags to lowercase
html = re.sub(r'(</?)(\w+)([ >])',
lambda m: '%s%s%s' % (m.group(1), m.group(2).lower(), m.group(3)), html)
lambda m: '%s%s%s' % (m[1], m[2].lower(), m[3]), html)
insert_location = html.find('</body>')
if insert_location == -1:
insert_location = html.find('</html>')

View file

@ -15,6 +15,7 @@ import zipfile
__all__ = ['guess_mimetype']
_logger = logging.getLogger(__name__)
_logger_guess_mimetype = _logger.getChild('guess_mimetype')
# We define our own guess_mimetype implementation and if magic is available we
# use it instead.
@ -74,6 +75,13 @@ def _check_open_container_format(data):
return False
_old_ms_office_mimetypes = {
'.doc': 'application/msword',
'.xls': 'application/vnd.ms-excel',
'.ppt': 'application/vnd.ms-powerpoint',
}
_olecf_mimetypes = ('application/x-ole-storage', 'application/CDFV2')
_xls_pattern = re.compile(b"""
\x09\x08\x10\x00\x00\x06\x05\x00
| \xFD\xFF\xFF\xFF(\x10|\x1F|\x20|"|\\#|\\(|\\))
@ -160,7 +168,7 @@ def _odoo_guess_mimetype(bin_data, default='application/octet-stream'):
if guess: return guess
except Exception:
# log-and-next
_logger.getChild('guess_mimetype').warn(
_logger_guess_mimetype.warning(
"Sub-checker '%s' of type '%s' failed",
discriminant.__name__, entry.mimetype,
exc_info=True
@ -168,6 +176,11 @@ def _odoo_guess_mimetype(bin_data, default='application/octet-stream'):
# if no discriminant or no discriminant matches, return
# primary mime type
return entry.mimetype
try:
if bin_data and all(c >= ' ' or c in '\t\n\r' for c in bin_data[:1024].decode()):
return 'text/plain'
except ValueError:
pass
return default
@ -193,6 +206,19 @@ if magic:
# https://github.com/file/file/commit/1a08bb5c235700ba623ffa6f3c95938fe295b262
if mimetype == 'image/svg':
return 'image/svg+xml'
# application/CDFV2 and application/x-ole-storage are two files
# formats that Microsoft Office was using before 2006. Use our
# own guesser to further discriminate the mimetype.
if mimetype in _olecf_mimetypes:
try:
if msoffice_mimetype := _check_olecf(bin_data):
return msoffice_mimetype
except Exception: # noqa: BLE001
_logger_guess_mimetype.warning(
"Sub-checker '_check_olecf' of type '%s' failed",
mimetype,
exc_info=True,
)
return mimetype
else:
guess_mimetype = _odoo_guess_mimetype
@ -204,12 +230,14 @@ def neuter_mimetype(mimetype, user):
return 'text/plain'
return mimetype
_extension_pattern = re.compile(r'\w+')
def get_extension(filename):
# A file has no extension if it has no dot (ignoring the leading one
# of hidden files) or that what follow the last dot is not a single
# word, e.g. "Mr. Doe"
_stem, dot, ext = filename.lstrip('.').rpartition('.')
if not dot or not ext.isalnum():
if not dot or not _extension_pattern.fullmatch(ext):
return ''
# Assume all 4-chars extensions to be valid extensions even if it is
@ -227,3 +255,32 @@ def get_extension(filename):
# Unknown extension.
return ''
def fix_filename_extension(filename, mimetype):
"""
Make sure the filename ends with an extension of the mimetype.
:param str filename: the filename with an unsafe extension
:param str mimetype: the mimetype detected reading the file's content
:returns: the same filename if its extension matches the detected
mimetype, otherwise the same filename with the mimetype's
extension added at the end.
"""
extension_mimetype = mimetypes.guess_type(filename)[0]
if extension_mimetype == mimetype:
return filename
extension = get_extension(filename)
if mimetype in _olecf_mimetypes and extension in _old_ms_office_mimetypes:
return filename
if mimetype == 'application/zip' and extension in {'.docx', '.xlsx', '.pptx'}:
return filename
if extension := mimetypes.guess_extension(mimetype):
_logger.warning("File %r has an invalid extension for mimetype %r, adding %r", filename, mimetype, extension)
return filename + extension
_logger.warning("File %r has an unknown extension for mimetype %r", filename, mimetype)
return filename

File diff suppressed because it is too large Load diff

View file

@ -1,710 +0,0 @@
import decimal
import math
import re
from collections import OrderedDict
from decimal import ROUND_HALF_UP, Decimal
from math import floor
# The following section of the code is used to monkey patch
# the Arabic class of num2words package as there are some problems
# upgrading the package to the newer version that fixed the bugs
# so a temporary fix was to patch the old version with the code
# from the new version manually.
# The code is taken from num2words package: https://github.com/savoirfairelinux/num2words
CURRENCY_SR = [("ريال", "ريالان", "ريالات", "ريالاً"),
("هللة", "هللتان", "هللات", "هللة")]
CURRENCY_EGP = [("جنيه", "جنيهان", "جنيهات", "جنيهاً"),
("قرش", "قرشان", "قروش", "قرش")]
CURRENCY_KWD = [("دينار", "ديناران", "دينارات", "ديناراً"),
("فلس", "فلسان", "فلس", "فلس")]
ARABIC_ONES = [
"", "واحد", "اثنان", "ثلاثة", "أربعة", "خمسة", "ستة", "سبعة", "ثمانية",
"تسعة",
"عشرة", "أحد عشر", "اثنا عشر", "ثلاثة عشر", "أربعة عشر", "خمسة عشر",
"ستة عشر", "سبعة عشر", "ثمانية عشر",
"تسعة عشر"
]
class Num2Word_Base:
CURRENCY_FORMS = {}
CURRENCY_ADJECTIVES = {}
def __init__(self):
self.is_title = False
self.precision = 2
self.exclude_title = []
self.negword = "(-) "
self.pointword = "(.)"
self.errmsg_nonnum = "type: %s not in [long, int, float]"
self.errmsg_floatord = "Cannot treat float %s as ordinal."
self.errmsg_negord = "Cannot treat negative num %s as ordinal."
self.errmsg_toobig = "abs(%s) must be less than %s."
self.setup()
# uses cards
if any(hasattr(self, field) for field in
['high_numwords', 'mid_numwords', 'low_numwords']):
self.cards = OrderedDict()
self.set_numwords()
self.MAXVAL = 1000 * next(iter(self.cards.keys()))
def set_numwords(self):
self.set_high_numwords(self.high_numwords)
self.set_mid_numwords(self.mid_numwords)
self.set_low_numwords(self.low_numwords)
def set_high_numwords(self, *args):
raise NotImplementedError
def set_mid_numwords(self, mid):
for key, val in mid:
self.cards[key] = val
def set_low_numwords(self, numwords):
for word, n in zip(numwords, range(len(numwords) - 1, -1, -1)):
self.cards[n] = word
def splitnum(self, value):
for elem in self.cards:
if elem > value:
continue
out = []
if value == 0:
div, mod = 1, 0
else:
div, mod = divmod(value, elem)
if div == 1:
out.append((self.cards[1], 1))
else:
if div == value: # The system tallies, eg Roman Numerals
return [(div * self.cards[elem], div * elem)]
out.append(self.splitnum(div))
out.append((self.cards[elem], elem))
if mod:
out.append(self.splitnum(mod))
return out
def parse_minus(self, num_str):
"""Detach minus and return it as symbol with new num_str."""
if num_str.startswith('-'):
# Extra spacing to compensate if there is no minus.
return '%s ' % self.negword.strip(), num_str[1:]
return '', num_str
def str_to_number(self, value):
return Decimal(value)
def to_cardinal(self, value):
try:
assert int(value) == value
except (ValueError, TypeError, AssertionError):
return self.to_cardinal_float(value)
out = ""
if value < 0:
value = abs(value)
out = "%s " % self.negword.strip()
if value >= self.MAXVAL:
raise OverflowError(self.errmsg_toobig % (value, self.MAXVAL))
val = self.splitnum(value)
words, _ = self.clean(val)
return self.title(out + words)
def float2tuple(self, value):
pre = int(value)
# Simple way of finding decimal places to update the precision
self.precision = abs(Decimal(str(value)).as_tuple().exponent)
post = abs(value - pre) * 10**self.precision
if abs(round(post) - post) < 0.01:
# We generally floor all values beyond our precision (rather than
# rounding), but in cases where we have something like 1.239999999,
# which is probably due to python's handling of floats, we actually
# want to consider it as 1.24 instead of 1.23
post = int(round(post))
else:
post = int(math.floor(post))
return pre, post
def to_cardinal_float(self, value):
try:
float(value) == value
except (ValueError, TypeError, AssertionError, AttributeError):
raise TypeError(self.errmsg_nonnum % value)
pre, post = self.float2tuple(float(value))
post = str(post)
post = '0' * (self.precision - len(post)) + post
out = [self.to_cardinal(pre)]
if self.precision:
out.append(self.title(self.pointword))
for i in range(self.precision):
curr = int(post[i])
out.append(to_s(self.to_cardinal(curr)))
return " ".join(out)
def merge(self, left, right):
raise NotImplementedError
def clean(self, val):
out = val
while len(val) != 1:
out = []
left, right = val[:2]
if isinstance(left, tuple) and isinstance(right, tuple):
out.append(self.merge(left, right))
if val[2:]:
out.append(val[2:])
else:
for elem in val:
if isinstance(elem, list):
if len(elem) == 1:
out.append(elem[0])
else:
out.append(self.clean(elem))
else:
out.append(elem)
val = out
return out[0]
def title(self, value):
if self.is_title:
out = []
value = value.split()
for word in value:
if word in self.exclude_title:
out.append(word)
else:
out.append(word[0].upper() + word[1:])
value = " ".join(out)
return value
def verify_ordinal(self, value):
if not value == int(value):
raise TypeError(self.errmsg_floatord % value)
if not abs(value) == value:
raise TypeError(self.errmsg_negord % value)
def to_ordinal(self, value):
return self.to_cardinal(value)
def to_ordinal_num(self, value):
return value
# Trivial version
def inflect(self, value, text):
text = text.split("/")
if value == 1:
return text[0]
return "".join(text)
# //CHECK: generalise? Any others like pounds/shillings/pence?
def to_splitnum(self, val, hightxt="", lowtxt="", jointxt="",
divisor=100, longval=True, cents=True):
out = []
if isinstance(val, float):
high, low = self.float2tuple(val)
else:
try:
high, low = val
except TypeError:
high, low = divmod(val, divisor)
if high:
hightxt = self.title(self.inflect(high, hightxt))
out.append(self.to_cardinal(high))
if low:
if longval:
if hightxt:
out.append(hightxt)
if jointxt:
out.append(self.title(jointxt))
elif hightxt:
out.append(hightxt)
if low:
if cents:
out.append(self.to_cardinal(low))
else:
out.append("%02d" % low)
if lowtxt and longval:
out.append(self.title(self.inflect(low, lowtxt)))
return " ".join(out)
def to_year(self, value, **kwargs):
return self.to_cardinal(value)
def pluralize(self, n, forms):
"""
Should resolve gettext form:
http://docs.translatehouse.org/projects/localization-guide/en/latest/l10n/pluralforms.html
"""
raise NotImplementedError
def _money_verbose(self, number, currency):
return self.to_cardinal(number)
def _cents_verbose(self, number, currency):
return self.to_cardinal(number)
def _cents_terse(self, number, currency):
return "%02d" % number
def to_currency(self, val, currency='EUR', cents=True, separator=',',
adjective=False):
"""
Args:
val: Numeric value
currency (str): Currency code
cents (bool): Verbose cents
separator (str): Cent separator
adjective (bool): Prefix currency name with adjective
Returns:
str: Formatted string
"""
left, right, is_negative = parse_currency_parts(val)
try:
cr1, cr2 = self.CURRENCY_FORMS[currency]
except KeyError:
raise NotImplementedError(
'Currency code "%s" not implemented for "%s"' %
(currency, self.__class__.__name__))
if adjective and currency in self.CURRENCY_ADJECTIVES:
cr1 = prefix_currency(self.CURRENCY_ADJECTIVES[currency], cr1)
minus_str = "%s " % self.negword.strip() if is_negative else ""
money_str = self._money_verbose(left, currency)
cents_str = self._cents_verbose(right, currency) \
if cents else self._cents_terse(right, currency)
return '%s%s %s%s %s %s' % (
minus_str,
money_str,
self.pluralize(left, cr1),
separator,
cents_str,
self.pluralize(right, cr2)
)
def setup(self):
pass
class Num2Word_AR_Fixed(Num2Word_Base):
errmsg_toobig = "abs(%s) must be less than %s."
MAXVAL = 10**51
def __init__(self):
super().__init__()
self.number = 0
self.arabicPrefixText = ""
self.arabicSuffixText = ""
self.integer_value = 0
self._decimalValue = ""
self.partPrecision = 2
self.currency_unit = CURRENCY_SR[0]
self.currency_subunit = CURRENCY_SR[1]
self.isCurrencyPartNameFeminine = True
self.isCurrencyNameFeminine = False
self.separator = 'و'
self.arabicOnes = ARABIC_ONES
self.arabicFeminineOnes = [
"", "إحدى", "اثنتان", "ثلاث", "أربع", "خمس", "ست", "سبع", "ثمان",
"تسع",
"عشر", "إحدى عشرة", "اثنتا عشرة", "ثلاث عشرة", "أربع عشرة",
"خمس عشرة", "ست عشرة", "سبع عشرة", "ثماني عشرة",
"تسع عشرة"
]
self.arabicOrdinal = [
"", "اول", "ثاني", "ثالث", "رابع", "خامس", "سادس", "سابع", "ثامن",
"تاسع", "عاشر", "حادي عشر", "ثاني عشر", "ثالث عشر", "رابع عشر",
"خامس عشر", "سادس عشر", "سابع عشر", "ثامن عشر", "تاسع عشر"
]
self.arabicTens = [
"عشرون", "ثلاثون", "أربعون", "خمسون", "ستون", "سبعون", "ثمانون",
"تسعون"
]
self.arabicHundreds = [
"", "مائة", "مئتان", "ثلاثمائة", "أربعمائة", "خمسمائة", "ستمائة",
"سبعمائة", "ثمانمائة", "تسعمائة"
]
self.arabicAppendedTwos = [
"مئتا", "ألفا", "مليونا", "مليارا", "تريليونا", "كوادريليونا",
"كوينتليونا", "سكستيليونا", "سبتيليونا", "أوكتيليونا ",
"نونيليونا", "ديسيليونا", "أندسيليونا", "دوديسيليونا",
"تريديسيليونا", "كوادريسيليونا", "كوينتينيليونا"
]
self.arabicTwos = [
"مئتان", "ألفان", "مليونان", "ملياران", "تريليونان",
"كوادريليونان", "كوينتليونان", "سكستيليونان", "سبتيليونان",
"أوكتيليونان ", "نونيليونان ", "ديسيليونان", "أندسيليونان",
"دوديسيليونان", "تريديسيليونان", "كوادريسيليونان", "كوينتينيليونان"
]
self.arabicGroup = [
"مائة", "ألف", "مليون", "مليار", "تريليون", "كوادريليون",
"كوينتليون", "سكستيليون", "سبتيليون", "أوكتيليون", "نونيليون",
"ديسيليون", "أندسيليون", "دوديسيليون", "تريديسيليون",
"كوادريسيليون", "كوينتينيليون"
]
self.arabicAppendedGroup = [
"", "ألفاً", "مليوناً", "ملياراً", "تريليوناً", "كوادريليوناً",
"كوينتليوناً", "سكستيليوناً", "سبتيليوناً", "أوكتيليوناً",
"نونيليوناً", "ديسيليوناً", "أندسيليوناً", "دوديسيليوناً",
"تريديسيليوناً", "كوادريسيليوناً", "كوينتينيليوناً"
]
self.arabicPluralGroups = [
"", "آلاف", "ملايين", "مليارات", "تريليونات", "كوادريليونات",
"كوينتليونات", "سكستيليونات", "سبتيليونات", "أوكتيليونات",
"نونيليونات", "ديسيليونات", "أندسيليونات", "دوديسيليونات",
"تريديسيليونات", "كوادريسيليونات", "كوينتينيليونات"
]
assert len(self.arabicAppendedGroup) == len(self.arabicGroup)
assert len(self.arabicPluralGroups) == len(self.arabicGroup)
assert len(self.arabicAppendedTwos) == len(self.arabicTwos)
def number_to_arabic(self, arabic_prefix_text, arabic_suffix_text):
self.arabicPrefixText = arabic_prefix_text
self.arabicSuffixText = arabic_suffix_text
self.extract_integer_and_decimal_parts()
def extract_integer_and_decimal_parts(self):
splits = re.split('\\.', str(self.number))
self.integer_value = int(splits[0])
if len(splits) > 1:
self._decimalValue = int(self.decimal_value(splits[1]))
else:
self._decimalValue = 0
def decimal_value(self, decimal_part):
    """Normalize *decimal_part* to exactly ``self.partPrecision`` digits.

    Pads with trailing zeros when the string is shorter than the
    precision, truncates when it is longer, and returns it unchanged
    when it already matches.

    :param str decimal_part: raw digits after the decimal point
    :return: a digit string of length ``self.partPrecision``
    :rtype: str
    """
    precision = self.partPrecision
    # BUGFIX: the original compared the lengths with `is not`, which tests
    # object identity, not equality; for ints outside CPython's small-int
    # cache that comparison is unreliable.  Use `!=` instead.
    if precision != len(decimal_part):
        # Pad to the precision (no-op when already longer), then truncate.
        return decimal_part.ljust(precision, '0')[:precision]
    return decimal_part
def digit_feminine_status(self, digit, group_level):
    """Pick the masculine or feminine word for a single digit.

    :param digit: digit value (int or digit string)
    :param group_level: -1 for the decimal/subunit part, 0 for the units
        group, any other value for higher groups (always masculine)
    :return: entry from ``arabicFeminineOnes`` or ``arabicOnes``
    """
    index = int(digit)
    if group_level == -1:
        # Subunit part: gender follows the subunit currency name.
        feminine = self.isCurrencyPartNameFeminine
    elif group_level == 0:
        # Units group: gender follows the main currency name.
        feminine = self.isCurrencyNameFeminine
    else:
        feminine = False
    return self.arabicFeminineOnes[index] if feminine else self.arabicOnes[index]
def process_arabic_group(self, group_number, group_level,
                         remaining_number):
    """Render one 3-digit group of the number as Arabic words.

    :param group_number: value of the group (0..999), or the decimal
        part when ``group_level`` is -1
    :param group_level: scale index of the group (0 = units group,
        1 = thousands, ...); -1 means the decimal/subunit part
    :param remaining_number: what remains of the number above this group
    :return: the Arabic wording for this group (possibly empty)
    """
    tens = Decimal(group_number) % Decimal(100)
    hundreds = Decimal(group_number) / Decimal(100)
    ret_val = ""
    if int(hundreds) > 0:
        # Exactly 200 with no tens uses the special appended dual form.
        if tens == 0 and int(hundreds) == 2:
            ret_val = f"{self.arabicAppendedTwos[0]}"
        else:
            ret_val = f"{self.arabicHundreds[int(hundreds)]}"
        if ret_val and tens != 0:
            # Join hundreds and the rest with the conjunction "و" (and).
            ret_val += " و "
    if tens > 0:
        if tens < 20:
            # if int(group_level) >= len(self.arabicTwos):
            #     raise OverflowError(self.errmsg_toobig %
            #                         (self.number, self.MAXVAL))
            assert int(group_level) < len(self.arabicTwos)
            if tens == 2 and int(hundreds) == 0 and group_level > 0:
                # A bare "two" of a scale word (e.g. "two thousand") uses
                # the dual form; the appended variant applies only when the
                # whole number is exactly 2 * 10^(3k).
                power = int(math.log10(self.integer_value))
                if self.integer_value > 10 and power % 3 == 0 and \
                        self.integer_value == 2 * (10 ** power):
                    ret_val = f"{self.arabicAppendedTwos[int(group_level)]}"
                else:
                    ret_val = f"{self.arabicTwos[int(group_level)]}"
            else:
                if tens == 1 and group_level > 0 and hundreds == 0:
                    # Note: this never happens
                    # (hundreds == 0 only if group_number is 0)
                    ret_val += ""
                elif (tens == 1 or tens == 2) and (
                        group_level == 0 or group_level == -1) and \
                        hundreds == 0 and remaining_number == 0:
                    # Note: this never happens (idem)
                    ret_val += ""
                elif tens == 1 and group_level > 0:
                    # A bare "one" of a scale word is just the scale word.
                    ret_val += self.arabicGroup[int(group_level)]
                else:
                    ret_val += self.digit_feminine_status(int(tens),
                                                          group_level)
        else:
            # 20..99: split into the ones digit and the tens word
            # (index shifted by 2 since arabicTens starts at "twenty").
            ones = tens % 10
            tens = (tens / 10) - 2
            if ones > 0:
                ret_val += self.digit_feminine_status(ones, group_level)
            if ret_val and ones != 0:
                ret_val += " و "
            ret_val += self.arabicTens[int(tens)]
    return ret_val
# Deliberately not the built-in `abs`: the original author notes that
# `abs` can lose precision for very big numbers.
def absolute(self, number):
    """Return the magnitude of *number* without using built-in ``abs``."""
    return -number if number < 0 else number
# Deliberately not `"{:09d}".format(number)`: the original author notes
# that direct string conversion can lose precision for very big numbers.
def to_str(self, number):
    """Return *number* as a string; non-integral values get exactly
    nine decimal digits (zero-padded)."""
    whole = int(number)
    if whole == number:
        # integral value (int, or float with no fractional part)
        return str(whole)
    fractional = round((number - whole) * 10 ** 9)
    return f"{whole}.{fractional:09d}"
def convert(self, value):
    """Convert *value* to its Arabic textual form.

    Stores the string form on ``self.number``, re-parses it into
    integer/decimal parts, then assembles the final phrase.
    """
    text_form = self.to_str(value)
    self.number = text_form
    self.number_to_arabic(self.arabicPrefixText, self.arabicSuffixText)
    return self.convert_to_arabic()
def convert_to_arabic(self):
    """Assemble the complete Arabic phrase for ``self.number``.

    Walks the integer part in 3-digit groups from least to most
    significant, joins the group wordings with scale words, then appends
    the currency unit name, the decimal part with its subunit name, and
    finally the configured prefix/suffix texts.
    """
    temp_number = Decimal(self.number)
    if temp_number == Decimal(0):
        return "صفر"
    # Wording for the decimal/subunit part (group_level -1).
    decimal_string = self.process_arabic_group(self._decimalValue,
                                               -1,
                                               Decimal(0))
    ret_val = ""
    group = 0
    while temp_number > Decimal(0):
        temp_number_dec = Decimal(str(temp_number))
        try:
            number_to_process = int(temp_number_dec % Decimal(str(1000)))
        except decimal.InvalidOperation:
            # Widen the context precision so the modulo does not fail
            # on numbers with more digits than the current context allows.
            decimal.getcontext().prec = len(
                temp_number_dec.as_tuple().digits
            )
            number_to_process = int(temp_number_dec % Decimal(str(1000)))
        temp_number = int(temp_number_dec / Decimal(1000))
        group_description = \
            self.process_arabic_group(number_to_process,
                                      group,
                                      Decimal(floor(temp_number)))
        if group_description:
            if group > 0:
                if ret_val:
                    # Join groups with the Arabic conjunction "و" (and).
                    ret_val = f"و {ret_val}"
                if number_to_process != 2 and number_to_process != 1:
                    # if group >= len(self.arabicGroup):
                    #     raise OverflowError(self.errmsg_toobig %
                    #                         (self.number, self.MAXVAL)
                    #                         )
                    assert group < len(self.arabicGroup)
                    if number_to_process % 100 != 1:
                        if 3 <= number_to_process <= 10:
                            # Counts 3..10 take the plural scale word.
                            ret_val = f"{self.arabicPluralGroups[group]} {ret_val}"
                        else:
                            if ret_val:
                                ret_val = f"{self.arabicAppendedGroup[group]} {ret_val}"
                            else:
                                ret_val = f"{self.arabicGroup[group]} {ret_val}"
                    else:
                        ret_val = f"{self.arabicGroup[group]} {ret_val}"
            ret_val = f"{group_description} {ret_val}"
        group += 1
    formatted_number = ""
    if self.arabicPrefixText:
        formatted_number += f"{self.arabicPrefixText} "
    formatted_number += ret_val
    if self.integer_value != 0:
        # Choose the currency unit form per Arabic number agreement
        # (singular / dual / 3-10 plural / 11-99 form).
        remaining100 = int(self.integer_value % 100)
        if remaining100 == 0 or remaining100 == 1:
            formatted_number += self.currency_unit[0]
        elif remaining100 == 2:
            if self.integer_value == 2:
                formatted_number += self.currency_unit[1]
            else:
                formatted_number += self.currency_unit[0]
        elif 3 <= remaining100 <= 10:
            formatted_number += self.currency_unit[2]
        elif 11 <= remaining100 <= 99:
            formatted_number += self.currency_unit[3]
    if self._decimalValue != 0:
        formatted_number += f" {self.separator} "
        formatted_number += decimal_string
    if self._decimalValue != 0:
        formatted_number += " "
        # Same agreement rules applied to the subunit name.
        remaining100 = int(self._decimalValue % 100)
        if remaining100 == 0 or remaining100 == 1:
            formatted_number += self.currency_subunit[0]
        elif remaining100 == 2:
            formatted_number += self.currency_subunit[1]
        elif 3 <= remaining100 <= 10:
            formatted_number += self.currency_subunit[2]
        elif 11 <= remaining100 <= 99:
            formatted_number += self.currency_subunit[3]
    if self.arabicSuffixText:
        formatted_number += f" {self.arabicSuffixText}"
    return formatted_number
def validate_number(self, number):
    """Return *number* unchanged when below ``self.MAXVAL``.

    :raises OverflowError: when *number* is >= ``self.MAXVAL``
    """
    if number < self.MAXVAL:
        return number
    raise OverflowError(self.errmsg_toobig % (number, self.MAXVAL))
def set_currency_prefer(self, currency):
    """Load the unit/subunit word tables for *currency*.

    Known codes are 'EGP' and 'KWD'; any other value falls back to the
    Saudi Riyal tables.
    """
    if currency == 'EGP':
        table = CURRENCY_EGP
    elif currency == 'KWD':
        table = CURRENCY_KWD
    else:
        # Unrecognised code: default to Saudi Riyal.
        table = CURRENCY_SR
    self.currency_unit = table[0]
    self.currency_subunit = table[1]
def to_currency(self, value, currency='SR', prefix='', suffix=''):
    """Spell out *value* as an amount of *currency*.

    :param value: the amount to convert
    :param currency: currency code ('SR', 'EGP' or 'KWD')
    :param prefix: text placed before the result
    :param suffix: text placed after the result
    :return: the Arabic wording of the amount
    """
    self.set_currency_prefer(currency)
    # Currency mode: masculine agreement, parts joined by "و" (and).
    self.isCurrencyNameFeminine = False
    self.separator = "و"
    self.arabicOnes = ARABIC_ONES
    self.arabicPrefixText, self.arabicSuffixText = prefix, suffix
    return self.convert(value=value)
def to_ordinal(self, number, prefix=''):
    """Return the Arabic ordinal wording of *number*.

    Numbers up to 19 come from the dedicated ordinal table; note that
    *prefix* is ignored for those (original behavior).
    """
    if number <= 19:
        return str(self.arabicOrdinal[number])
    # Gender agreement: 20..99 use the feminine form, larger do not.
    self.isCurrencyNameFeminine = number < 100
    self.currency_subunit = ('', '', '', '')
    self.currency_unit = ('', '', '', '')
    self.arabicPrefixText = prefix
    self.arabicSuffixText = ""
    return self.convert(self.absolute(number)).strip()
def to_year(self, value):
    """Years read the same as cardinals in Arabic; validate then delegate."""
    return self.to_cardinal(self.validate_number(value))
def to_ordinal_num(self, value):
    """Ordinal wording of *value* with surrounding whitespace removed."""
    ordinal_text = self.to_ordinal(value)
    return ordinal_text.strip()
def to_cardinal(self, number):
    """Return the Arabic cardinal wording of *number*.

    Negative values are prefixed with 'سالب ' (minus); the magnitude is
    converted with plain (non-currency) word tables.
    """
    self.isCurrencyNameFeminine = False
    number = self.validate_number(number)
    sign_prefix = 'سالب ' if number < 0 else ''
    # Plain-number mode: no currency words, comma separator.
    self.separator = ','
    self.currency_subunit = ('', '', '', '')
    self.currency_unit = ('', '', '', '')
    self.arabicPrefixText = ""
    self.arabicSuffixText = ""
    self.arabicOnes = ARABIC_ONES
    return sign_prefix + self.convert(value=self.absolute(number)).strip()
def parse_currency_parts(value, is_int_with_cents=True):
    """Split a monetary *value* into its components.

    :param value: an int, or anything accepted by ``Decimal`` (str, float...)
    :param bool is_int_with_cents: when *value* is an int, interpret it as
        a number of cents rather than whole units
    :return: (integer units, cents, is_negative) tuple
    """
    if isinstance(value, int):
        negative = value < 0
        magnitude = abs(value)
        if is_int_with_cents:
            # An integer is assumed to be expressed in cents.
            integer, cents = divmod(magnitude, 100)
        else:
            integer, cents = magnitude, 0
    else:
        # Round to 2 decimal places, half away from zero.
        amount = Decimal(value).quantize(
            Decimal('.01'),
            rounding=ROUND_HALF_UP,
        )
        negative = amount < 0
        amount = abs(amount)
        whole, fraction = divmod(amount, 1)
        integer = int(whole)
        cents = int(fraction * 100)
    return integer, cents, negative
def prefix_currency(prefix, base):
    """Return *base* with *prefix* (plus a space) prepended to each form."""
    return tuple("%s %s" % (prefix, form) for form in base)
# Python 2 compatibility shim, kept only for the backward-compatible
# `strtype` name.  `basestring` no longer exists on Python 3, so the old
# try/except always resolved to `str`; assign it directly.
strtype = str
def to_s(val):
    """Return *val* as a string.

    Historically this tried Python 2's ``unicode`` first and fell back to
    ``str`` on ``NameError``; on Python 3 that fallback always triggered,
    so the function is simply ``str``.
    """
    return str(val)

View file

@ -6,11 +6,8 @@ Some functions related to the os and os.path module
"""
import os
import re
import warnings
import zipfile
from os.path import join as opj
WINDOWS_RESERVED = re.compile(r'''
^
@ -51,21 +48,6 @@ def clean_filename(name, replacement=''):
return "Untitled"
return re.sub(r'[^\w_.()\[\] -]+', replacement, name).lstrip('.-') or "Untitled"
def listdir(dir, recursive=False):
"""Allow to recursively get the file listing following symlinks, returns
paths relative to the provided `dir` except completely broken if the symlink
it follows leaves `dir`...
"""
assert recursive, "use `os.listdir` or `pathlib.Path.iterdir`"
warnings.warn("Since 16.0, use os.walk or a recursive glob", DeprecationWarning, stacklevel=2)
dir = os.path.normpath(dir)
res = []
for root, _, files in os.walk(dir, followlinks=True):
r = os.path.relpath(root, dir)
yield from (opj(r, f) for f in files)
return res
def zip_dir(path, stream, include_dir=True, fnct_sort=None): # TODO add ignore list
"""
: param fnct_sort : Function to be passed to "key" parameter of built-in

View file

@ -1,15 +1,14 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
## this functions are taken from the setuptools package (version 0.6c8)
## http://peak.telecommunity.com/DevCenter/PkgResources#parsing-utilities
from __future__ import print_function
import re
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','_':'final-','rc':'c','dev':'@','saas':'','~':''}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part,part)
@ -22,7 +21,8 @@ def _parse_version_parts(s):
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
def parse_version(s: str) -> tuple[str, ...]:
"""Convert a version string to a chronologically-sortable key
This is a rough cross between distutils' StrictVersion and LooseVersion;
@ -52,7 +52,7 @@ def parse_version(s):
candidates, and therefore are not as new as a version string that does not
contain them.
"""
parts = []
parts: list[str] = []
for part in _parse_version_parts((s or '0.1').lower()):
if part.startswith('*'):
if part<'*final': # remove '-' before a prerelease tag

View file

@ -2,6 +2,7 @@
import importlib
import io
import re
import unicodedata
import sys
from datetime import datetime
from hashlib import md5
@ -14,6 +15,7 @@ from reportlab.lib.units import cm
from reportlab.lib.utils import ImageReader
from reportlab.pdfgen import canvas
from odoo.tools.arabic_reshaper import reshape
from odoo.tools.parse_version import parse_version
from odoo.tools.misc import file_open
@ -23,6 +25,24 @@ try:
except ImportError:
TTFont = None
# ----------------------------------------------------------
# PyPDF2 hack
# ensure that zlib does not throw error -5 when decompressing
# because some pdf won't fit into allocated memory
# https://docs.python.org/3/library/zlib.html#zlib.decompressobj
# ----------------------------------------------------------
try:
import zlib
def _decompress(data):
zobj = zlib.decompressobj()
return zobj.decompress(data)
import PyPDF2.filters # needed after PyPDF2 2.0.0 and before 2.11.0
PyPDF2.filters.decompress = _decompress
except ImportError:
pass # no fix required
# might be a good case for exception groups
error = None
@ -177,16 +197,6 @@ def fill_form_fields_pdf(writer, form_fields):
_logger.info("Fields couldn't be filled in this page.")
continue
for raw_annot in page.get('/Annots', []):
annot = raw_annot.getObject()
for field in form_fields:
# Modifying the form flags to force all text fields read-only
if annot.get('/T') == field:
form_flags = annot.get('/Ff', 0)
readonly_flag = 1 # 1st bit sets readonly
new_flags = form_flags | readonly_flag
annot.update({NameObject("/Ff"): NumberObject(new_flags)})
def rotate_pdf(pdf):
''' Rotate clockwise PDF (90°) into a new PDF.
@ -280,6 +290,38 @@ def add_banner(pdf_stream, text=None, logo=False, thickness=2 * cm):
return output
def reshape_text(text):
"""
Display the text based on its first character's Unicode bidirectional type, to choose right-to-left or left-to-right rendering
This is just a hotfix to make things work
In the future, the clean way would be to use the arabic-reshaper and python3-bidi libraries
Here we want to check the text is in a right-to-left language and if then, flip before returning it.
Depending on the language, the type should be Left-to-Right, Right-to-Left, or Right-to-Left Arabic
(Refer to this https://www.unicode.org/reports/tr9/#Bidirectional_Character_Types)
The standard module ```unicodedata``` provides the function ```bidirectional(str)```, which takes a character as
argument and returns its bidirectional type:
- 'L' for Left-to-Right character
- 'R' or 'AL' for Right-to-Left character
So we have to check if the first character of the text is of type 'R' or 'AL', and check that there is no
character in the rest of the text that is of type 'L'. Based on that we can confirm we have a fully Right-to-Left language,
then we can flip the text before returning it.
"""
if not text:
return ''
maybe_rtl_letter = text.lstrip()[:1] or ' '
maybe_ltr_text = text[1:]
first_letter_is_rtl = unicodedata.bidirectional(maybe_rtl_letter) in ('AL', 'R')
no_letter_is_ltr = not any(unicodedata.bidirectional(letter) == 'L' for letter in maybe_ltr_text)
if first_letter_is_rtl and no_letter_is_ltr:
text = reshape(text)
text = text[::-1]
return text
class OdooPdfFileReader(PdfFileReader):
# OVERRIDE of PdfFileReader to add the management of multiple embedded files.

View file

@ -42,6 +42,9 @@ class PdfReader(_Reader):
def getDocumentInfo(self):
return self.metadata
def getFormTextFields(self):
return self.get_form_text_fields()
class PdfWriter(_Writer):
def add_metadata(self, infos: Dict[str, Any]) -> None:

View file

@ -17,6 +17,12 @@ class PdfReader(PdfFileReader):
def __init__(self, stream, strict=True, warndest=None, overwriteWarnings=True):
super().__init__(stream, strict=strict, warndest=warndest, overwriteWarnings=False)
def getFormTextFields(self):
if self.getFields() is None:
# Prevent this version of PyPDF2 from trying to iterate over `None`
return None
return super().getFormTextFields()
class PdfWriter(PdfFileWriter):
def get_fields(self, *args, **kwargs):

View file

@ -1,181 +1,431 @@
import random
"""
Database Population via Duplication
This tool provides utilities to duplicate records across models in Odoo, while maintaining referential integrity,
handling field variations, and optimizing insertion performance. The duplication is controlled by a `factors` argument
that specifies how many times each record should be duplicated. The duplication process takes into account fields
that require unique constraints, distributed values (e.g., date fields), and relational fields (e.g., Many2one, Many2many).
Key Features:
-------------
1. **Field Variations**: Handles variations for certain fields to ensure uniqueness or to distribute values, such as:
- Char/Text fields: Appends a postfix or variation to existing data.
- Date/Datetime fields: Distributes dates within a specified range.
2. **Efficient Duplication**: Optimizes the duplication process by:
- Dropping and restoring indexes to speed up bulk inserts.
- Disabling foreign key constraint checks during duplication to avoid integrity errors.
- Dynamically adjusting sequences to maintain consistency in auto-increment fields like `id`.
3. **Field-Specific Logic**:
- Unique fields are identified and modified to avoid constraint violations.
- Many2one fields are remapped to newly duplicated records.
- One2many and Many2many relationships are handled by duplicating both sides of the relationship.
4. **Foreign Key and Index Management**:
- Indexes are temporarily dropped during record creation and restored afterward to improve performance.
- Foreign key checks are disabled temporarily to prevent constraint violations during record insertion.
5. **Dependency Management**: Ensures proper population order of models with dependencies (e.g., `_inherits` fields)
by resolving dependencies before duplicating records.
6. **Dynamic SQL Generation**: Uses SQL queries to manipulate and duplicate data directly at the database level,
ensuring performance and flexibility in handling large datasets.
"""
from collections import defaultdict
from contextlib import contextmanager
from datetime import datetime
from psycopg2.errors import InsufficientPrivilege
from dateutil.relativedelta import relativedelta
from odoo.tools import pycompat
import logging
from odoo.api import Environment
from odoo.tools.sql import SQL
from odoo.fields import Field, Many2one
from odoo.models import Model
def Random(seed):
""" Return a random number generator object with the given seed. """
r = random.Random()
r.seed(seed, version=2)
return r
_logger = logging.getLogger(__name__)
# Min/Max value for a date/datetime field
MIN_DATETIME = datetime((datetime.now() - relativedelta(years=4)).year, 1, 1)
MAX_DATETIME = datetime.now()
def format_str(val, counter, values):
""" Format the given value (with method ``format``) when it is a string. """
if isinstance(val, str):
return val.format(counter=counter, values=values)
return val
def chain_factories(field_factories, model_name):
""" Instantiate a generator by calling all the field factories. """
generator = root_factory()
for (fname, field_factory) in field_factories:
generator = field_factory(generator, fname, model_name)
return generator
def root_factory():
""" Return a generator with empty values dictionaries (except for the flag ``__complete``). """
yield {'__complete': False}
while True:
yield {'__complete': True}
def randomize(vals, weights=None, seed=False, formatter=format_str, counter_offset=0):
""" Return a factory for an iterator of values dicts with pseudo-randomly
chosen values (among ``vals``) for a field.
:param list vals: list in which a value will be chosen, depending on `weights`
:param list weights: list of probabilistic weights
:param seed: optional initialization of the random number generator
:param function formatter: (val, counter, values) --> formatted_value
:param int counter_offset:
:returns: function of the form (iterator, field_name, model_name) -> values
:rtype: function (iterator, str, str) -> dict
def get_field_variation_date(model: Model, field: Field, factor: int, series_alias: str) -> SQL:
"""
def generate(iterator, field_name, model_name):
r = Random('%s+field+%s' % (model_name, seed or field_name))
for counter, values in enumerate(iterator):
val = r.choices(vals, weights)[0]
values[field_name] = formatter(val, counter + counter_offset, values)
yield values
return generate
def cartesian(vals, weights=None, seed=False, formatter=format_str, then=None):
""" Return a factory for an iterator of values dicts that combines all ``vals`` for
the field with the other field values in input.
:param list vals: list in which a value will be chosen, depending on `weights`
:param list weights: list of probabilistic weights
:param seed: optional initialization of the random number generator
:param function formatter: (val, counter, values) --> formatted_value
:param function then: if defined, factory used when vals has been consumed.
:returns: function of the form (iterator, field_name, model_name) -> values
:rtype: function (iterator, str, str) -> dict
Distribute the duplication series evenly between [field-total_days, field].
We use a hard limit of (MAX_DATETIME - MIN_DATETIME) years in the past to avoid
setting duplicated records too far back in the past.
"""
def generate(iterator, field_name, model_name):
counter = 0
for values in iterator:
if values['__complete']:
break # will consume and lose an element, (complete so a filling element). If it is a problem, use peekable instead.
for val in vals:
yield {**values, field_name: formatter(val, counter, values)}
counter += 1
factory = then or randomize(vals, weights, seed, formatter, counter)
yield from factory(iterator, field_name, model_name)
return generate
total_days = min((MAX_DATETIME - MIN_DATETIME).days, factor)
cast_type = SQL(field._column_type[1])
def redistribute(value):
return SQL(
"(%(value)s - (%(factor)s - %(series_alias)s) * (%(total_days)s::float/%(factor)s) * interval '1 days')::%(cast_type)s",
value=value,
factor=factor,
series_alias=SQL.identifier(series_alias),
total_days=total_days,
cast_type=cast_type,
)
if not field.company_dependent:
return redistribute(SQL.identifier(field.name))
# company_dependent -> jsonb
return SQL(
'(SELECT jsonb_object_agg(key, %(expr)s) FROM jsonb_each_text(%(field)s))',
expr=redistribute(SQL('value::%s', cast_type)),
field=SQL.identifier(field.name),
)
def iterate(vals, weights=None, seed=False, formatter=format_str, then=None):
""" Return a factory for an iterator of values dicts that picks a value among ``vals``
for each input. Once all ``vals`` have been used once, resume as ``then`` or as a
``randomize`` generator.
:param list vals: list in which a value will be chosen, depending on `weights`
:param list weights: list of probabilistic weights
:param seed: optional initialization of the random number generator
:param function formatter: (val, counter, values) --> formatted_value
:param function then: if defined, factory used when vals has been consumed.
:returns: function of the form (iterator, field_name, model_name) -> values
:rtype: function (iterator, str, str) -> dict
def get_field_variation_char(field: Field, postfix: str | SQL | None = None) -> SQL:
"""
def generate(iterator, field_name, model_name):
counter = 0
for val in vals: # iteratable order is important, shortest first
values = next(iterator)
values[field_name] = formatter(val, counter, values)
values['__complete'] = False
yield values
counter += 1
factory = then or randomize(vals, weights, seed, formatter, counter)
yield from factory(iterator, field_name, model_name)
return generate
def constant(val, formatter=format_str):
""" Return a factory for an iterator of values dicts that sets the field
to the given value in each input dict.
:returns: function of the form (iterator, field_name, model_name) -> values
:rtype: function (iterator, str, str) -> dict
Append the `postfix` string to a char|text field.
If no postfix is provided, returns no variation
"""
def generate(iterator, field_name, _):
for counter, values in enumerate(iterator):
values[field_name] = formatter(val, counter, values)
yield values
return generate
if postfix is None:
return SQL.identifier(field.name)
if not isinstance(postfix, SQL):
postfix = SQL.identifier(postfix)
# if the field is translatable, it's a JSONB column, we vary all values for each key
if field.translate:
return SQL("""(
SELECT jsonb_object_agg(key, value || %(postfix)s)
FROM jsonb_each_text(%(field)s)
)""", field=SQL.identifier(field.name), postfix=postfix)
else:
# no postfix for fields that are an '' (no point to)
# or '/' (default/draft name for many model's records)
return SQL("""
CASE
WHEN %(field)s IS NULL OR %(field)s IN ('/', '')
THEN %(field)s
ELSE %(field)s || %(postfix)s
END
""", field=SQL.identifier(field.name), postfix=postfix)
def compute(function, seed=None):
""" Return a factory for an iterator of values dicts that computes the field value
as ``function(values, counter, random)``, where ``values`` is the other field values,
``counter`` is an integer, and ``random`` is a pseudo-random number generator.
class PopulateContext:
def __init__(self):
self.has_session_replication_role = True
:param callable function: (values, counter, random) --> field_values
:param seed: optional initialization of the random number generator
:returns: function of the form (iterator, field_name, model_name) -> values
:rtype: function (iterator, str, str) -> dict
@contextmanager
def ignore_indexes(self, model: Model):
"""
Temporarily drop indexes on table to speed up insertion.
PKey and Unique indexes are kept for constraints
"""
indexes = model.env.execute_query_dict(SQL("""
SELECT indexname AS name, indexdef AS definition
FROM pg_indexes
WHERE tablename = %s
AND indexname NOT LIKE %s
AND indexdef NOT LIKE %s
""", model._table, '%pkey', '%UNIQUE%'))
if indexes:
_logger.info('Dropping indexes on table %s...', model._table)
for index in indexes:
model.env.cr.execute(SQL('DROP INDEX %s CASCADE', SQL.identifier(index['name'])))
yield
_logger.info('Adding indexes back on table %s...', model._table)
for index in indexes:
model.env.cr.execute(index['definition'])
else:
yield
@contextmanager
def ignore_fkey_constraints(self, model: Model):
"""
Disable Fkey constraints checks by setting the session to replica.
"""
if self.has_session_replication_role:
try:
model.env.cr.execute('SET session_replication_role TO replica')
yield
model.env.cr.execute('RESET session_replication_role')
except InsufficientPrivilege:
_logger.warning("Cannot ignore Fkey constraints during insertion due to insufficient privileges for current pg_role. "
"Resetting transaction and retrying to populate without dropping the check on Fkey constraints. "
"The bulk insertion will be vastly slower than anticipated.")
model.env.cr.rollback()
self.has_session_replication_role = False
yield
else:
yield
def field_needs_variation(model: Model, field: Field) -> bool:
"""
def generate(iterator, field_name, model_name):
r = Random('%s+field+%s' % (model_name, seed or field_name))
for counter, values in enumerate(iterator):
val = function(values=values, counter=counter, random=r)
values[field_name] = val
yield values
return generate
def randint(a, b, seed=None):
""" Return a factory for an iterator of values dicts that sets the field
to a random integer between a and b included in each input dict.
:param int a: minimal random value
:param int b: maximal random value
:param int seed:
:returns: function of the form (iterator, field_name, model_name) -> values
:rtype: function (iterator, str, str) -> dict
Return True/False depending on if the field needs to be varied.
Might be necessary in the case of:
- unique constraints
- varying dates for better distribution
- field will be part of _rec_name_search, therefor variety is needed for effective searches
- field has a trigram index on it
"""
def get_rand_int(random=None, **kwargs):
return random.randint(a, b)
return compute(get_rand_int, seed=seed)
def is_unique(model_, field_):
"""
An unique constraint is enforced by Postgres as an unique index,
whether it's defined as a constraint on the table, or as an manual unique index.
Both type of constraint are present in the index catalog
"""
query = SQL("""
SELECT EXISTS(SELECT 1
FROM pg_index idx
JOIN pg_class t ON t.oid = idx.indrelid
JOIN pg_class i ON i.oid = idx.indexrelid
JOIN pg_attribute a ON a.attnum = ANY (idx.indkey) AND a.attrelid = t.oid
WHERE t.relname = %s -- tablename
AND a.attname = %s -- column
AND idx.indisunique = TRUE) AS is_unique;
""", model_._table, field_.name)
return model_.env.execute_query(query)[0][0]
def randfloat(a, b, seed=None):
""" Return a factory for an iterator of values dicts that sets the field
to a random float between a and b included in each input dict.
# Many2one fields are not considered, as a name_search would resolve it to the _rec_names_search of the related model
in_names_search = model._rec_names_search and field.name in model._rec_names_search
in_name = model._rec_name and field.name == model._rec_name
if (in_name or in_names_search) and field.type != 'many2one':
return True
if field.type in ('date', 'datetime'):
return True
if field.index == 'trigram':
return True
return is_unique(model, field)
def get_field_variation(model: Model, field: Field, factor: int, series_alias: str) -> SQL:
"""
def get_rand_float(random=None, **kwargs):
return random.uniform(a, b)
return compute(get_rand_float, seed=seed)
Returns a variation of the source field,
to avoid unique constraint, or better distribute data.
def randdatetime(*, base_date=None, relative_before=None, relative_after=None, seed=None):
""" Return a factory for an iterator of values dicts that sets the field
to a random datetime between relative_before and relative_after, relatively to
base_date
:param datetime base_date: override the default base date if needed.
:param relativedelta|timedelta relative_after: range up which we can go after the
base date. If not set, defaults to 0, i.e. only in the past of reference.
:param relativedelta|timedelta relative_before: range up which we can go before the
base date. If not set, defaults to 0, i.e. only in the future of reference.
:param seed:
:return: iterator for random dates inside the defined range
:return: a SQL(identifier|expression|subquery)
"""
base_date = base_date or datetime(2020, 1, 1)
seconds_before = relative_before and ((base_date + relative_before) - base_date).total_seconds() or 0
seconds_after = relative_after and ((base_date + relative_after) - base_date).total_seconds() or 0
match field.type:
case 'char' | 'text':
return get_field_variation_char(field, postfix=series_alias)
case 'date' | 'datetime':
return get_field_variation_date(model, field, factor, series_alias)
case 'html':
# For the sake of simplicity we don't vary html fields
return SQL.identifier(field.name)
case _:
_logger.warning("The field %s of type %s was marked to be varied, "
"but no variation branch was found! Defaulting to a raw copy.", field, field.type)
# fallback on a raw copy
return SQL.identifier(field.name)
def get_rand_datetime(random=None, **kwargs):
return base_date + relativedelta(seconds=random.randint(int(seconds_before), int(seconds_after)))
return compute(get_rand_datetime, seed=seed)
def fetch_last_id(model: Model) -> int:
query = SQL('SELECT id FROM %s ORDER BY id DESC LIMIT 1', SQL.identifier(model._table))
return model.env.execute_query(query)[0][0]
def populate_field(model: Model, field: Field, populated: dict[Model, int], factors: dict[Model, int],
table_alias: str = 't', series_alias: str = 's') -> SQL | None:
"""
Returns the source expression for copying the field (SQL(identifier|expression|subquery) | None)
`table_alias` and `series_alias` are the identifiers used to reference
the currently being populated table and it's series, respectively.
"""
def copy_noop():
return None
def copy_raw(field_):
return SQL.identifier(field_.name)
def copy(field_):
if field_needs_variation(model, field_):
return get_field_variation(model, field_, factors[model], series_alias)
else:
return copy_raw(field_)
def copy_id():
last_id = fetch_last_id(model)
populated[model] = last_id # this adds the model in the populated dict
return SQL('id + %(last_id)s * %(series_alias)s', last_id=last_id, series_alias=SQL.identifier(series_alias))
def copy_many2one(field_):
# if the comodel was priorly populated, remap the many2one to the new copies
if (comodel := model.env[field_.comodel_name]) in populated:
comodel_max_id = populated[comodel]
# we use MOD() instead of %, because % cannot be correctly escaped, it's a limitation of the SQL wrapper
return SQL("%(table_alias)s.%(field_name)s + %(comodel_max_id)s * (MOD(%(series_alias)s - 1, %(factor)s) + 1)",
table_alias=SQL.identifier(table_alias),
field_name=SQL.identifier(field_.name),
comodel_max_id=comodel_max_id,
series_alias=SQL.identifier(series_alias),
factor=factors[comodel])
return copy(field_)
if field.name == 'id':
return copy_id()
match field.type:
case 'one2many':
# there is nothing to copy, as it's value is implicitly read from the inverse Many2one
return copy_noop()
case 'many2many':
# there is nothing to do, the copying of the m2m will be handled when copying the relation table
return copy_noop()
case 'many2one':
return copy_many2one(field)
case 'many2one_reference':
# TODO: in the case of a reference field, there is no comodel,
# but it's specified as the value of the field specified by model_field.
# Not really sure how to handle this, as it involves reading the content pointed by model_field
# to check on-the-fly if it's populated or not python-side, so for now we raw-copy it.
# If we need to read on-the-fly, the populated structure needs to be in DB (via a new Model?)
return copy(field)
case 'binary':
# copy only binary field that are inlined in the table
return copy(field) if not field.attachment else copy_noop()
case _:
return copy(field)
def populate_model(model: Model, populated: dict[Model, int], factors: dict[Model, int], separator_code: int) -> None:
    """ Duplicate every existing row of ``model`` ``factors[model]`` times,
    using the current rows as templates.

    The duplication is done in a single
    ``INSERT ... SELECT ... GENERATE_SERIES`` query.  Beforehand, char/text
    fields that need variation are updated in place on the *existing* rows
    (suffixed with ``CHR(separator_code)``) so the function is re-entrant.

    :param model: the model (or duck-typed model wrapper) to populate
    :param populated: maps each already-populated model to its last id before
        population; mutated as a side effect of copying the ``id`` column
        (or via the ``defaultdict`` lookup below for id-less tables)
    :param factors: maps each model to its multiplication factor
    :param separator_code: character code of the variation separator
    """
    def update_sequence(model_):
        # realign the id sequence with the table contents after the mass insert
        model_.env.execute_query(SQL("SELECT SETVAL(%(sequence)s, %(last_id)s, TRUE)",
            sequence=f"{model_._table}_id_seq", last_id=fetch_last_id(model_)))

    def has_column(field_):
        # only stored fields backed by an actual column can be copied in SQL
        return field_.store and field_.column_type

    assert model not in populated, f"We do not populate a model ({model}) that has already been populated."
    _logger.info('Populating model %s %s times...', model._name, factors[model])
    dest_fields = []
    src_fields = []
    update_fields = []
    table_alias = 't'
    series_alias = 's'
    # process all stored fields (that has a respective column), if the model has an 'id', it's processed first
    for _, field in sorted(model._fields.items(), key=lambda pair: pair[0] != 'id'):
        if has_column(field):
            if field_needs_variation(model, field) and field.type in ('char', 'text'):
                update_fields.append(field)
            if src := populate_field(model, field, populated, factors, table_alias, series_alias):
                dest_fields.append(SQL.identifier(field.name))
                src_fields.append(src)
    # Update char/text fields for existing rows, to allow re-entrance
    if update_fields:
        query = SQL('UPDATE %(table)s SET (%(src_columns)s) = ROW(%(dest_columns)s)',
            table=SQL.identifier(model._table),
            src_columns=SQL(', ').join(SQL.identifier(field.name) for field in update_fields),
            dest_columns=SQL(', ').join(
                get_field_variation_char(field, postfix=SQL('CHR(%s)', separator_code))
                for field in update_fields))
        model.env.cr.execute(query)
    # cross join with GENERATE_SERIES(1, factor) emits each template row 'factor' times
    query = SQL("""
        INSERT INTO %(table)s (%(dest_columns)s)
        SELECT %(src_columns)s FROM %(table)s %(table_alias)s,
        GENERATE_SERIES(1, %(factor)s) %(series_alias)s
    """, table=SQL.identifier(model._table), factor=factors[model],
        dest_columns=SQL(', ').join(dest_fields), src_columns=SQL(', ').join(src_fields),
        table_alias=SQL.identifier(table_alias), series_alias=SQL.identifier(series_alias))
    model.env.cr.execute(query)
    # normally copying the 'id' will set the model entry in the populated dict,
    # but for the case of a table with no 'id' (ex: Many2many), we add manually,
    # by reading the key and having the defaultdict do the insertion, with a default value of 0
    if populated[model]:
        # in case we populated a model with an 'id', we update the sequence
        update_sequence(model)
class Many2oneFieldWrapper(Many2one):
    """ Ad-hoc Many2one field instantiated outside the regular ORM setup,
    used to describe one column of an implicit many2many relation table.
    """
    def __init__(self, model, field_name, comodel_name):
        super().__init__(comodel_name)
        self._setup_attrs(model, field_name)  # setup most of the default attrs
class Many2manyModelWrapper:
    """ Duck-typed stand-in for the implicit relation table of a many2many
    field.  It exposes just enough of the model interface (``_name``,
    ``_table``, ``_fields``, ``env``, ...) for the population algorithm to
    treat the relation table as a regular model with two many2one columns.
    """
    def __init__(self, env, field):
        self._name = field.relation  # a m2m doesn't have a _name, so we use the tablename
        self._table = field.relation
        self._inherits = {}
        self.env = env
        self._rec_name = None
        self._rec_names_search = []
        # if the field is inherited, the column attributes are defined on the base_field
        column1 = field.column1 or field.base_field.column1
        column2 = field.column2 or field.base_field.column2
        # column1 refers to the model, while column2 refers to the comodel.
        # Use the *resolved* column names as keys: ``field.column1/column2``
        # themselves may be falsy for inherited fields, which is exactly why
        # the fallback above exists.
        self._fields = {
            column1: Many2oneFieldWrapper(self, column1, field.model_name),
            column2: Many2oneFieldWrapper(self, column2, field.comodel_name),
        }

    def __repr__(self):
        return f"<Many2manyModelWrapper({self._name!r})>"

    def __eq__(self, other):
        # compare by name so a wrapper also equals a real model (or another
        # wrapper) of the same relation table; objects without a ``_name``
        # simply compare unequal instead of raising AttributeError
        return self._name == getattr(other, '_name', None)

    def __hash__(self):
        # consistent with __eq__ (name-based)
        return hash(self._name)
def infer_many2many_model(env: Environment, field: Field) -> Model | Many2manyModelWrapper:
    """ Return the relation model to use for populating the m2m ``field``:

    - if the relation table belongs to a custom model, return that model;
    - if it is an implicit table generated by the ORM, return a duck-typed
      wrapper that behaves like a fake model for the population algorithm.
    """
    matching_model_name = next(
        (name for name, model_class in env.registry.items()
         if model_class._table == field.relation),
        None,
    )
    if matching_model_name is not None:
        # the relation is backed by an existing model
        return env[matching_model_name]
    # plain relational table: hand back a wrapped version
    return Many2manyModelWrapper(env, field)
def populate_models(model_factors: dict[Model, int], separator_code: int) -> None:
    """
    Create factors new records using existing records as templates.
    If a dependency is found for a specific model, but it isn't specified by the user,
    it will inherit the factor of the dependant model.

    :param model_factors: maps each model to populate to its multiplication
        factor; extended in place when dependant comodels are discovered
    :param separator_code: character code forwarded to :func:`populate_model`
        for char/text field variation
    """
    def has_records(model_):
        # population duplicates existing rows: an empty table has no templates
        query = SQL('SELECT EXISTS (SELECT 1 FROM %s)', SQL.identifier(model_._table))
        return model_.env.execute_query(query)[0][0]

    populated: dict[Model, int] = defaultdict(int)
    ctx: PopulateContext = PopulateContext()

    def process(model_):
        # depth-first: _inherits parents first, then the model itself,
        # then the comodels reached through its X2many fields
        if model_ in populated:
            return
        if not has_records(model_):  # if there are no records, there is nothing to populate
            populated[model_] = 0
            return
        # if the model has _inherits, the delegated models need to have been populated before the current one
        for model_name in model_._inherits:
            process(model_.env[model_name])
        # dropping FK constraints and indexes makes the mass insert much cheaper
        with ctx.ignore_fkey_constraints(model_), ctx.ignore_indexes(model_):
            populate_model(model_, populated, model_factors, separator_code)
        # models on the other end of X2many relation should also be populated (ex: to avoid SO with no SOL)
        for field in model_._fields.values():
            if field.store and field.copy:
                match field.type:
                    case 'one2many':
                        comodel = model_.env[field.comodel_name]
                        if comodel != model_:
                            # comodel not requested by the user: inherit the factor
                            model_factors[comodel] = model_factors[model_]
                            process(comodel)
                    case 'many2many':
                        m2m_model = infer_many2many_model(model_.env, field)
                        model_factors[m2m_model] = model_factors[model_]
                        process(m2m_model)

    # iterate over a copy: process() may add dependant models to model_factors
    for model in list(model_factors):
        process(model)

View file

@ -11,9 +11,11 @@ import threading
import re
import functools
from psycopg2 import sql
from psycopg2 import OperationalError
from odoo import tools
from odoo.tools import SQL
_logger = logging.getLogger(__name__)
@ -146,6 +148,9 @@ class Collector:
self._processed = True
return self._entries
def summary(self):
return f"{'='*10} {self.name} {'='*10} \n Entries: {len(self._entries)}"
class SQLCollector(Collector):
"""
@ -170,6 +175,13 @@ class SQLCollector(Collector):
'time': query_time,
})
def summary(self):
total_time = sum(entry['time'] for entry in self._entries) or 1
sql_entries = ''
for entry in self._entries:
sql_entries += f"\n{'-' * 100}'\n'{entry['time']} {'*' * int(entry['time'] / total_time * 100)}'\n'{entry['full_query']}"
return super().summary() + sql_entries
class PeriodicCollector(Collector):
"""
@ -515,7 +527,7 @@ class Profiler:
Will save sql and async stack trace by default.
"""
def __init__(self, collectors=None, db=..., profile_session=None,
description=None, disable_gc=False, params=None):
description=None, disable_gc=False, params=None, log=False):
"""
:param db: database name to use to save results.
Will try to define database automatically by default.
@ -537,6 +549,8 @@ class Profiler:
self.filecache = {}
self.params = params or {} # custom parameters usable by collectors
self.profile_id = None
self.log = log
self.sub_profilers = []
self.entry_count_limit = int(self.params.get("entry_count_limit", 0)) # the limit could be set using a smarter way
self.done = False
@ -621,18 +635,23 @@ class Profiler:
for collector in self.collectors:
if collector.entries:
values[collector.name] = json.dumps(collector.entries)
query = sql.SQL("INSERT INTO {}({}) VALUES %s RETURNING id").format(
sql.Identifier("ir_profile"),
sql.SQL(",").join(map(sql.Identifier, values)),
query = SQL(
"INSERT INTO ir_profile(%s) VALUES %s RETURNING id",
SQL(",").join(map(SQL.identifier, values)),
tuple(values.values()),
)
cr.execute(query, [tuple(values.values())])
cr.execute(query)
self.profile_id = cr.fetchone()[0]
_logger.info('ir_profile %s (%s) created', self.profile_id, self.profile_session)
except OperationalError:
_logger.exception("Could not save profile in database")
finally:
if self.disable_gc:
gc.enable()
if self.params:
del self.init_thread.profiler_params
if self.log:
_logger.info(self.summary())
def _get_cm_proxy(self):
return _Nested(self)
@ -695,6 +714,13 @@ class Profiler:
"collectors": {collector.name: collector.entries for collector in self.collectors},
}, indent=4)
def summary(self):
result = ''
for profiler in [self, *self.sub_profilers]:
for collector in profiler.collectors:
result += f'\n{self.description}\n{collector.summary()}'
return result
class _Nested:
__slots__ = ("__profiler",)

View file

@ -3,37 +3,41 @@
import csv
import codecs
import io
import typing
import warnings
_reader = codecs.getreader('utf-8')
_writer = codecs.getwriter('utf-8')
def csv_reader(stream, **params):
    """ Build a :func:`csv.reader` over a *bytes* stream, transcoding it
    through the module-level UTF-8 stream reader.

    .. deprecated:: 18.0
        pass a text stream directly to :func:`csv.reader` instead.
    """
    warnings.warn("Deprecated since Odoo 18.0: can just use `csv.reader` with a text stream or use `TextIOWriter` or `codec.getreader` to transcode.", DeprecationWarning, 2)
    assert not isinstance(stream, io.TextIOBase), \
        "For cross-compatibility purposes, csv_reader takes a bytes stream"
    decoded_stream = _reader(stream)
    return csv.reader(decoded_stream, **params)
def csv_writer(stream, **params):
    """ Build a :func:`csv.writer` over a *bytes* stream, transcoding it
    through the module-level UTF-8 stream writer.

    .. deprecated:: 18.0
        pass a text stream directly to :func:`csv.writer` instead.
    """
    warnings.warn("Deprecated since Odoo 18.0: can just use `csv.writer` with a text stream or use `TextIOWriter` or `codec.getwriter` to transcode.", DeprecationWarning, 2)
    assert not isinstance(stream, io.TextIOBase), \
        "For cross-compatibility purposes, csv_writer takes a bytes stream"
    encoded_stream = _writer(stream)
    return csv.writer(encoded_stream, **params)
def to_text(source):
""" Generates a text value (an instance of text_type) from an arbitrary
source.
def to_text(source: typing.Any) -> str:
""" Generates a text value from an arbitrary source.
* False and None are converted to empty strings
* text is passed through
* bytes are decoded as UTF-8
* rest is textified via the current version's relevant data model method
* rest is textified
"""
warnings.warn("Deprecated since Odoo 18.0.", DeprecationWarning, 2)
if source is None or source is False:
return u''
return ''
if isinstance(source, bytes):
return source.decode('utf-8')
return source.decode()
if isinstance(source, str):
return source

View file

@ -1,24 +1,18 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import itertools
from collections.abc import Iterable, Iterator
from odoo.tools.sql import make_identifier, SQL, IDENT_RE
from .sql import SQL, make_identifier
def _sql_table(table: str | SQL | None) -> SQL | None:
""" Wrap an optional table as an SQL object. """
if isinstance(table, str):
return SQL.identifier(table) if IDENT_RE.match(table) else SQL(f"({table})")
return table
def _sql_from_table(alias: str, table: SQL | None) -> SQL:
def _sql_from_table(alias: str, table: SQL) -> SQL:
""" Return a FROM clause element from ``alias`` and ``table``. """
if table is None:
return SQL.identifier(alias)
return SQL("%s AS %s", table, SQL.identifier(alias))
if (alias_identifier := SQL.identifier(alias)) == table:
return table
return SQL("%s AS %s", table, alias_identifier)
def _sql_from_join(kind: SQL, alias: str, table: SQL | None, condition: SQL) -> SQL:
def _sql_from_join(kind: SQL, alias: str, table: SQL, condition: SQL) -> SQL:
""" Return a FROM clause element for a JOIN. """
return SQL("%s %s ON (%s)", kind, _sql_from_table(alias, table), condition)
@ -29,7 +23,7 @@ _SQL_JOINS = {
}
def _generate_table_alias(src_table_alias, link):
def _generate_table_alias(src_table_alias: str, link: str) -> str:
""" Generate a standard table alias name. An alias is generated as following:
- the base is the source table name (that can already be an alias)
@ -49,45 +43,49 @@ def _generate_table_alias(src_table_alias, link):
return make_identifier(f"{src_table_alias}__{link}")
class Query(object):
class Query:
""" Simple implementation of a query object, managing tables with aliases,
join clauses (with aliases, condition and parameters), where clauses (with
parameters), order, limit and offset.
:param cr: database cursor (for lazy evaluation)
:param env: model environment (for lazy evaluation)
:param alias: name or alias of the table
:param table: a table expression (``str`` or ``SQL`` object), optional
"""
def __init__(self, cr, alias: str, table: (str | SQL | None) = None):
def __init__(self, env, alias: str, table: (SQL | None) = None):
# database cursor
self._cr = cr
self._env = env
# tables {alias: table(SQL|None)}
self._tables = {alias: _sql_table(table)}
self._tables: dict[str, SQL] = {
alias: table if table is not None else SQL.identifier(alias),
}
# joins {alias: (kind(SQL), table(SQL|None), condition(SQL))}
self._joins = {}
# joins {alias: (kind(SQL), table(SQL), condition(SQL))}
self._joins: dict[str, tuple[SQL, SQL, SQL]] = {}
# holds the list of WHERE conditions (to be joined with 'AND')
self._where_clauses = []
self._where_clauses: list[SQL] = []
# order, limit, offset
self._order = None
self.limit = None
self.offset = None
# groupby, having, order, limit, offset
self.groupby: SQL | None = None
self.having: SQL | None = None
self._order: SQL | None = None
self.limit: int | None = None
self.offset: int | None = None
# memoized result
self._ids = None
self._ids: tuple[int, ...] | None = None
def make_alias(self, alias: str, link: str) -> str:
@staticmethod
def make_alias(alias: str, link: str) -> str:
""" Return an alias based on ``alias`` and ``link``. """
return _generate_table_alias(alias, link)
def add_table(self, alias: str, table: (str | SQL | None) = None):
def add_table(self, alias: str, table: (SQL | None) = None):
""" Add a table with a given alias to the from clause. """
assert alias not in self._tables and alias not in self._joins, f"Alias {alias!r} already in {self}"
self._tables[alias] = _sql_table(table)
self._tables[alias] = table if table is not None else SQL.identifier(alias)
self._ids = None
def add_join(self, kind: str, alias: str, table: str | SQL | None, condition: SQL):
@ -95,7 +93,9 @@ class Query(object):
sql_kind = _SQL_JOINS.get(kind.upper())
assert sql_kind is not None, f"Invalid JOIN type {kind!r}"
assert alias not in self._tables, f"Alias {alias!r} already used"
table = _sql_table(table)
table = table or alias
if isinstance(table, str):
table = SQL.identifier(table)
if alias in self._joins:
assert self._joins[alias] == (sql_kind, table, condition)
@ -105,10 +105,10 @@ class Query(object):
def add_where(self, where_clause: str | SQL, where_params=()):
""" Add a condition to the where clause. """
self._where_clauses.append(SQL(where_clause, *where_params))
self._where_clauses.append(SQL(where_clause, *where_params)) # pylint: disable = sql-injection
self._ids = None
def join(self, lhs_alias: str, lhs_column: str, rhs_table: str, rhs_column: str, link: str):
def join(self, lhs_alias: str, lhs_column: str, rhs_table: str | SQL, rhs_column: str, link: str) -> str:
"""
Perform a join between a table already present in the current Query object and
another table. This method is essentially a shortcut for methods :meth:`~.make_alias`
@ -127,7 +127,7 @@ class Query(object):
self.add_join('JOIN', rhs_alias, rhs_table, condition)
return rhs_alias
def left_join(self, lhs_alias: str, lhs_column: str, rhs_table: str, rhs_column: str, link: str):
def left_join(self, lhs_alias: str, lhs_column: str, rhs_table: str, rhs_column: str, link: str) -> str:
""" Add a LEFT JOIN to the current table (if necessary), and return the
alias corresponding to ``rhs_table``.
@ -146,7 +146,7 @@ class Query(object):
@order.setter
def order(self, value: SQL | str | None):
self._order = SQL(value) if value is not None else None
self._order = SQL(value) if value is not None else None # pylint: disable = sql-injection
@property
def table(self) -> str:
@ -156,15 +156,16 @@ class Query(object):
@property
def from_clause(self) -> SQL:
""" Return the FROM clause of ``self``, without the FROM keyword. """
tables = SQL(", ").join(
_sql_from_table(alias, table)
for alias, table in self._tables.items()
)
tables = SQL(", ").join(itertools.starmap(_sql_from_table, self._tables.items()))
if not self._joins:
return tables
items = [tables]
for alias, (kind, table, condition) in self._joins.items():
items.append(_sql_from_join(kind, alias, table, condition))
items = (
tables,
*(
_sql_from_join(kind, alias, table, condition)
for alias, (kind, table, condition) in self._joins.items()
),
)
return SQL(" ").join(items)
@property
@ -172,7 +173,7 @@ class Query(object):
""" Return the WHERE condition of ``self``, without the WHERE keyword. """
return SQL(" AND ").join(self._where_clauses)
def is_empty(self):
def is_empty(self) -> bool:
""" Return whether the query is known to return nothing. """
return self._ids == ()
@ -180,10 +181,12 @@ class Query(object):
""" Return the SELECT query as an ``SQL`` object. """
sql_args = map(SQL, args) if args else [SQL.identifier(self.table, 'id')]
return SQL(
"%s%s%s%s%s%s",
"%s%s%s%s%s%s%s%s",
SQL("SELECT %s", SQL(", ").join(sql_args)),
SQL(" FROM %s", self.from_clause),
SQL(" WHERE %s", self.where_clause) if self._where_clauses else SQL(),
SQL(" GROUP BY %s", self.groupby) if self.groupby else SQL(),
SQL(" HAVING %s", self.having) if self.having else SQL(),
SQL(" ORDER BY %s", self._order) if self._order else SQL(),
SQL(" LIMIT %s", self.limit) if self.limit else SQL(),
SQL(" OFFSET %s", self.offset) if self.offset else SQL(),
@ -196,7 +199,12 @@ class Query(object):
"""
if self._ids is not None and not args:
# inject the known result instead of the subquery
return SQL("%s", self._ids or (None,))
if not self._ids:
# in case we have nothing, we want to use a sub_query with no records
# because an empty tuple leads to a syntax error
# and a tuple containing just None creates issues for `NOT IN`
return SQL("(SELECT 1 WHERE FALSE)")
return SQL("%s", self._ids)
if self.limit or self.offset:
# in this case, the ORDER BY clause is necessary
@ -210,22 +218,15 @@ class Query(object):
SQL(" WHERE %s", self.where_clause) if self._where_clauses else SQL(),
)
def get_sql(self):
""" Returns (query_from, query_where, query_params). """
from_string, from_params = self.from_clause
where_string, where_params = self.where_clause
return from_string, where_string, from_params + where_params
def get_result_ids(self):
def get_result_ids(self) -> tuple[int, ...]:
""" Return the result of ``self.select()`` as a tuple of ids. The result
is memoized for future use, which avoids making the same query twice.
"""
if self._ids is None:
self._cr.execute(self.select())
self._ids = tuple(row[0] for row in self._cr.fetchall())
self._ids = tuple(id_ for id_, in self._env.execute_query(self.select()))
return self._ids
def set_result_ids(self, ids, ordered=True):
def set_result_ids(self, ids: Iterable[int], ordered: bool = True) -> None:
""" Set up the query to return the lines given by ``ids``. The parameter
``ordered`` tells whether the query must be ordered to match exactly the
sequence ``ids``.
@ -253,23 +254,22 @@ class Query(object):
self.add_where(SQL("%s IN %s", SQL.identifier(self.table, 'id'), ids))
self._ids = ids
def __str__(self):
def __str__(self) -> str:
sql = self.select()
return f"<Query: {sql.code!r} with params: {sql.params!r}>"
def __bool__(self):
return bool(self.get_result_ids())
def __len__(self):
def __len__(self) -> int:
if self._ids is None:
if self.limit or self.offset:
# optimization: generate a SELECT FROM, and then count the rows
sql = SQL("SELECT COUNT(*) FROM (%s) t", self.select(""))
else:
sql = self.select('COUNT(*)')
self._cr.execute(sql)
return self._cr.fetchone()[0]
return self._env.execute_query(sql)[0][0]
return len(self.get_result_ids())
def __iter__(self):
def __iter__(self) -> Iterator[int]:
return iter(self.get_result_ids())

View file

@ -10,7 +10,7 @@ from werkzeug import urls
from odoo.tools import safe_eval
INLINE_TEMPLATE_REGEX = re.compile(r"\{\{(.+?)\}\}")
INLINE_TEMPLATE_REGEX = re.compile(r"\{\{(.+?)(\|\|\|\s*(.*?))?\}\}")
def relativedelta_proxy(*args, **kwargs):
# dateutil.relativedelta is an old-style class and cannot be directly
@ -42,33 +42,34 @@ def parse_inline_template(text):
for match in INLINE_TEMPLATE_REGEX.finditer(text):
literal = text[current_literal_index:match.start()]
expression = match.group(1)
groups.append((literal, expression))
default = match.group(3)
groups.append((literal, expression.strip(), default or ''))
current_literal_index = match.end()
# string past last regex match
literal = text[current_literal_index:]
if literal:
groups.append((literal, ''))
groups.append((literal, '', ''))
return groups
def convert_inline_template_to_qweb(template):
template_instructions = parse_inline_template(template or '')
preview_markup = []
for string, expression in template_instructions:
for string, expression, default in template_instructions:
if expression:
preview_markup.append(Markup('{}<t t-out="{}"/>').format(string, expression))
preview_markup.append(Markup('{}<t t-out="{}">{}</t>').format(string, expression, default))
else:
preview_markup.append(string)
return Markup('').join(preview_markup)
def render_inline_template(template_instructions, variables):
results = []
for string, expression in template_instructions:
for string, expression, default in template_instructions:
results.append(string)
if expression:
result = safe_eval.safe_eval(expression, variables)
result = safe_eval.safe_eval(expression, variables) or default
if result:
results.append(str(result))

View file

@ -19,14 +19,12 @@ import functools
import logging
import sys
import types
from opcode import HAVE_ARGUMENT, opmap, opname
from opcode import opmap, opname
from types import CodeType
import werkzeug
from psycopg2 import OperationalError
from .misc import ustr
import odoo
unsafe_eval = eval
@ -261,7 +259,7 @@ def test_expr(expr, allowed_codes, mode="eval", filename=None):
except (SyntaxError, TypeError, ValueError):
raise
except Exception as e:
raise ValueError('"%s" while compiling\n%r' % (ustr(e), expr))
raise ValueError('%r while compiling\n%r' % (e, expr))
assert_valid_codeobj(allowed_codes, code_obj, expr)
return code_obj
@ -410,7 +408,7 @@ def safe_eval(expr, globals_dict=None, locals_dict=None, mode="eval", nocopy=Fal
except ZeroDivisionError:
raise
except Exception as e:
raise ValueError('%s: "%s" while evaluating\n%r' % (ustr(type(e)), ustr(e), expr))
raise ValueError('%r while evaluating\n%r' % (e, expr))
def test_python_expr(expr, mode="eval"):
try:
test_expr(expr, _SAFE_OPCODES, mode=mode)
@ -425,7 +423,7 @@ def test_python_expr(expr, mode="eval"):
}
msg = "%s : %s at line %d\n%s" % (type(err).__name__, error['message'], error['lineno'], error['error_line'])
else:
msg = ustr(err)
msg = str(err)
return msg
return False
@ -474,6 +472,10 @@ import dateutil
mods = ['parser', 'relativedelta', 'rrule', 'tz']
for mod in mods:
__import__('dateutil.%s' % mod)
# make sure to patch pytz before exposing
from odoo._monkeypatches.pytz import patch_pytz # noqa: E402, F401
patch_pytz()
datetime = wrap_module(__import__('datetime'), ['date', 'datetime', 'time', 'timedelta', 'timezone', 'tzinfo', 'MAXYEAR', 'MINYEAR'])
dateutil = wrap_module(dateutil, {
"tz": ["UTC", "tzutc"],

View file

@ -0,0 +1,559 @@
from __future__ import annotations
import ast
from abc import ABC, abstractmethod
import typing
if typing.TYPE_CHECKING:
from collections.abc import Collection, Iterable
class SetDefinitions:
    """ A collection of set definitions, where each set is defined by an id, a
    name, its supersets, and the sets that are disjoint with it. This object
    is used as a factory to create set expressions, which are combinations of
    named sets with union, intersection and complement.
    """
    __slots__ = ('__leaves',)

    def __init__(self, definitions: dict[int, dict]):
        """ Initialize the object with ``definitions``, a dict which maps each
        set id to a dict with required key ``"ref"`` (value is the set's name)
        and optional keys ``"supersets"`` (value is a collection of set ids)
        and ``"disjoints"`` (value is a collection of set ids).

        Here is an example of set definitions, with natural numbers (N), integer
        numbers (Z), rational numbers (Q), real numbers (R), imaginary numbers
        (I) and complex numbers (C)::

            {
                1: {"ref": "N", "supersets": [2]},
                2: {"ref": "Z", "supersets": [3]},
                3: {"ref": "Q", "supersets": [4]},
                4: {"ref": "R", "supersets": [6]},
                5: {"ref": "I", "supersets": [6], "disjoints": [4]},
                6: {"ref": "C"},
            }
        """
        # each leaf is indexed twice: by its id and by its ref
        self.__leaves: dict[int | str, Leaf] = {}
        for leaf_id, info in definitions.items():
            ref = info['ref']
            assert ref != '*', "The set reference '*' is reserved for the universal set."
            leaf = Leaf(leaf_id, ref)
            self.__leaves[leaf_id] = leaf
            self.__leaves[ref] = leaf
        # compute transitive closure of subsets and supersets
        # NOTE: the dicts below alias the leaves' own (mutable) sets, so the
        # in-place update() calls propagate directly onto the Leaf objects
        subsets = {leaf.id: leaf.subsets for leaf in self.__leaves.values()}
        supersets = {leaf.id: leaf.supersets for leaf in self.__leaves.values()}
        for leaf_id, info in definitions.items():
            for greater_id in info.get('supersets', ()):
                # transitive closure: smaller_ids <= leaf_id <= greater_id <= greater_ids
                smaller_ids = subsets[leaf_id]
                greater_ids = supersets[greater_id]
                for smaller_id in smaller_ids:
                    supersets[smaller_id].update(greater_ids)
                for greater_id in greater_ids:
                    subsets[greater_id].update(smaller_ids)
        # compute transitive closure of disjoint relation
        disjoints = {leaf.id: leaf.disjoints for leaf in self.__leaves.values()}
        for leaf_id, info in definitions.items():
            for distinct_id in info.get('disjoints', set()):
                # all subsets[leaf_id] are disjoint from all subsets[distinct_id]
                left_ids = subsets[leaf_id]
                right_ids = subsets[distinct_id]
                for left_id in left_ids:
                    disjoints[left_id].update(right_ids)
                for right_id in right_ids:
                    disjoints[right_id].update(left_ids)

    @property
    def empty(self) -> SetExpression:
        # the canonical empty expression (shared singleton)
        return EMPTY_UNION

    @property
    def universe(self) -> SetExpression:
        # the canonical universal expression (shared singleton)
        return UNIVERSAL_UNION

    def parse(self, refs: str, raise_if_not_found: bool = True) -> SetExpression:
        """ Return the set expression corresponding to ``refs``

        :param str refs: comma-separated list of set references
            optionally preceded by ``!`` (negative item). The result is
            an union between positive item who intersect every negative
            group.
            (e.g. ``base.group_user,base.group_portal,!base.group_system``)
        :param raise_if_not_found: when false, unknown references become
            placeholder leaves instead of raising a ``KeyError``
        """
        positives: list[Leaf] = []
        negatives: list[Leaf] = []
        for xmlid in refs.split(','):
            if xmlid.startswith('!'):
                negatives.append(~self.__get_leaf(xmlid[1:], raise_if_not_found))
            else:
                positives.append(self.__get_leaf(xmlid, raise_if_not_found))
        if positives:
            # (p1 & n1 & n2 & ...) | (p2 & n1 & n2 & ...) | ...
            return Union(Inter([leaf] + negatives) for leaf in positives)
        else:
            # only negative items: a single intersection of complements
            return Union([Inter(negatives)])

    def from_ids(self, ids: Iterable[int], keep_subsets: bool = False) -> SetExpression:
        """ Return the set expression corresponding to given set ids.

        :param keep_subsets: when true, drop any id whose strict subset is
            also in ``ids`` (keep only the most specific sets)
        """
        if keep_subsets:
            ids = set(ids)
            # the condition below reads the *set* bound above; the rebinding
            # to a list only takes effect after the comprehension completes
            ids = [leaf_id for leaf_id in ids if not any((self.__leaves[leaf_id].subsets - {leaf_id}) & ids)]
        return Union(Inter([self.__leaves[leaf_id]]) for leaf_id in ids)

    def from_key(self, key: str) -> SetExpression:
        """ Return the set expression corresponding to the given key. """
        # union_tuple = tuple(tuple(tuple(leaf_id, negative), ...), ...)
        union_tuple = ast.literal_eval(key)
        return Union([
            Inter([
                ~leaf if negative else leaf
                for leaf_id, negative in inter_tuple
                for leaf in [self.__get_leaf(leaf_id, raise_if_not_found=False)]
            ], optimal=True)
            for inter_tuple in union_tuple
        ], optimal=True)

    def get_id(self, ref: LeafIdType) -> LeafIdType | None:
        """ Return a set id from its reference, or ``None`` if it does not exist. """
        if ref == '*':
            return UNIVERSAL_LEAF.id
        leaf = self.__leaves.get(ref)
        return None if leaf is None else leaf.id

    def __get_leaf(self, ref: str | int, raise_if_not_found: bool = True) -> Leaf:
        """ Return the group object from the string.

        :param str ref: the ref of a leaf
        :raises KeyError: if ``ref`` is unknown and ``raise_if_not_found``
        """
        if ref == '*':
            return UNIVERSAL_LEAF
        if not raise_if_not_found and ref not in self.__leaves:
            # placeholder leaf for a reference that does not resolve
            return Leaf(UnknownId(ref), ref)
        return self.__leaves[ref]
class SetExpression(ABC):
    """ An object that represents a combination of named sets with union,
    intersection and complement.

    Concrete expressions are immutable; the operators ``&``, ``|`` and ``~``
    return new expressions.
    """
    @abstractmethod
    def is_empty(self) -> bool:
        """ Returns whether ``self`` is the empty set, that contains nothing. """
        raise NotImplementedError()

    @abstractmethod
    def is_universal(self) -> bool:
        """ Returns whether ``self`` is the universal set, that contains all possible elements. """
        raise NotImplementedError()

    @abstractmethod
    def invert_intersect(self, factor: SetExpression) -> SetExpression | None:
        """ Performs the inverse operation of intersection (a sort of factorization)
        such that: ``self == result & factor``.

        Returns ``None`` when no such factorization exists.
        """
        raise NotImplementedError()

    @abstractmethod
    def matches(self, user_group_ids: Iterable[int]) -> bool:
        """ Return whether the given group ids are included to ``self``. """
        raise NotImplementedError()

    @property
    @abstractmethod
    def key(self) -> str:
        """ Return a unique identifier for the expression. """
        raise NotImplementedError()

    @abstractmethod
    def __and__(self, other: SetExpression) -> SetExpression:
        raise NotImplementedError()

    @abstractmethod
    def __or__(self, other: SetExpression) -> SetExpression:
        raise NotImplementedError()

    @abstractmethod
    def __invert__(self) -> SetExpression:
        raise NotImplementedError()

    @abstractmethod
    def __eq__(self, other) -> bool:
        raise NotImplementedError()

    @abstractmethod
    def __le__(self, other: SetExpression) -> bool:
        raise NotImplementedError()

    @abstractmethod
    def __lt__(self, other: SetExpression) -> bool:
        raise NotImplementedError()

    @abstractmethod
    def __hash__(self):
        raise NotImplementedError()
class Union(SetExpression):
""" Implementation of a set expression, that represents it as a union of
intersections of named sets or their complement.
"""
def __init__(self, inters: Iterable[Inter] = (), optimal=False):
if inters and not optimal:
inters = self.__combine((), inters)
self.__inters = sorted(inters, key=lambda inter: inter.key)
self.__key = str(tuple(inter.key for inter in self.__inters))
self.__hash = hash(self.__key)
@property
def key(self) -> str:
return self.__key
@staticmethod
def __combine(inters: Iterable[Inter], inters_to_add: Iterable[Inter]) -> list[Inter]:
""" Combine some existing union of intersections with extra intersections. """
result = list(inters)
todo = list(inters_to_add)
while todo:
inter_to_add = todo.pop()
if inter_to_add.is_universal():
return [UNIVERSAL_INTER]
if inter_to_add.is_empty():
continue
for index, inter in enumerate(result):
merged = inter._union_merge(inter_to_add)
if merged is not None:
result.pop(index)
todo.append(merged)
break
else:
result.append(inter_to_add)
return result
def is_empty(self) -> bool:
""" Returns whether ``self`` is the empty set, that contains nothing. """
return not self.__inters
def is_universal(self) -> bool:
""" Returns whether ``self`` is the universal set, that contains all possible elements. """
return any(item.is_universal() for item in self.__inters)
def invert_intersect(self, factor: SetExpression) -> Union | None:
""" Performs the inverse operation of intersection (a sort of factorization)
such that: ``self == result & factor``.
"""
if factor == self:
return UNIVERSAL_UNION
rfactor = ~factor
if rfactor.is_empty() or rfactor.is_universal():
return None
rself = ~self
assert isinstance(rfactor, Union)
inters = [inter for inter in rself.__inters if inter not in rfactor.__inters]
if len(rself.__inters) - len(inters) != len(rfactor.__inters):
# not possible to invert the intersection
return None
rself_value = Union(inters)
return ~rself_value
def __and__(self, other: SetExpression) -> Union:
assert isinstance(other, Union)
if self.is_universal():
return other
if other.is_universal():
return self
if self.is_empty() or other.is_empty():
return EMPTY_UNION
if self == other:
return self
return Union(
self_inter & other_inter
for self_inter in self.__inters
for other_inter in other.__inters
)
def __or__(self, other: SetExpression) -> Union:
assert isinstance(other, Union)
if self.is_empty():
return other
if other.is_empty():
return self
if self.is_universal() or other.is_universal():
return UNIVERSAL_UNION
if self == other:
return self
inters = self.__combine(self.__inters, other.__inters)
return Union(inters, optimal=True)
def __invert__(self) -> Union:
if self.is_empty():
return UNIVERSAL_UNION
if self.is_universal():
return EMPTY_UNION
# apply De Morgan's laws
inverses_of_inters = [
# ~(A & B) = ~A | ~B
Union(Inter([~leaf]) for leaf in inter.leaves)
for inter in self.__inters
]
result = inverses_of_inters[0]
# ~(A | B) = ~A & ~B
for inverse in inverses_of_inters[1:]:
result = result & inverse
return result
def matches(self, user_group_ids) -> bool:
    """ Tell whether the given collection of group ids satisfies ``self``. """
    if self.is_empty() or not user_group_ids:
        return False
    if self.is_universal():
        return True
    # one matching intersection is enough for a union
    ids = set(user_group_ids)
    for inter in self.__inters:
        if inter.matches(ids):
            return True
    return False
def __bool__(self):
    # Deliberately unsupported: truthiness of a set expression would be
    # ambiguous (empty vs. universal); callers must use is_empty() or
    # is_universal() explicitly.
    raise NotImplementedError()
def __eq__(self, other) -> bool:
    """ Equality based on the normalized key of the union. """
    if not isinstance(other, Union):
        return False
    return self.__key == other.__key
def __le__(self, other: SetExpression) -> bool:
    """ Tell whether ``self`` is a subset of (or equal to) ``other``. """
    if not isinstance(other, Union):
        return False
    if self.__key == other.__key:
        return True
    if self.is_universal() or other.is_empty():
        return False
    if other.is_universal() or self.is_empty():
        return True
    # every intersection of self must be covered by some intersection of other
    for self_inter in self.__inters:
        if not any(self_inter <= other_inter for other_inter in other.__inters):
            return False
    return True
def __lt__(self, other: SetExpression) -> bool:
    """ Strict subset comparison. """
    return self.__le__(other) and self != other
def __str__(self):
    """ Render the union of intersections with user-readable references, e.g.
    ('base.group_user' & 'base.group_multi_company') | ('base.group_portal' & ~'base.group_multi_company') | 'base.group_public'
    """
    if self.is_empty():
        return "~*"

    def format_leaf(leaf):
        prefix = '~' if leaf.negative else ''
        return f"{prefix}{leaf.ref!r}"

    def format_inter(inter, parenthesize):
        body = " & ".join(format_leaf(leaf) for leaf in inter.leaves) or "*"
        if parenthesize and len(inter.leaves) > 1:
            return f"({body})"
        return body

    # parenthesize multi-leaf intersections only when there are several of them
    parenthesize = len(self.__inters) > 1
    return " | ".join(format_inter(inter, parenthesize) for inter in self.__inters)
def __repr__(self):
    """ Quoted form of the readable representation. """
    return repr(str(self))
def __hash__(self):
    # return the cached hash value (presumably precomputed from the
    # normalized __key at construction time — the initializer is outside
    # this view), keeping hashing consistent with __eq__
    return self.__hash
class Inter:
    """ Part of the implementation of a set expression, that represents an
    intersection of named sets or their complement.
    """
    __slots__ = ('key', 'leaves')

    def __init__(self, leaves: Iterable[Leaf] = (), optimal=False):
        """ Create the intersection of the given leaves.

        :param leaves: the leaves to intersect
        :param optimal: when true, assume ``leaves`` are already simplified
            (no redundant or contradicting leaf), skipping normalization
        """
        if leaves and not optimal:
            leaves = self.__combine((), leaves)
        # leaves are sorted by key so that equal intersections get equal keys
        self.leaves: list[Leaf] = sorted(leaves, key=lambda leaf: leaf.key)
        self.key: tuple[tuple[LeafIdType, bool], ...] = tuple(leaf.key for leaf in self.leaves)

    @staticmethod
    def __combine(leaves: Iterable[Leaf], leaves_to_add: Iterable[Leaf]) -> list[Leaf]:
        """ Combine some existing intersection of leaves with extra leaves,
        dropping leaves implied by others and collapsing to the empty
        intersection as soon as two leaves are disjoint.
        """
        result = list(leaves)
        for leaf_to_add in leaves_to_add:
            for index, leaf in enumerate(result):
                if leaf.isdisjoint(leaf_to_add):  # leaf & leaf_to_add = empty
                    return [EMPTY_LEAF]
                if leaf <= leaf_to_add:  # leaf & leaf_to_add = leaf
                    break
                if leaf_to_add <= leaf:  # leaf & leaf_to_add = leaf_to_add
                    result[index] = leaf_to_add
                    break
            else:
                # leaf_to_add is independent from all current leaves
                if not leaf_to_add.is_universal():
                    result.append(leaf_to_add)
        return result

    def is_empty(self) -> bool:
        """ Returns whether ``self`` is the empty set, that contains nothing. """
        return any(item.is_empty() for item in self.leaves)

    def is_universal(self) -> bool:
        """ Returns whether ``self`` is the universal set, that contains all possible elements. """
        return not self.leaves

    def matches(self, user_group_ids) -> bool:
        """ Returns whether the given group ids satisfy every leaf of ``self``. """
        return all(leaf.matches(user_group_ids) for leaf in self.leaves)

    def _union_merge(self, other: Inter) -> Inter | None:
        """ Return the union of ``self`` with another intersection, if it can be
        represented as an intersection. Otherwise return ``None``.
        """
        # the following covers cases like (A & B) | A -> A
        if self.is_universal() or other <= self:
            return self
        if self <= other:
            return other
        # combine complementary parts: (A & ~B) | (A & B) -> A
        if len(self.leaves) == len(other.leaves):
            opposite_index = None
            # we use the property that leaves are ordered (by key), so leaves
            # with the same id appear at the same position in both lists
            for index, (self_leaf, other_leaf) in enumerate(zip(self.leaves, other.leaves)):
                if self_leaf.id != other_leaf.id:
                    return None
                if self_leaf.negative != other_leaf.negative:
                    if opposite_index is not None:
                        return None  # we already have two opposite leaves
                    opposite_index = index
            if opposite_index is not None:
                leaves = list(self.leaves)
                leaves.pop(opposite_index)
                return Inter(leaves, optimal=True)
        return None

    def __and__(self, other: Inter) -> Inter:
        """ Intersection of two intersections. """
        if self.is_empty() or other.is_empty():
            return EMPTY_INTER
        if self.is_universal():
            return other
        if other.is_universal():
            return self
        leaves = self.__combine(self.leaves, other.leaves)
        return Inter(leaves, optimal=True)

    def __eq__(self, other) -> bool:
        return isinstance(other, Inter) and self.key == other.key

    def __le__(self, other: Inter) -> bool:
        # subset test: every constraint (leaf) of other must be implied by
        # some constraint of self
        return self.key == other.key or all(
            any(self_leaf <= other_leaf for self_leaf in self.leaves)
            for other_leaf in other.leaves
        )

    def __lt__(self, other: Inter) -> bool:
        return self != other and self <= other

    def __hash__(self):
        return hash(self.key)
class Leaf:
    """ Part of the implementation of a set expression, that represents a named
    set or its complement.
    """
    __slots__ = ('disjoints', 'id', 'inverse', 'key', 'negative', 'ref', 'subsets', 'supersets')

    def __init__(self, leaf_id: LeafIdType, ref: str | int | None = None, negative: bool = False):
        self.id = leaf_id
        self.ref = ref or str(leaf_id)
        self.negative = bool(negative)
        self.key: tuple[LeafIdType, bool] = (leaf_id, self.negative)
        # relationships with other leaves, identified by their ids
        self.subsets: set[LeafIdType] = {leaf_id}       # ids of leaves that are <= self
        self.supersets: set[LeafIdType] = {leaf_id}     # ids of leaves that are >= self
        self.disjoints: set[LeafIdType] = set()         # ids of leaves disjoint from self
        self.inverse: Leaf | None = None                # lazily-built complement

    def __invert__(self) -> Leaf:
        inv = self.inverse
        if inv is None:
            inv = Leaf(self.id, self.ref, negative=not self.negative)
            inv.inverse = self
            # both leaves describe the same named set, so they share the
            # very same relationship sets (aliased on purpose, not copied)
            inv.subsets = self.subsets
            inv.supersets = self.supersets
            inv.disjoints = self.disjoints
            self.inverse = inv
        return inv

    def is_empty(self) -> bool:
        return self.negative and self.ref == '*'

    def is_universal(self) -> bool:
        return not self.negative and self.ref == '*'

    def isdisjoint(self, other: Leaf) -> bool:
        """ Tell whether ``self`` and ``other`` have no element in common. """
        if self.negative:
            return other <= ~self
        if other.negative:
            return self <= ~other
        return self.id in other.disjoints

    def matches(self, user_group_ids: Collection[int]) -> bool:
        """ Tell whether the given group ids satisfy this leaf. """
        if self.negative:
            return self.id not in user_group_ids
        return self.id in user_group_ids

    def __eq__(self, other) -> bool:
        if not isinstance(other, Leaf):
            return False
        return self.key == other.key

    def __le__(self, other: Leaf) -> bool:
        if self.is_empty() or other.is_universal():
            return True
        if self.is_universal() or other.is_empty():
            return False
        if self.negative:
            # ~A <= ~B  iff  B <= A
            return other.negative and ~other <= ~self
        if other.negative:
            # A <= ~B  iff  A and B are disjoint
            return self.id in other.disjoints
        return self.id in other.subsets

    def __lt__(self, other: Leaf) -> bool:
        return self <= other and self != other

    def __hash__(self):
        return hash(self.key)
class UnknownId(str):
    """ Special id object for unknown leaves. It behaves as being strictly
    greater than any other kind of id.
    """
    __slots__ = ()

    def __lt__(self, other) -> bool:
        # never smaller than a regular id; fall back to string comparison
        # between two unknown ids
        if not isinstance(other, UnknownId):
            return False
        return super().__lt__(other)

    def __gt__(self, other) -> bool:
        # always greater than a regular id; fall back to string comparison
        # between two unknown ids
        if not isinstance(other, UnknownId):
            return True
        return super().__gt__(other)
# type of leaf identifiers: a numeric id, the special marker "*", or an
# UnknownId placeholder for unresolved references
LeafIdType = int | typing.Literal["*"] | UnknownId

# constants
UNIVERSAL_LEAF = Leaf('*')                   # the set of all elements
EMPTY_LEAF = ~UNIVERSAL_LEAF                 # its complement: contains nothing
EMPTY_INTER = Inter([EMPTY_LEAF])            # empty intersection
UNIVERSAL_INTER = Inter()                    # intersection of nothing: universal
EMPTY_UNION = Union()                        # union of nothing: empty
UNIVERSAL_UNION = Union([UNIVERSAL_INTER])   # union of the universal intersection

View file

@ -9,12 +9,29 @@ import logging
import re
from binascii import crc32
from collections import defaultdict
from typing import Iterable, Union
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from odoo.fields import Field
from collections.abc import Iterable
import psycopg2
import psycopg2.sql as pgsql
from .misc import named_to_positional_printf
__all__ = [
"SQL",
"create_index",
"create_unique_index",
"drop_view_if_exists",
"escape_psql",
"index_exists",
"make_identifier",
"make_index_name",
"reverse_order",
]
_schema = logging.getLogger('odoo.schema')
IDENT_RE = re.compile(r'^[a-z0-9_][a-z0-9_$\-]*$', re.I)
@ -58,64 +75,83 @@ class SQL:
if ``code`` is a string literal (not a dynamic string), then the SQL object
made with ``code`` is guaranteed to be safe, provided the SQL objects
within its parameters are themselves safe.
The wrapper may also contain some metadata ``to_flush``. If not ``None``,
its value is a field which the SQL code depends on. The metadata of a
wrapper and its parts can be accessed by the iterator ``sql.to_flush``.
"""
__slots__ = ('__code', '__args')
__slots__ = ('__code', '__params', '__to_flush')
__code: str
__params: tuple
__to_flush: tuple
# pylint: disable=keyword-arg-before-vararg
def __new__(cls, code: (str | SQL) = "", /, *args, **kwargs):
def __init__(self, code: (str | SQL) = "", /, *args, to_flush: (Field | None) = None, **kwargs):
if isinstance(code, SQL):
return code
if args or kwargs or to_flush:
raise TypeError("SQL() unexpected arguments when code has type SQL")
self.__code = code.__code
self.__params = code.__params
self.__to_flush = code.__to_flush
return
# validate the format of code and parameters
if args and kwargs:
raise TypeError("SQL() takes either positional arguments, or named arguments")
if args:
code % tuple("" for arg in args)
elif kwargs:
code, args = named_to_positional_printf(code, kwargs)
self = object.__new__(cls)
self.__code = code
self.__args = args
return self
if kwargs:
code, args = named_to_positional_printf(code, kwargs)
elif not args:
code % () # check that code does not contain %s
self.__code = code
self.__params = ()
self.__to_flush = () if to_flush is None else (to_flush,)
return
code_list = []
params_list = []
to_flush_list = []
for arg in args:
if isinstance(arg, SQL):
code_list.append(arg.__code)
params_list.extend(arg.__params)
to_flush_list.extend(arg.__to_flush)
else:
code_list.append("%s")
params_list.append(arg)
if to_flush is not None:
to_flush_list.append(to_flush)
self.__code = code % tuple(code_list)
self.__params = tuple(params_list)
self.__to_flush = tuple(to_flush_list)
@property
def code(self) -> str:
""" Return the combined SQL code string. """
stack = [] # stack of intermediate results
for node in self.__postfix():
if not isinstance(node, SQL):
stack.append("%s")
elif arity := len(node.__args):
stack[-arity:] = [node.__code % tuple(stack[-arity:])]
else:
stack.append(node.__code)
return stack[0]
return self.__code
@property
def params(self) -> list:
""" Return the combined SQL code params as a list of values. """
return [node for node in self.__postfix() if not isinstance(node, SQL)]
return list(self.__params)
def __postfix(self):
""" Return a postfix iterator for the SQL tree ``self``. """
stack = [(self, False)]
while stack:
node, ispostfix = stack.pop()
if ispostfix or not isinstance(node, SQL):
yield node
else:
stack.append((node, True))
stack.extend((arg, False) for arg in reversed(node.__args))
@property
def to_flush(self) -> Iterable[Field]:
""" Return an iterator on the fields to flush in the metadata of
``self`` and all of its parts.
"""
return self.__to_flush
def __repr__(self):
return f"SQL({', '.join(map(repr, [self.code, *self.params]))})"
return f"SQL({', '.join(map(repr, [self.__code, *self.__params]))})"
def __bool__(self):
return bool(self.__code)
def __eq__(self, other):
return self.code == other.code and self.params == other.params
return isinstance(other, SQL) and self.__code == other.__code and self.__params == other.__params
def __iter__(self):
""" Yields ``self.code`` and ``self.params``. This was introduced for
@ -134,9 +170,9 @@ class SQL:
# optimizations for special cases
if len(args) == 0:
return SQL()
if len(args) == 1:
if len(args) == 1 and isinstance(args[0], SQL):
return args[0]
if not self.__args:
if not self.__params:
return SQL(self.__code.join("%s" for arg in args), *args)
# general case: alternate args with self
items = [self] * (len(args) * 2 - 1)
@ -145,13 +181,13 @@ class SQL:
return SQL("%s" * len(items), *items)
@classmethod
def identifier(cls, name: str, subname: (str | None) = None) -> SQL:
def identifier(cls, name: str, subname: (str | None) = None, to_flush: (Field | None) = None) -> SQL:
""" Return an SQL object that represents an identifier. """
assert name.isidentifier() or IDENT_RE.match(name), f"{name!r} invalid for SQL.identifier()"
if subname is None:
return cls(f'"{name}"')
return cls(f'"{name}"', to_flush=to_flush)
assert subname.isidentifier() or IDENT_RE.match(subname), f"{subname!r} invalid for SQL.identifier()"
return cls(f'"{name}"."{subname}"')
return cls(f'"{name}"."{subname}"', to_flush=to_flush)
def existing_tables(cr, tablenames):
@ -181,7 +217,7 @@ class TableKind(enum.Enum):
Other = None
def table_kind(cr, tablename: str) -> Union[TableKind, None]:
def table_kind(cr, tablename: str) -> TableKind | None:
""" Return the kind of a table, if ``tablename`` is a regular or foreign
table, or a view (ignores indexes, sequences, toast tables, and partitioned
tables; unlogged tables are considered regular)
@ -405,9 +441,11 @@ def constraint_definition(cr, tablename, constraintname):
def add_constraint(cr, tablename, constraintname, definition):
""" Add a constraint on the given table. """
query1 = SQL(
"ALTER TABLE %s ADD CONSTRAINT %s %s",
SQL.identifier(tablename), SQL.identifier(constraintname), SQL(definition),
# There is a fundamental issue with SQL implementation that messes up with queries
# using %, for details check the PR discussion of this patch #188716. To be fixed
# in master. Here we use instead psycopg.sql
query1 = pgsql.SQL("ALTER TABLE {} ADD CONSTRAINT {} {}").format(
pgsql.Identifier(tablename), pgsql.Identifier(constraintname), pgsql.SQL(definition),
)
query2 = SQL(
"COMMENT ON CONSTRAINT %s ON %s IS %s",

View file

@ -1,18 +1,25 @@
from lxml import etree
from lxml.builder import E
import copy
import itertools
import logging
import re
from odoo.tools.translate import _
from odoo.tools import SKIPPED_ELEMENT_TYPES, html_escape
from odoo.exceptions import ValidationError
from lxml import etree
from lxml.builder import E
from odoo.tools.translate import LazyTranslate
from odoo.exceptions import ValidationError
from .misc import SKIPPED_ELEMENT_TYPES, html_escape
__all__ = []
_lt = LazyTranslate('base')
_logger = logging.getLogger(__name__)
RSTRIP_REGEXP = re.compile(r'\n[ \t]*$')
# attribute names that contain Python expressions
PYTHON_ATTRIBUTES = {'readonly', 'required', 'invisible', 'column_invisible', 't-if', 't-elif'}
def add_stripped_items_before(node, spec, extract):
text = spec.text or ''
@ -36,7 +43,9 @@ def add_stripped_items_before(node, spec, extract):
for child in spec:
if child.get('position') == 'move':
tail = child.tail
child = extract(child)
child.tail = tail
node.addprevious(child)
@ -77,7 +86,7 @@ def locate_node(arch, spec):
try:
xPath = etree.ETXPath(expr)
except etree.XPathSyntaxError as e:
raise ValidationError(_("Invalid Expression while parsing xpath %r", expr)) from e
raise ValidationError(_lt("Invalid Expression while parsing xpath %s", expr)) from e
nodes = xPath(arch)
return nodes[0] if nodes else None
elif spec.tag == 'field':
@ -122,7 +131,7 @@ def apply_inheritance_specs(source, specs_tree, inherit_branding=False, pre_loca
"""
if len(spec):
raise ValueError(
_("Invalid specification for moved nodes: %r", etree.tostring(spec, encoding='unicode'))
_lt("Invalid specification for moved nodes: %s", etree.tostring(spec, encoding='unicode'))
)
pre_locate(spec)
to_extract = locate_node(source, spec)
@ -131,7 +140,7 @@ def apply_inheritance_specs(source, specs_tree, inherit_branding=False, pre_loca
return to_extract
else:
raise ValueError(
_("Element %r cannot be located in parent view", etree.tostring(spec, encoding='unicode'))
_lt("Element “%s cannot be located in parent view", etree.tostring(spec, encoding='unicode'))
)
while len(specs):
@ -210,26 +219,80 @@ def apply_inheritance_specs(source, specs_tree, inherit_branding=False, pre_loca
node.text = spec.text
else:
raise ValueError(_("Invalid mode attribute:") + " '%s'" % mode)
raise ValueError(_lt("Invalid mode attribute:%s", mode))
elif pos == 'attributes':
for child in spec.getiterator('attribute'):
attribute = child.get('name')
value = child.text or ''
if child.get('add') or child.get('remove'):
assert not child.text
separator = child.get('separator', ',')
if separator == ' ':
separator = None # squash spaces
to_add = (
s for s in (s.strip() for s in child.get('add', '').split(separator))
if s
)
to_remove = {s.strip() for s in child.get('remove', '').split(separator)}
values = (s.strip() for s in node.get(attribute, '').split(separator))
value = (separator or ' ').join(itertools.chain(
(v for v in values if v not in to_remove),
to_add
# The element should only have attributes:
# - name (mandatory),
# - add, remove, separator
# - any attribute that starts with data-oe-*
unknown = [
key
for key in child.attrib
if key not in ('name', 'add', 'remove', 'separator')
and not key.startswith('data-oe-')
]
if unknown:
raise ValueError(_lt(
"Invalid attributes %s in element <attribute>",
", ".join(map(repr, unknown)),
))
attribute = child.get('name')
value = None
if child.get('add') or child.get('remove'):
if child.text:
raise ValueError(_lt(
"Element <attribute> with 'add' or 'remove' cannot contain text %s",
repr(child.text),
))
value = node.get(attribute, '')
add = child.get('add', '')
remove = child.get('remove', '')
separator = child.get('separator')
if attribute in PYTHON_ATTRIBUTES or attribute.startswith('decoration-'):
# attribute containing a python expression
separator = separator.strip()
if separator not in ('and', 'or'):
raise ValueError(_lt(
"Invalid separator %(separator)s for python expression %(expression)s; "
"valid values are 'and' and 'or'",
separator=repr(separator), expression=repr(attribute),
))
if remove:
if re.match(rf'^\(*{remove}\)*$', value):
value = ''
else:
patterns = [
f"({remove}) {separator} ",
f" {separator} ({remove})",
f"{remove} {separator} ",
f" {separator} {remove}",
]
for pattern in patterns:
index = value.find(pattern)
if index != -1:
value = value[:index] + value[index + len(pattern):]
break
if add:
value = f"({value}) {separator} ({add})" if value else add
else:
if separator is None:
separator = ','
elif separator == ' ':
separator = None # squash spaces
values = (s.strip() for s in value.split(separator))
to_add = filter(None, (s.strip() for s in add.split(separator)))
to_remove = {s.strip() for s in remove.split(separator)}
value = (separator or ' ').join(itertools.chain(
(v for v in values if v and v not in to_remove),
to_add
))
else:
value = child.text or ''
if value:
node.set(attribute, value)
elif attribute in node.attrib:
@ -255,10 +318,7 @@ def apply_inheritance_specs(source, specs_tree, inherit_branding=False, pre_loca
add_stripped_items_before(node, spec, extract)
else:
raise ValueError(
_("Invalid position attribute: '%s'") %
pos
)
raise ValueError(_lt("Invalid position attribute: '%s'", pos))
else:
attrs = ''.join([
@ -268,7 +328,7 @@ def apply_inheritance_specs(source, specs_tree, inherit_branding=False, pre_loca
])
tag = "<%s%s>" % (spec.tag, attrs)
raise ValueError(
_("Element '%s' cannot be located in parent view", tag)
_lt("Element '%s' cannot be located in parent view", tag)
)
return source

View file

@ -14,7 +14,6 @@ from lxml import etree
from subprocess import Popen, PIPE
from .. import api
from . import ustr, config
from .safe_eval import safe_eval
_logger = logging.getLogger(__name__)
@ -41,21 +40,20 @@ def try_report(cr, uid, rname, ids, data=None, context=None, our_module=None, re
if res_format == 'pdf':
if res_data[:5] != b'%PDF-':
raise ValueError("Report %s produced a non-pdf header, %r" % (rname, res_data[:10]))
res_text = False
res_text = None
try:
fd, rfname = tempfile.mkstemp(suffix=res_format)
os.write(fd, res_data)
os.close(fd)
proc = Popen(['pdftotext', '-enc', 'UTF-8', '-nopgbrk', rfname, '-'], shell=False, stdout=PIPE)
stdout, stderr = proc.communicate()
res_text = ustr(stdout)
proc = Popen(['pdftotext', '-enc', 'UTF-8', '-nopgbrk', rfname, '-'], shell=False, stdout=PIPE, encoding="utf-8")
res_text, _stderr = proc.communicate()
os.unlink(rfname)
except Exception:
_logger.debug("Unable to parse PDF report: install pdftotext to perform automated tests.")
if res_text is not False:
for line in res_text.split('\n'):
if res_text:
for line in res_text.splitlines():
if ('[[' in line) or ('[ [' in line):
_logger.error("Report %s may have bad expression near: \"%s\".", rname, line[80:])
# TODO more checks, what else can be a sign of a faulty report?

View file

@ -1,6 +1,10 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# When using quotation marks in translation strings, please use curly quotes (“”)
# instead of straight quotes (""). On Linux, the keyboard shortcuts are:
# AltGr + V for the opening curly quotes “
# AltGr + B for the closing curly quotes ”
from __future__ import annotations
import codecs
@ -13,18 +17,18 @@ import json
import locale
import logging
import os
from tokenize import generate_tokens, STRING, NEWLINE, INDENT, DEDENT
import polib
import re
import tarfile
import threading
import typing
import warnings
from collections import defaultdict, namedtuple
from contextlib import suppress
from datetime import datetime
from os.path import join
from pathlib import Path
from tokenize import generate_tokens, STRING, NEWLINE, INDENT, DEDENT
from babel.messages import extract
from lxml import etree, html
from markupsafe import escape, Markup
@ -32,8 +36,15 @@ from psycopg2.extras import Json
import odoo
from odoo.exceptions import UserError
from . import config, pycompat
from .misc import file_open, file_path, get_iso_codes, SKIPPED_ELEMENT_TYPES
from .config import config
from .misc import file_open, file_path, get_iso_codes, OrderedSet, ReadonlyDict, SKIPPED_ELEMENT_TYPES
__all__ = [
"_",
"LazyTranslate",
"html_translate",
"xml_translate",
]
_logger = logging.getLogger(__name__)
@ -41,9 +52,6 @@ PYTHON_TRANSLATION_COMMENT = 'odoo-python'
# translation used for javascript code in web client
JAVASCRIPT_TRANSLATION_COMMENT = 'odoo-javascript'
# used to notify web client that these translations should be loaded in the UI
# deprecated comment since Odoo 16.0
WEB_TRANSLATION_COMMENT = "openerp-web"
SKIPPED_ELEMENTS = ('script', 'style', 'title')
@ -141,12 +149,6 @@ class UNIX_LINE_TERMINATOR(csv.excel):
csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)
# FIXME: holy shit this whole thing needs to be cleaned up hard it's a mess
def encode(s):
assert isinstance(s, str)
return s
# which elements are translated inline
TRANSLATED_ELEMENTS = {
'abbr', 'b', 'bdi', 'bdo', 'br', 'cite', 'code', 'data', 'del', 'dfn', 'em',
@ -161,7 +163,7 @@ TRANSLATED_ELEMENTS = {
TRANSLATED_ATTRS = dict.fromkeys({
'string', 'add-label', 'help', 'sum', 'avg', 'confirm', 'placeholder', 'alt', 'title', 'aria-label',
'aria-keyshortcuts', 'aria-placeholder', 'aria-roledescription', 'aria-valuetext',
'value_label', 'data-tooltip', 'data-editor-message', 'label', 'cancel-label', 'confirm-label',
'value_label', 'data-tooltip', 'label', 'cancel-label', 'confirm-label',
}, lambda e: True)
def translate_attrib_value(node):
@ -317,7 +319,7 @@ def serialize_xml(node):
return etree.tostring(node, method='xml', encoding='unicode')
MODIFIER_ATTRS = {"invisible", "readonly", "required", "column_invisible", "attrs", "states"}
MODIFIER_ATTRS = {"invisible", "readonly", "required", "column_invisible", "attrs"}
def xml_term_adapter(term_en):
"""
Returns an `adapter(term)` function that will ensure the modifiers are copied
@ -453,158 +455,218 @@ def translate_sql_constraint(cr, key, lang):
""", (lang, key))
return cr.fetchone()[0]
class GettextAlias(object):
def _get_db(self):
# find current DB based on thread/worker db name (see netsvc)
db_name = getattr(threading.current_thread(), 'dbname', None)
if db_name:
return odoo.sql_db.db_connect(db_name)
def _get_cr(self, frame, allow_create=True):
# try, in order: cr, cursor, self.env.cr, self.cr,
# request.env.cr
if 'cr' in frame.f_locals:
return frame.f_locals['cr'], False
if 'cursor' in frame.f_locals:
return frame.f_locals['cursor'], False
s = frame.f_locals.get('self')
if hasattr(s, 'env'):
return s.env.cr, False
if hasattr(s, 'cr'):
return s.cr, False
try:
from odoo.http import request
return request.env.cr, False
except RuntimeError:
pass
if allow_create:
# create a new cursor
db = self._get_db()
if db is not None:
return db.cursor(), True
return None, False
def _get_uid(self, frame):
# try, in order: uid, user, self.env.uid
if 'uid' in frame.f_locals:
return frame.f_locals['uid']
if 'user' in frame.f_locals:
return int(frame.f_locals['user']) # user may be a record
s = frame.f_locals.get('self')
return s.env.uid
def _get_lang(self, frame):
# try, in order: context.get('lang'), kwargs['context'].get('lang'),
# self.env.lang, self.localcontext.get('lang'), request.env.lang
lang = None
if frame.f_locals.get('context'):
lang = frame.f_locals['context'].get('lang')
if not lang:
kwargs = frame.f_locals.get('kwargs', {})
if kwargs.get('context'):
lang = kwargs['context'].get('lang')
if not lang:
s = frame.f_locals.get('self')
if hasattr(s, 'env'):
lang = s.env.lang
if not lang:
if hasattr(s, 'localcontext'):
lang = s.localcontext.get('lang')
if not lang:
try:
from odoo.http import request
lang = request.env.lang
except RuntimeError:
pass
if not lang:
# Last resort: attempt to guess the language of the user
# Pitfall: some operations are performed in sudo mode, and we
# don't know the original uid, so the language may
# be wrong when the admin language differs.
(cr, dummy) = self._get_cr(frame, allow_create=False)
uid = self._get_uid(frame)
if cr and uid:
env = odoo.api.Environment(cr, uid, {})
lang = env['res.users'].context_get()['lang']
return lang
def __call__(self, source, *args, **kwargs):
translation = self._get_translation(source)
assert not (args and kwargs)
if args or kwargs:
if any(isinstance(a, Markup) for a in itertools.chain(args, kwargs.values())):
translation = escape(translation)
try:
return translation % (args or kwargs)
except (TypeError, ValueError, KeyError):
bad = translation
# fallback: apply to source before logging exception (in case source fails)
translation = source % (args or kwargs)
_logger.exception('Bad translation %r for string %r', bad, source)
def get_translation(module: str, lang: str, source: str, args: tuple | dict) -> str:
"""Translate and format using a module, language, source text and args."""
# get the translation by using the language
assert lang, "missing language for translation"
if lang == 'en_US':
translation = source
else:
assert module, "missing module name for translation"
translation = code_translations.get_python_translations(module, lang).get(source, source)
# skip formatting if we have no args
if not args:
return translation
# we need to check the args for markup values and for lazy translations
args_is_dict = isinstance(args, dict)
if any(isinstance(a, Markup) for a in (args.values() if args_is_dict else args)):
translation = escape(translation)
if any(isinstance(a, LazyGettext) for a in (args.values() if args_is_dict else args)):
if args_is_dict:
args = {k: v._translate(lang) if isinstance(v, LazyGettext) else v for k, v in args.items()}
else:
args = tuple(v._translate(lang) if isinstance(v, LazyGettext) else v for v in args)
# format
try:
return translation % args
except (TypeError, ValueError, KeyError):
bad = translation
# fallback: apply to source before logging exception (in case source fails)
translation = source % args
_logger.exception('Bad translation %r for string %r', bad, source)
return translation
def _get_translation(self, source, module=None):
try:
frame = inspect.currentframe().f_back.f_back
lang = self._get_lang(frame)
if lang and lang != 'en_US':
if not module:
path = inspect.getfile(frame)
path_info = odoo.modules.get_resource_from_path(path)
module = path_info[0] if path_info else 'base'
return code_translations.get_python_translations(module, lang).get(source, source)
else:
_logger.debug('no translation language detected, skipping translation for "%r" ', source)
except Exception:
_logger.debug('translation went wrong for "%r", skipped', source)
# if so, double-check the root/base translations filenames
return source
def get_translated_module(arg: str | int | typing.Any) -> str: # frame not represented as hint
"""Get the addons name.
:param arg: can be any of the following:
str ("name_of_module") returns itself;
str (__name__) use to resolve module name;
int is number of frames to go back to the caller;
frame of the caller function
"""
if isinstance(arg, str):
if arg.startswith('odoo.addons.'):
# get the name of the module
return arg.split('.')[2]
if '.' in arg or not arg:
# module name is not in odoo.addons.
return 'base'
else:
return arg
else:
if isinstance(arg, int):
frame = inspect.currentframe()
while arg > 0:
arg -= 1
frame = frame.f_back
else:
frame = arg
if not frame:
return 'base'
if (module_name := frame.f_globals.get("__name__")) and module_name.startswith('odoo.addons.'):
# just a quick lookup because `get_resource_from_path is slow compared to this`
return module_name.split('.')[2]
path = inspect.getfile(frame)
path_info = odoo.modules.get_resource_from_path(path)
return path_info[0] if path_info else 'base'
def _get_cr(frame):
# try, in order: cr, cursor, self.env.cr, self.cr,
# request.env.cr
if 'cr' in frame.f_locals:
return frame.f_locals['cr']
if 'cursor' in frame.f_locals:
return frame.f_locals['cursor']
if (local_self := frame.f_locals.get('self')) is not None:
if (local_env := getattr(local_self, 'env', None)) is not None:
return local_env.cr
if (cr := getattr(local_self, 'cr', None)) is not None:
return cr
try:
from odoo.http import request # noqa: PLC0415
request_env = request.env
if request_env is not None and (cr := request_env.cr) is not None:
return cr
except RuntimeError:
pass
return None
def _get_uid(frame) -> int | None:
# try, in order: uid, user, self.env.uid
if 'uid' in frame.f_locals:
return frame.f_locals['uid']
if 'user' in frame.f_locals:
return int(frame.f_locals['user']) # user may be a record
if (local_self := frame.f_locals.get('self')) is not None:
if hasattr(local_self, 'env') and (uid := local_self.env.uid):
return uid
return None
def _get_lang(frame, default_lang='') -> str:
# get from: context.get('lang'), kwargs['context'].get('lang'),
if local_context := frame.f_locals.get('context'):
if lang := local_context.get('lang'):
return lang
if (local_kwargs := frame.f_locals.get('kwargs')) and (local_context := local_kwargs.get('context')):
if lang := local_context.get('lang'):
return lang
# get from self.env
log_level = logging.WARNING
local_self = frame.f_locals.get('self')
local_env = local_self is not None and getattr(local_self, 'env', None)
if local_env:
if lang := local_env.lang:
return lang
# we found the env, in case we fail, just log in debug
log_level = logging.DEBUG
# get from request?
try:
from odoo.http import request # noqa: PLC0415
request_env = request.env
if request_env and (lang := request_env.lang):
return lang
except RuntimeError:
pass
# Last resort: attempt to guess the language of the user
# Pitfall: some operations are performed in sudo mode, and we
# don't know the original uid, so the language may
# be wrong when the admin language differs.
cr = _get_cr(frame)
uid = _get_uid(frame)
if cr and uid:
env = odoo.api.Environment(cr, uid, {})
if lang := env['res.users'].context_get().get('lang'):
return lang
# fallback
if default_lang:
_logger.debug('no translation language detected, fallback to %s', default_lang)
return default_lang
# give up
_logger.log(log_level, 'no translation language detected, skipping translation %s', frame, stack_info=True)
return ''
def _get_translation_source(stack_level: int, module: str = '', lang: str = '', default_lang: str = '') -> tuple[str, str]:
if not (module and lang):
frame = inspect.currentframe()
for _index in range(stack_level + 1):
frame = frame.f_back
lang = lang or _get_lang(frame, default_lang)
if lang and lang != 'en_US':
return get_translated_module(module or frame), lang
else:
# we don't care about the module for 'en_US'
return module or 'base', 'en_US'
def get_text_alias(source: str, *args, **kwargs):
assert not (args and kwargs)
assert isinstance(source, str)
module, lang = _get_translation_source(1)
return get_translation(module, lang, source, args or kwargs)
@functools.total_ordering
class _lt:
""" Lazy code translation
class LazyGettext:
""" Lazy code translated term.
Similar to GettextAlias but the translation lookup will be done only at
Similar to get_text_alias but the translation lookup will be done only at
__str__ execution.
This eases the search for terms to translate as lazy evaluated strings
are declared early.
A code using translated global variables such as:
```
_lt = LazyTranslate(__name__)
LABEL = _lt("User")
def _compute_label(self):
context = {'lang': self.partner_id.lang}
self.user_label = LABEL
env = self.with_env(lang=self.partner_id.lang).env
self.user_label = env._(LABEL)
```
works as expected (unlike the classic GettextAlias implementation).
works as expected (unlike the classic get_text_alias implementation).
"""
__slots__ = ['_source', '_args', '_module']
__slots__ = ('_args', '_default_lang', '_module', '_source')
def __init__(self, source, *args, **kwargs):
self._source = source
def __init__(self, source, *args, _module='', _default_lang='', **kwargs):
assert not (args and kwargs)
assert isinstance(source, str)
self._source = source
self._args = args or kwargs
self._module = get_translated_module(_module or 2)
self._default_lang = _default_lang
frame = inspect.currentframe().f_back
path = inspect.getfile(frame)
path_info = odoo.modules.get_resource_from_path(path)
self._module = path_info[0] if path_info else 'base'
def _translate(self, lang: str = '') -> str:
module, lang = _get_translation_source(2, self._module, lang, default_lang=self._default_lang)
return get_translation(module, lang, self._source, self._args)
def __repr__(self):
""" Show for the debugger"""
args = {'_module': self._module, '_default_lang': self._default_lang, '_args': self._args}
return f"_lt({self._source!r}, **{args!r})"
def __str__(self):
# Call _._get_translation() like _() does, so that we have the same number
# of stack frames calling _get_translation()
translation = _._get_translation(self._source, self._module)
if self._args:
try:
return translation % self._args
except (TypeError, ValueError, KeyError):
bad = translation
# fallback: apply to source before logging exception (in case source fails)
translation = self._source % self._args
_logger.exception('Bad translation %r for string %r', bad, self._source)
return translation
""" Translate."""
return self._translate()
def __eq__(self, other):
""" Prevent using equal operators
@ -614,26 +676,50 @@ class _lt:
"""
raise NotImplementedError()
def __hash__(self):
raise NotImplementedError()
def __lt__(self, other):
raise NotImplementedError()
def __add__(self, other):
# Call _._get_translation() like _() does, so that we have the same number
# of stack frames calling _get_translation()
if isinstance(other, str):
return _._get_translation(self._source) + other
elif isinstance(other, _lt):
return _._get_translation(self._source) + _._get_translation(other._source)
return self._translate() + other
elif isinstance(other, LazyGettext):
return self._translate() + other._translate()
return NotImplemented
def __radd__(self, other):
# Call _._get_translation() like _() does, so that we have the same number
# of stack frames calling _get_translation()
if isinstance(other, str):
return other + _._get_translation(self._source)
return other + self._translate()
return NotImplemented
_ = GettextAlias()
class LazyTranslate:
    """ Lazy translation template.

    Usage:
    ```
    _lt = LazyTranslate(__name__)
    MYSTR = _lt('Translate X')
    ```

    You may specify a `default_lang` to fallback to a given language on error
    """
    # addon owning the lazy terms created through this template
    module: str
    # language used as a fallback when translation fails
    default_lang: str

    def __init__(self, module: str, *, default_lang: str = '') -> None:
        # when no module name is given, pass 2 instead — presumably a stack
        # level for get_translated_module's frame inspection (TODO confirm)
        resolved_module = get_translated_module(module or 2)
        self.module = resolved_module
        # set the default lang to en_US for lazy translations in the base module
        self.default_lang = default_lang or ('en_US' if resolved_module == 'base' else '')

    def __call__(self, source: str, *args, **kwargs) -> LazyGettext:
        """Build a lazily-translated term bound to this template's module."""
        return LazyGettext(source, *args, _module=self.module, _default_lang=self.default_lang, **kwargs)
# Public shorthands: `_` resolves the translation immediately at call time,
# while `_lt` builds a LazyGettext term whose lookup is deferred until render.
_ = get_text_alias
_lt = LazyGettext
def quote(s):
@ -795,9 +881,10 @@ def TranslationFileWriter(target, fileformat='po', lang=None):
'.csv, .po, or .tgz (received .%s).') % fileformat)
_writer = codecs.getwriter('utf-8')
class CSVFileWriter:
def __init__(self, target):
self.writer = pycompat.csv_writer(target, dialect='UNIX')
self.writer = csv.writer(_writer(target), dialect='UNIX')
# write header first
self.writer.writerow(("module","type","name","res_id","src","value","comments"))
@ -867,22 +954,20 @@ class PoFileWriter:
entry.comment = "module%s: %s" % (plural, ', '.join(modules))
if comments:
entry.comment += "\n" + "\n".join(comments)
code = False
for typy, name, res_id in tnrs:
if typy == 'code':
code = True
res_id = 0
if isinstance(res_id, int) or res_id.isdigit():
# second term of occurrence must be a digit
# occurrence line at 0 are discarded when rendered to string
entry.occurrences.append((u"%s:%s" % (typy, name), str(res_id)))
occurrences = OrderedSet()
for type_, *ref in tnrs:
if type_ == "code":
fpath, lineno = ref
name = f"code:{fpath}"
# lineno is set to 0 to avoid creating diff in PO files every
# time the code is moved around
lineno = "0"
else:
entry.occurrences.append((u"%s:%s:%s" % (typy, name, res_id), ''))
if code:
# TODO 17.0: remove the flag python-format in all PO/POT files
# The flag is used in a wrong way. It marks all code translations even for javascript translations.
entry.flags.append("python-format")
field_name, xmlid = ref
name = f"{type_}:{field_name}:{xmlid}"
lineno = None # no lineno for model/model_terms sources
occurrences.add((name, lineno))
entry.occurrences = list(occurrences)
self.po.append(entry)
@ -952,7 +1037,6 @@ def _extract_translatable_qweb_terms(element, callback):
if isinstance(el, SKIPPED_ELEMENT_TYPES): continue
if (el.tag.lower() not in SKIPPED_ELEMENTS
and "t-js" not in el.attrib
and not ("t-jquery" in el.attrib and "t-operation" not in el.attrib)
and not (el.tag == 'attribute' and el.get('name') not in TRANSLATED_ATTRS)
and el.get("t-translation", '').strip() != "off"):
@ -997,7 +1081,7 @@ def extract_formula_terms(formula):
tokens = generate_tokens(io.StringIO(formula).readline)
tokens = (token for token in tokens if token.type not in {NEWLINE, INDENT, DEDENT})
for t1 in tokens:
if not t1.string == '_t':
if t1.string != '_t':
continue
t2 = next(tokens, None)
if t2 and t2.string == '(':
@ -1031,16 +1115,19 @@ def extract_spreadsheet_terms(fileobj, keywords, comment_tags, options):
if markdown_link:
terms.add(markdown_link[1])
for figure in sheet['figures']:
terms.add(figure['data']['title'])
if 'baselineDescr' in figure['data']:
terms.add(figure['data']['baselineDescr'])
pivots = data.get('pivots', {}).values()
lists = data.get('lists', {}).values()
for data_source in itertools.chain(lists, pivots):
if 'name' in data_source:
terms.add(data_source['name'])
for global_filter in data.get('globalFilters', []):
terms.add(global_filter['label'])
if figure['tag'] == 'chart':
title = figure['data']['title']
if isinstance(title, str):
terms.add(title)
elif 'text' in title:
terms.add(title['text'])
if 'axesDesign' in figure['data']:
terms.update(
axes.get('title', {}).get('text', '') for axes in figure['data']['axesDesign'].values()
)
if 'baselineDescr' in figure['data']:
terms.add(figure['data']['baselineDescr'])
terms.update(global_filter['label'] for global_filter in data.get('globalFilters', []))
return (
(0, None, term, [])
for term in terms
@ -1059,7 +1146,7 @@ class TranslationReader:
def __iter__(self):
for module, source, name, res_id, ttype, comments, _record_id, value in self._to_translate:
yield (module, ttype, name, res_id, source, encode(odoo.tools.ustr(value)), comments)
yield (module, ttype, name, res_id, source, value, comments)
def _push_translation(self, module, ttype, name, res_id, source, comments=None, record_id=None, value=None):
""" Insert a translation that will be used in the file generation
@ -1310,7 +1397,7 @@ class TranslationModuleReader(TranslationReader):
lineno, message, comments = extracted[:3]
value = translations.get(message, '')
self._push_translation(module, trans_type, display_path, lineno,
encode(message), comments + extra_comments, value=value)
message, comments + extra_comments, value=value)
except Exception:
_logger.exception("Failed to extract terms from %s", fabsolutepath)
finally:
@ -1321,7 +1408,7 @@ class TranslationModuleReader(TranslationReader):
This will include:
- the python strings marked with _() or _lt()
- the javascript strings marked with _t() or _lt() inside static/src/js/
- the javascript strings marked with _t() inside static/src/js/
- the strings inside Qweb files inside static/src/xml/
- the spreadsheet data files
"""
@ -1348,7 +1435,7 @@ class TranslationModuleReader(TranslationReader):
for fname in fnmatch.filter(files, '*.js'):
self._babel_extract_terms(fname, path, root, 'javascript',
extra_comments=[JAVASCRIPT_TRANSLATION_COMMENT],
extract_keywords={'_t': None, '_lt': None})
extract_keywords={'_t': None})
# QWeb template files
for fname in fnmatch.filter(files, '*.xml'):
self._babel_extract_terms(fname, path, root, 'odoo.tools.translate:babel_extract_qweb',
@ -1629,11 +1716,7 @@ def load_language(cr, lang):
installer.lang_install()
def get_po_paths(module_name: str, lang: str):
return get_po_paths_env(module_name, lang)
def get_po_paths_env(module_name: str, lang: str, env: odoo.api.Environment | None = None):
def get_po_paths(module_name: str, lang: str, env: odoo.api.Environment | None = None):
lang_base = lang.split('_', 1)[0]
# Load the base as a fallback in case a translation is missing:
po_names = [lang_base, lang]
@ -1642,7 +1725,7 @@ def get_po_paths_env(module_name: str, lang: str, env: odoo.api.Environment | No
po_names.insert(1, 'es_419')
po_paths = (
join(module_name, dir_, filename + '.po')
for filename in po_names
for filename in OrderedSet(po_names)
for dir_ in ('i18n', 'i18n_extra')
)
for path in po_paths:
@ -1690,25 +1773,19 @@ class CodeTranslations:
def _load_python_translations(self, module_name, lang):
def filter_func(row):
# In the pot files with new translations, a code translation should have either
# PYTHON_TRANSLATION_COMMENT or JAVASCRIPT_TRANSLATION_COMMENT for comments.
# If a comment has neither the above comments, the pot file uses the deprecated
# comments. And all code translations are stored as python translations.
return row.get('value') and (
PYTHON_TRANSLATION_COMMENT in row['comments']
or JAVASCRIPT_TRANSLATION_COMMENT not in row['comments'])
return row.get('value') and PYTHON_TRANSLATION_COMMENT in row['comments']
translations = CodeTranslations._get_code_translations(module_name, lang, filter_func)
self.python_translations[(module_name, lang)] = translations
self.python_translations[(module_name, lang)] = ReadonlyDict(translations)
def _load_web_translations(self, module_name, lang):
def filter_func(row):
return row.get('value') and (
JAVASCRIPT_TRANSLATION_COMMENT in row['comments']
or WEB_TRANSLATION_COMMENT in row['comments'])
return row.get('value') and JAVASCRIPT_TRANSLATION_COMMENT in row['comments']
translations = CodeTranslations._get_code_translations(module_name, lang, filter_func)
self.web_translations[(module_name, lang)] = {
"messages": [{"id": src, "string": value} for src, value in translations.items()]
}
self.web_translations[(module_name, lang)] = ReadonlyDict({
"messages": tuple(
ReadonlyDict({"id": src, "string": value})
for src, value in translations.items())
})
def get_python_translations(self, module_name, lang):
if (module_name, lang) not in self.python_translations:
@ -1730,7 +1807,8 @@ def _get_translation_upgrade_queries(cr, field):
field's column, while the queries in ``cleanup_queries`` remove the corresponding data from
table ``_ir_translation``.
"""
Model = odoo.registry(cr.dbname)[field.model_name]
from odoo.modules.registry import Registry # noqa: PLC0415
Model = Registry(cr.dbname)[field.model_name]
translation_name = f"{field.model_name},{field.name}"
migrate_queries = []
cleanup_queries = []

View file

@ -238,6 +238,8 @@ def get_expression_field_names(expression):
ignore 'parent.truc' and 'parent.truc.id')
:return: set(str)
"""
if not expression:
return set()
item_ast = ast.parse(expression.strip(), mode='eval').body
contextual_values = _get_expression_contextual_values(item_ast)
@ -304,14 +306,12 @@ def relaxng(view_type):
return _relaxng_cache[view_type]
@validate('calendar', 'graph', 'pivot', 'search', 'tree', 'activity')
@validate('calendar', 'graph', 'pivot', 'search', 'list', 'activity')
def schema_valid(arch, **kwargs):
""" Get RNG validator and validate RNG file."""
validator = relaxng(arch.tag)
if validator and not validator.validate(arch):
result = True
for error in validator.error_log:
_logger.warning(tools.ustr(error))
result = False
return result
_logger.warning("%s", error)
return False
return True

View file

@ -1,16 +1,24 @@
# -*- coding: utf-8 -*-
"""Utilities for generating, parsing and checking XML/XSD files on top of the lxml.etree module."""
import base64
import contextlib
import logging
import re
import requests
import zipfile
from io import BytesIO
import requests
from lxml import etree
import contextlib
from odoo.exceptions import UserError
from odoo.tools.misc import file_open
__all__ = [
"cleanup_xml_node",
"load_xsd_files_from_url",
"validate_xml_from_attachment",
]
_logger = logging.getLogger(__name__)
@ -55,6 +63,29 @@ class odoo_resolver(etree.Resolver):
return self.resolve_string(attachment.raw, context)
def _validate_xml(env, url, path, xmls):
    """Validate one or several XML documents against an XSD schema.

    The schema is wrapped in a transient ``ir.attachment``: built from the
    file at ``path`` when given, otherwise fetched via
    ``load_xsd_files_from_url(env, url)``. The attachment is removed once
    validation completes.

    :param env: Odoo environment used to create/read attachments
    :param url: URL to fetch the XSD from (used only when ``path`` is falsy)
    :param path: addon-relative path to a local ``.xsd`` file
    :param xmls: a single XML content or a list of them
    """
    # Resolve the XSD data into an attachment record.
    if path:
        with file_open(path, filter_ext=('.xsd',)) as xsd_file:
            xsd_content = xsd_file.read()
        xsd_attachment = env['ir.attachment'].create({
            'name': path.split('/')[-1],
            'datas': base64.b64encode(xsd_content.encode()),
        })
    elif url:
        xsd_attachment = load_xsd_files_from_url(env, url)
    else:
        # no source given: empty recordset, mirrors the original default
        xsd_attachment = env['ir.attachment']

    # Validate each XML against the XSD.
    xml_list = xmls if isinstance(xmls, list) else [xmls]
    for xml in xml_list:
        validate_xml_from_attachment(env, xml, xsd_attachment.name)

    # Drop the temporary schema attachment.
    xsd_attachment.unlink()
def _check_with_xsd(tree_or_str, stream, env=None, prefix=None):
"""Check an XML against an XSD schema.
@ -94,7 +125,7 @@ def create_xml_node_chain(first_parent_node, nodes_list, last_node_value=None):
in `nodes_list`, under the given node `first_parent_node`.
:param etree._Element first_parent_node: parent of the created tree/chain
:param iterable[str] nodes_list: tag names to be created
:param Iterable[str] nodes_list: tag names to be created
:param str last_node_value: if specified, set the last node's text to this value
:returns: the list of created nodes
:rtype: list[etree._Element]
@ -285,7 +316,6 @@ def load_xsd_files_from_url(env, url, file_name=None, force_reload=False,
def validate_xml_from_attachment(env, xml_content, xsd_name, reload_files_function=None, prefix=None):
"""Try and validate the XML content with an XSD attachment.
If the XSD attachment cannot be found in database, skip validation without raising.
If the skip_xsd context key is truthy, skip validation.
:param odoo.api.Environment env: environment of calling module
:param xml_content: the XML content to validate
@ -293,8 +323,6 @@ def validate_xml_from_attachment(env, xml_content, xsd_name, reload_files_functi
:param reload_files_function: Deprecated.
:return: the result of the function :func:`odoo.tools.xml_utils._check_with_xsd`
"""
if env.context.get('skip_xsd', False):
return
prefixed_xsd_name = f"{prefix}.{xsd_name}" if prefix else xsd_name
try:
@ -303,6 +331,10 @@ def validate_xml_from_attachment(env, xml_content, xsd_name, reload_files_functi
_logger.info("XSD validation successful!")
except FileNotFoundError:
_logger.info("XSD file not found, skipping validation")
except etree.XMLSchemaParseError as e:
_logger.error("XSD file not valid: ")
for arg in e.args:
_logger.error(arg)
def find_xml_value(xpath, xml_element, namespaces=None):

View file

@ -181,5 +181,5 @@ class SerialProxy(SimpleNamespace):
@classmethod
def __check(cls, key, value):
assert not key.startswith('_')
assert not key.startswith('_') or key.startswith('_value_')
assert type(value) in SERIALIZABLE_TYPES + (SerialProxy,)